Package org.apache.hadoop.hdfs.client

Examples of org.apache.hadoop.hdfs.client.HdfsDataOutputStream
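HdfsDataOutputStream is the HDFS-specific subclass of FSDataOutputStream that DistributedFileSystem returns for newly created or appended files; it adds hsync(EnumSet&lt;SyncFlag&gt;) so a client can persist the file's visible length on the NameNode while the stream is still open. Before the collected snippets below, here is a minimal, hedged sketch of that pattern; it assumes a running HDFS cluster reachable via fs.defaultFS, and the path /tmp/hdfs-out-example is a hypothetical name, not taken from the snippets.

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class HdfsDataOutputStreamExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);          // assumes fs.defaultFS points at an HDFS cluster
    Path file = new Path("/tmp/hdfs-out-example"); // hypothetical path
    // On HDFS, create() returns an HdfsDataOutputStream, so the downcast is safe.
    HdfsDataOutputStream out = (HdfsDataOutputStream) fs.create(file);
    try {
      out.write(new byte[1024]);
      // Flush to the DataNodes and ask the NameNode to update the file's visible length,
      // so readers (and snapshots) see the synced bytes before the stream is closed.
      out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    } finally {
      out.close();
    }
  }
}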


  private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
      throws IOException {
    byte[] toAppend = new byte[length];
    Random random = new Random();
    random.nextBytes(toAppend);
    HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
    out.write(toAppend);
    return out;
  }


    // 1. create snapshot s0
    hdfs.allowSnapshot(dir);
    hdfs.createSnapshot(dir, "s0");
   
    // 2. create snapshot s1 before appending sub1file1 finishes
    HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    // also append sub1file2
    DFSTestUtil.appendFile(hdfs, sub1file2, BLOCKSIZE);
    hdfs.createSnapshot(dir, "s1");
    out.close();
   
    // 3. create snapshot s2 before appending finishes
    out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    hdfs.createSnapshot(dir, "s2");
    out.close();
   
    // 4. save fsimage before appending finishes
    out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    // dump fsdir
    File fsnBefore = dumpTree2File("before");
    // save the namesystem to a temp file
    File imageFile = saveFSImageToTempFile();
   
    // 5. load fsimage and compare
    // first restart the cluster, and format the cluster
    out.close();
    cluster.shutdown();
    cluster = new MiniDFSCluster.Builder(conf).format(true)
        .numDataNodes(REPLICATION).build();
    cluster.waitActive();
    fsn = cluster.getNamesystem();

    DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
   
    hdfs.allowSnapshot(dir);
    hdfs.createSnapshot(dir, "s0");
   
    HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));     
   
    // save namespace and restart cluster
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    hdfs.saveNamespace();
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);


  public void testSnapshotWhileAppending() throws Exception {
    Path file = new Path(dir, "file");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
   
    // 1. append without closing stream --> create snapshot
    HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    out.close();
   
    // check: an INodeFileUnderConstructionWithSnapshot should be stored into s0's
    // deleted list, with size BLOCKSIZE*2
    INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
    assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize());
    INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
        .getINode(dir.toString());
    DirectoryDiff last = dirNode.getDiffs().getLast();
   
    // 2. append without closing stream
    out = appendFileWithoutClosing(file, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
   
    // re-check nodeInDeleted_S0
    dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
    assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(last.getSnapshotId()));
   
    // 3. take snapshot --> close stream
    hdfs.createSnapshot(dir, "s1");
    out.close();
   
    // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3 should
    // have been stored in s1's deleted list
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
    last = dirNode.getDiffs().getLast();
    assertTrue(fileNode.isWithSnapshot());
    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
   
    // 4. modify file --> append without closing stream --> take snapshot -->
    // close stream
    hdfs.setReplication(file, (short) (REPLICATION - 1));
    out = appendFileWithoutClosing(file, BLOCKSIZE);
    hdfs.createSnapshot(dir, "s2");
    out.close();
   
    // re-check the size of nodeInDeleted_S1
    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
  }

    SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
    final Path fileInSnapshot2 = SnapshotTestHelper.getSnapshotPath(root,
        "s2", file.getName());
   
    // append data to file without closing
    HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
   
    status = hdfs.getFileStatus(fileInSnapshot2);
    // the size of snapshot file should be BLOCKSIZE*2-1
    assertEquals(BLOCKSIZE * 2 - 1, status.getLen());
    // the size of the file should be (3 * BLOCKSIZE - 1)
    status = hdfs.getFileStatus(file);
    assertEquals(BLOCKSIZE * 3 - 1, status.getLen());
   
    blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
        fileInSnapshot2.toString(), 0, Long.MAX_VALUE);
    assertFalse(blocks.isUnderConstruction());
    assertTrue(blocks.isLastBlockComplete());
    blockList = blocks.getLocatedBlocks();
   
    // should be 2 blocks
    assertEquals(BLOCKSIZE * 2 - 1, blocks.getFileLength());
    assertEquals(2, blockList.size());
   
    // check the last block
    lastBlock = blocks.getLastLocatedBlock();
    assertEquals(BLOCKSIZE, lastBlock.getStartOffset());
    assertEquals(BLOCKSIZE, lastBlock.getBlockSize());
   
    blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
        fileInSnapshot2.toString(), BLOCKSIZE, 0);
    blockList = blocks.getLocatedBlocks();
    assertEquals(1, blockList.size());
   
    // check blocks for file being written
    blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
        file.toString(), 0, Long.MAX_VALUE);
    blockList = blocks.getLocatedBlocks();
    assertEquals(3, blockList.size());
    assertTrue(blocks.isUnderConstruction());
    assertFalse(blocks.isLastBlockComplete());
   
    lastBlock = blocks.getLastLocatedBlock();
    assertEquals(BLOCKSIZE * 2, lastBlock.getStartOffset());
    assertEquals(BLOCKSIZE - 1, lastBlock.getBlockSize());
    out.close();
  }

  // includes COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX,
  // COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR, and COMMIT_DO_SYNC.
  public void testCheckCommit() throws IOException {
    DFSClient dfsClient = Mockito.mock(DFSClient.class);
    Nfs3FileAttributes attr = new Nfs3FileAttributes();
    HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
    Mockito.when(fos.getPos()).thenReturn((long) 0);

    OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
        new IdUserGroup());

    COMMIT_STATUS ret;

    // Test inactive open file context
    ctx.setActiveStatusForTest(false);
    Channel ch = Mockito.mock(Channel.class);
    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
    Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX);

    ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
        new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
    Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE);

    // Test request with non zero commit offset
    ctx.setActiveStatusForTest(true);
    Mockito.when(fos.getPos()).thenReturn((long) 10);
    COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false);
    Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
    // Do_SYNC state will be updated to FINISHED after data sync
    ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, false);
    Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);

  // includes COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX,
  // COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR, and COMMIT_DO_SYNC.
  public void testCheckCommitFromRead() throws IOException {
    DFSClient dfsClient = Mockito.mock(DFSClient.class);
    Nfs3FileAttributes attr = new Nfs3FileAttributes();
    HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
    Mockito.when(fos.getPos()).thenReturn((long) 0);

    OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
        new IdUserGroup());

    FileHandle h = new FileHandle(1); // fake handle for "/dumpFilePath"
    COMMIT_STATUS ret;
    WriteManager wm = new WriteManager(new IdUserGroup(), new Configuration());
    assertTrue(wm.addOpenFileStream(h, ctx));
   
    // Test inactive open file context
    ctx.setActiveStatusForTest(false);
    Channel ch = Mockito.mock(Channel.class);
    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_CTX, ret);
    assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
   
    ctx.getPendingWritesForTest().put(new OffsetRange(5, 10),
        new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE, ret);
    assertEquals(Nfs3Status.NFS3ERR_IO, wm.commitBeforeRead(dfsClient, h, 0));
   
    // Test request with non zero commit offset
    ctx.setActiveStatusForTest(true);
    Mockito.when(fos.getPos()).thenReturn((long) 10);
    COMMIT_STATUS status = ctx.checkCommitInternal(5, ch, 1, attr, false);
    assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status);
    // Do_SYNC state will be updated to FINISHED after data sync
    ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);

    final long numBlocks = 2;
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY, numBlocks);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    HdfsDataOutputStream fout =
        (HdfsDataOutputStream)fs.create(new Path("/testmaxfilelimit"));
    try {
      // Write maximum number of blocks
      fout.write(new byte[(int)blockSize*(int)numBlocks]);
      fout.hflush();
      // Try to write one more block
      try {
        fout.write(new byte[1]);
        fout.hflush();
        assert false : "Expected IOException after writing too many blocks";
      } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("File has reached the limit" +
            " on maximum number of", e);
      }

  @Override
  public HdfsDataOutputStream create(Path f, FsPermission permission,
      EnumSet<CreateFlag> cflags, int bufferSize, short replication, long blockSize,
      Progressable progress, ChecksumOpt checksumOpt) throws IOException {
    statistics.incrementWriteOps(1);
    final DFSOutputStream out = dfs.create(getPathName(f), permission, cflags,
        replication, blockSize, progress, bufferSize, checksumOpt);
    return new HdfsDataOutputStream(out, statistics);
  }
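
For context, a hedged caller-side sketch of the overload above: FileSystem declares this full-argument create(), and on HDFS the stream it returns is the HdfsDataOutputStream built here, so the downcast is valid. The helper name createOnHdfs, the path, the permission, and the sizing values below are illustrative assumptions, not taken from the Hadoop sources.

  // Assumed imports: java.io.IOException, java.util.EnumSet,
  // org.apache.hadoop.fs.{CreateFlag, FSDataOutputStream, FileSystem, Path},
  // org.apache.hadoop.fs.permission.FsPermission,
  // org.apache.hadoop.hdfs.client.HdfsDataOutputStream.
  static HdfsDataOutputStream createOnHdfs(FileSystem fs) throws IOException {
    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
    FSDataOutputStream stream = fs.create(new Path("/tmp/create-overload-example"), // hypothetical path
        new FsPermission((short) 0644), flags, 4096 /* bufferSize */,
        (short) 3 /* replication */, 128 * 1024 * 1024L /* blockSize */,
        null /* progress */, null /* default ChecksumOpt */);
    // Valid downcast when fs is a DistributedFileSystem, per the override above.
    return (HdfsDataOutputStream) stream;
  }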
