Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.ExtendedBlock
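ExtendedBlock identifies a block cluster-wide by pairing a block pool ID with the per-pool block ID, byte length, and generation stamp. A minimal sketch of constructing one and reading it back; the pool ID and numeric values below are invented for illustration:

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    // All values are made up for the example.
    ExtendedBlock eb = new ExtendedBlock(
        "BP-1234567890-127.0.0.1-1400000000000", // block pool ID
        1073741825L,                             // block ID
        134217728L,                              // length in bytes
        1001L);                                  // generation stamp
    System.out.println(eb.getBlockPoolId());     // pool-qualified identity
    System.out.println(eb.getBlockName());       // "blk_" + block ID
    Block local = eb.getLocalBlock();            // the per-pool Block view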


    // Excerpt: open the output stream for the next block of the file,
    // retrying up to nBlockWriteRetry times. The current ExtendedBlock is
    // saved first so a failed allocation can be abandoned and retried.
    private DatanodeInfo[] nextBlockOutputStream(String client) throws IOException {
      LocatedBlock lb = null;
      DatanodeInfo[] nodes = null;
      int count = dfsClient.getConf().nBlockWriteRetry;
      boolean success = false;
      ExtendedBlock oldBlock = block;
      do {
        // Reset per-attempt pipeline error state before each retry.
        hasError = false;
        lastException = null;
        errorIndex = -1;
        success = false;
        // ...


    /* Send a block replace request to the output stream. */
    private void sendRequest(DataOutputStream out) throws IOException {
      // Qualify the block with the cluster's block pool ID before issuing
      // the transfer-protocol replaceBlock call to the target datanode.
      final ExtendedBlock eb = new ExtendedBlock(nnc.blockpoolID, block.getBlock());
      final Token<BlockTokenIdentifier> accessToken = nnc.getAccessToken(eb);
      new Sender(out).replaceBlock(eb, accessToken,
          source.getStorageID(), proxySource.getDatanode());
    }
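Sender#replaceBlock is part of the datanode data-transfer protocol; wrapping the block in an ExtendedBlock with nnc.blockpoolID tells the receiving datanode which block pool the replica belongs to.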

      long fileId = ((DFSOutputStream) create.getWrappedStream()).getFileId();
      FileStatus fileStatus = dfs.getFileStatus(filePath);
      DFSClient client = DFSClientAdapter.getClient(dfs);
      // Add one dummy block at the NameNode, but do not write it to any
      // DataNode.
      ExtendedBlock previousBlock =
          DFSClientAdapter.getPreviousBlock(client, fileId);
      DFSClientAdapter.getNamenode(client).addBlock(
          pathString,
          client.getClientName(),
          new ExtendedBlock(previousBlock),
          new DatanodeInfo[0],
          DFSClientAdapter.getFileId((DFSOutputStream) create
              .getWrappedStream()), null);
      cluster.restartNameNode(0, true);
      cluster.restartDataNode(0);
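Note the ExtendedBlock(ExtendedBlock) copy constructor here: the addBlock RPC is handed its own copy of the previous block rather than the object tracked by the client.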

    // The file should have exactly one full block; wrap it with the pool ID
    // and pass it to addBlock as the block being committed.
    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
    BlockInfo[] blks = barNode.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
    cluster.getNameNodeRpc()
        .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
            null, barNode.getId(), null);

    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
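Passing the existing block as the previous argument lets the NameNode commit it before allocating the next block for the file; the ExtendedBlock(String, Block) constructor is the usual way to pool-qualify a BlockInfo taken from the namespace.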

    // Essentially the same sequence as above, after reopening the file
    // for append.
    hdfs.append(bar);

    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
    BlockInfo[] blks = barNode.getBlocks();
    assertEquals(1, blks.length);
    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
    cluster.getNameNodeRpc()
        .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
            null, barNode.getId(), null);

    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

        SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs,
            new Path("/"), "s1");
       
        // Grab the block info of this file for later use.
        FSDataInputStream in = null;
        ExtendedBlock oldBlock = null;
        try {
          in = fs.open(file);
          oldBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
        } finally {
          IOUtils.closeStream(in);
        }
       
        // Allocate a new block ID/gen stamp so we can simulate pipeline
        // recovery.
        String clientName = ((DistributedFileSystem)fs).getClient()
            .getClientName();
        LocatedBlock newLocatedBlock = namenode.updateBlockForPipeline(
            oldBlock, clientName);
        // Rebuild the block identity: same pool ID, block ID, and length,
        // but the newly issued generation stamp.
        ExtendedBlock newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
            oldBlock.getBlockId(), oldBlock.getNumBytes(),
            newLocatedBlock.getBlock().getGenerationStamp());

        // Delete the file from the present FS. It will still exist in the
        // previously-created snapshot. This will log an OP_DELETE for the
        // ...
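updateBlockForPipeline asks the NameNode to issue a new generation stamp (and access token) for a block under recovery; the test then assembles the post-recovery block identity by hand from the old pool ID, block ID, and length plus the new stamp.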

      HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
          cluster.getNameNode(1));
     
      // Change the gen stamp of the block on datanode to go back in time (gen
      // stamps start at 1000)
      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
      assertTrue(MiniDFSCluster.changeGenStampOfBlock(0, block, 900));
     
      // Stop the DN so the replica with the changed gen stamp will be reported
      // when this DN starts up.
      DataNodeProperties dnProps = cluster.stopDataNode(0);
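Generation stamps only move forward, so a replica reported with an older stamp than the NameNode expects (here 900 versus stamps starting at 1000) should be recognized as out of date when the datanode re-registers and reports its blocks.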

      for (int idx = 0; idx < nrFiles; idx++) {
        String fileName = nameGenerator.getNextFileName("ThroughputBench");
        nameNodeProto.create(fileName, FsPermission.getDefault(), clientName,
            new EnumSetWritable<CreateFlag>(
                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)),
            true, replication, BLOCK_SIZE);
        // addBlocks returns the last block written so it can be passed to
        // complete() to close the file.
        ExtendedBlock lastBlock = addBlocks(fileName, clientName);
        nameNodeProto.complete(fileName, clientName, lastBlock,
            INodeId.GRANDFATHER_INODE_ID);
      }
      // prepare block reports
      for (int idx = 0; idx < nrDatanodes; idx++) {
        datanodes[idx].formBlockReport();
