Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlock
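Before the examples, a minimal sketch (not taken from any snippet below) of what a LocatedBlock carries: a block, the datanodes holding its replicas, the block's offset within the file, and a corruption flag plus access token. It assumes the Hadoop 2.x API, where the constructor takes an ExtendedBlock; the first example below comes from an older branch that still passes a Block. The class name LocatedBlockSketch and the describe helper are hypothetical.

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class LocatedBlockSketch {
  // Hypothetical helper: wrap a block and its replica locations, then print the
  // fields that the examples below read back.
  static LocatedBlock describe(ExtendedBlock blk, DatanodeInfo[] locations) {
    LocatedBlock lb = new LocatedBlock(blk, locations); // Hadoop 2.x constructor
    System.out.println("block:       " + lb.getBlock());
    System.out.println("replicas:    " + lb.getLocations().length);
    System.out.println("file offset: " + lb.getStartOffset());
    System.out.println("corrupt:     " + lb.isCorrupt());
    return lb;
  }
}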


   
    LOG.info("Reporting bad block " + block + " to namenode.");
   
    try {
      DatanodeInfo[] dnArr = { new DatanodeInfo(datanode.getDNRegistrationForNS(namespaceId)) };
      LocatedBlock[] blocks = { new LocatedBlock(block, dnArr) };
      datanode.reportBadBlocks(namespaceId, blocks);
    } catch (IOException e) {
      /* One common reason is that NameNode could be in safe mode.
       * Should we keep on retrying in that case?
       */
 
View Full Code Here


        true);
    if (blocks != null) {
      blockManager.getDatanodeManager().sortLocatedBlocks(
          clientMachine, blocks.getLocatedBlocks());
     
      LocatedBlock lastBlock = blocks.getLastLocatedBlock();
      if (lastBlock != null) {
        ArrayList<LocatedBlock> lastBlockList = new ArrayList<LocatedBlock>();
        lastBlockList.add(lastBlock);
        blockManager.getDatanodeManager().sortLocatedBlocks(
                              clientMachine, lastBlockList);
View Full Code Here

        leaseHolder, clientMachine, clientNode);

    dir.replaceINodeFile(src, file, cons);
    leaseManager.addLease(cons.getClientName(), src);
   
    LocatedBlock ret = blockManager.convertLastBlockToUnderConstruction(cons);
    if (writeToEditLog) {
      getEditLog().logOpenFile(src, cons, logRetryCache);
    }
    return ret;
  }
View Full Code Here

   */
  LocatedBlock appendFile(String src, String holder, String clientMachine)
      throws AccessControlException, SafeModeException,
      FileAlreadyExistsException, FileNotFoundException,
      ParentNotDirectoryException, IOException {
    LocatedBlock lb = null;
    CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
        null);
    if (cacheEntry != null && cacheEntry.isSuccess()) {
      return (LocatedBlock) cacheEntry.getPayload();
    }
View Full Code Here

      throw new UnsupportedOperationException(
          "Append is not enabled on this NameNode. Use the " +
          DFS_SUPPORT_APPEND_KEY + " configuration option to enable it.");
    }

    LocatedBlock lb = null;
    FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.WRITE);
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    boolean skipSync = false;
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      if (isInSafeMode()) {
        throw new SafeModeException("Cannot append to file " + src, safeMode);
      }
      src = FSDirectory.resolvePath(src, pathComponents, dir);
      lb = appendFileInternal(pc, src, holder, clientMachine, logRetryCache);
    } catch (StandbyException se) {
      skipSync = true;
      throw se;
    } finally {
      writeUnlock();
      // There might be transactions logged while trying to recover the lease.
      // They need to be sync'ed even when an exception was thrown.
      if (!skipSync) {
        getEditLog().logSync();
      }
    }
    if (lb != null) {
      if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: file "
            + src + " for " + holder + " at " + clientMachine
            + " block " + lb.getBlock()
            + " block size " + lb.getBlock().getNumBytes());
      }
    }
    logAuditEvent(true, "append", src);
    return lb;
  }
View Full Code Here
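As an aside, the DFS_SUPPORT_APPEND_KEY referenced in the exception message above is the dfs.support.append configuration key. A minimal sketch of enabling it, assuming Hadoop 2.x; the class and main method here are illustrative and not part of the example:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class EnableAppendSketch {
  public static void main(String[] args) {
    // Assumes Hadoop 2.x, where DFS_SUPPORT_APPEND_KEY resolves to "dfs.support.append";
    // the setting must be in effect when the NameNode starts for appendFile() to be permitted.
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true);
    System.out.println("append enabled: "
        + conf.getBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, false));
  }
}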

  }

  LocatedBlock makeLocatedBlock(Block blk,
                                        DatanodeInfo[] locs,
                                        long offset) throws IOException {
    LocatedBlock lBlk = new LocatedBlock(
        getExtendedBlock(blk), locs, offset);
    getBlockManager().setBlockToken(
        lBlk, BlockTokenSecretManager.AccessMode.WRITE);
    return lBlk;
  }
View Full Code Here

    // choose new datanodes.
    final DatanodeInfo[] targets = blockManager.getBlockPlacementPolicy(
        ).chooseTarget(src, numAdditionalNodes, clientnode, chosen, true,
        excludes, preferredblocksize);
    final LocatedBlock lb = new LocatedBlock(blk, targets);
    blockManager.setBlockToken(lb, AccessMode.COPY);
    return lb;
  }
View Full Code Here

    // Create a file with a partial block
    DFSTestUtil.createFile(filesystem, new Path(src), 128, (short)1, 0L);
   
    // Retried append requests succeed
    newCall();
    LocatedBlock b = namesystem.appendFile(src, "holder", "clientMachine");
    Assert.assertEquals(b, namesystem.appendFile(src, "holder", "clientMachine"));
    Assert.assertEquals(b, namesystem.appendFile(src, "holder", "clientMachine"));
   
    // non-retried call fails
    newCall();
View Full Code Here

   * @return a located block with a new generation stamp and an access token
   * @throws IOException if any error occurs
   */
  LocatedBlock updateBlockForPipeline(ExtendedBlock block,
      String clientName) throws IOException {
    LocatedBlock locatedBlock;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);

      // check validity of parameters
      checkUCBlock(block, clientName);
 
      // get a new generation stamp and an access token
      block.setGenerationStamp(
          nextGenerationStamp(isLegacyBlock(block.getLocalBlock())));
      locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]);
      blockManager.setBlockToken(locatedBlock, AccessMode.WRITE);
    } finally {
      writeUnlock();
    }
    // Ensure we record the new generation stamp
View Full Code Here

          .getLocatedBlocks(file, BlockSize + 1);
      assertEquals(1, blks.getLocatedBlocks().size());
      nodes = blks.get(0).getLocations();
      oldBlock = blks.get(0).getBlock();
     
      LocatedBlock newLbk = client.getNamenode().updateBlockForPipeline(
          oldBlock, client.getClientName());
      newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
          oldBlock.getBlockId(), oldBlock.getNumBytes(),
          newLbk.getBlock().getGenerationStamp());
    }
View Full Code Here
