Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlock
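A LocatedBlock pairs an ExtendedBlock with the DatanodeInfo locations that hold its replicas, together with the block's offset within the file, an access token, and a corrupt flag. The excerpts below, drawn from the HDFS client, the NameNode RPC server, and the protobuf translation layer, show how it is created, converted, and consumed. First, a minimal hypothetical sketch of the constructors and getters those excerpts exercise (the pool name and numbers are made up; only the LocatedBlock calls mirror the real code):

  // Hypothetical sketch, not from the Hadoop source tree.
  static void locatedBlockSketch(DatanodeInfo[] locs) {
    // An ExtendedBlock names a block: pool id, block id, length, generation stamp.
    ExtendedBlock blk = new ExtendedBlock("example-pool", 1L, 1024L, 100L);

    // Simple form: block plus replica locations (see reportChecksumFailure below).
    LocatedBlock lb = new LocatedBlock(blk, locs);

    // Full form adds the block's offset within the file and a corrupt flag
    // (see PBHelper.convertLocatedBlock below).
    LocatedBlock full = new LocatedBlock(blk, locs, 0L, false);

    // Consumers read back the block, its locations, and its access token
    // (see the getFileChecksum excerpt below).
    ExtendedBlock b = lb.getBlock();
    DatanodeInfo[] datanodes = lb.getLocations();
    Token<BlockTokenIdentifier> token = lb.getBlockToken();
  }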


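From the protobuf translation helper (apparently PBHelper): converting an array of LocatedBlockProto messages from the wire format back into LocatedBlock objects, one field at a time (block, locations, file offset, corrupt flag). The closing brace, cut off in the original excerpt, is restored.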
  public static LocatedBlock[] convertLocatedBlock(LocatedBlockProto[] lb) {
    if (lb == null) return null;
    final int len = lb.length;
    LocatedBlock[] result = new LocatedBlock[len];
    for (int i = 0; i < len; ++i) {
      result[i] = new LocatedBlock(
          PBHelper.convert(lb[i].getB()),
          PBHelper.convert(lb[i].getLocsList()),
          lb[i].getOffset(), lb[i].getCorrupt());
    }
    return result;
  }


 
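On the NameNode side of the wire, the server-side translator for ClientProtocol (apparently ClientNamenodeProtocolServerSideTranslatorPB) unwraps each request, calls the server, and converts any resulting LocatedBlock back to protobuf. Here, append returns the file's last block so the client can resume writing to it; the catch clause, truncated in the original excerpt, is restored following the translator's uniform wrap-in-ServiceException pattern.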
  @Override
  public AppendResponseProto append(RpcController controller,
      AppendRequestProto req) throws ServiceException {
    try {
      LocatedBlock result = server.append(req.getSrc(), req.getClientName());
      if (result != null) {
        return AppendResponseProto.newBuilder()
            .setBlock(PBHelper.convert(result)).build();
      }
      return NULL_APPEND_RESPONSE;
    } catch (IOException e) {
      // Uniform translator pattern: wrap checked IOExceptions for the RPC layer.
      throw new ServiceException(e);
    }
  }

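The same translator's addBlock allocates the next block for a file under construction, passing through the previous block and any datanodes the client asked to exclude. The truncated tail is restored below following the same convert-and-wrap pattern.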
  public AddBlockResponseProto addBlock(RpcController controller,
      AddBlockRequestProto req) throws ServiceException {
   
    try {
      List<DatanodeInfoProto> excl = req.getExcludeNodesList();
      LocatedBlock result = server.addBlock(req.getSrc(), req.getClientName(),
          req.hasPrevious() ? PBHelper.convert(req.getPrevious()) : null,
          (excl == null ||
           excl.size() == 0) ? null :
            PBHelper.convert(excl.toArray(new DatanodeInfoProto[excl.size()])));
      return AddBlockResponseProto.newBuilder().setBlock(
          PBHelper.convert(result)).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

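getAdditionalDatanode asks the NameNode for a replacement node during pipeline recovery. This excerpt was truncated at both ends; the signature and tail shown here are a reconstruction based on the Request/Response proto naming convention, so treat the restored argument list as an assumption.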
  @Override
  public GetAdditionalDatanodeResponseProto getAdditionalDatanode(
      RpcController controller, GetAdditionalDatanodeRequestProto req)
      throws ServiceException {
    try {
      List<DatanodeInfoProto> existingList = req.getExistingsList();
      List<DatanodeInfoProto> excludesList = req.getExcludesList();
      LocatedBlock result = server.getAdditionalDatanode(
          req.getSrc(), PBHelper.convert(req.getBlk()),
          PBHelper.convert(existingList.toArray(
              new DatanodeInfoProto[existingList.size()])),
          PBHelper.convert(excludesList.toArray(
              new DatanodeInfoProto[excludesList.size()])),
          req.getNumAdditionalNodes(), req.getClientName());
      return GetAdditionalDatanodeResponseProto.newBuilder().setBlock(
          PBHelper.convert(result)).build();
    } catch (IOException e) {
      throw new ServiceException(e);
    }
  }

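Behind the translator, the NameNode RPC server (apparently NameNodeRpcServer) implements append by delegating to the namesystem and bumping a metric. The method signature was cut off in the excerpt; the one restored here is an assumption based on ClientProtocol.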
  @Override // ClientProtocol
  public LocatedBlock append(String src, String clientName)
      throws IOException {
    String clientMachine = getClientMachine();
    if (stateChangeLog.isDebugEnabled()) {
      stateChangeLog.debug("*DIR* NameNode.append: file "
          +src+" for "+clientName+" at "+clientMachine);
    }
    LocatedBlock info = namesystem.appendFile(src, clientName, clientMachine);
    metrics.incrFilesAppended();
    return info;
  }

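Its addBlock counterpart turns the client's excluded-node array into a map before asking the namesystem for the next block's locations. The truncated opening lines are restored here and should likewise be read as a reconstruction.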
  @Override // ClientProtocol
  public LocatedBlock addBlock(String src, String clientName,
      ExtendedBlock previous, DatanodeInfo[] excludedNodes)
      throws IOException {
    HashMap<Node, Node> excludedNodesSet = null;
    if (excludedNodes != null) {
      excludedNodesSet = new HashMap<Node, Node>(excludedNodes.length);
      for (Node node:excludedNodes) {
        excludedNodesSet.put(node, node);
      }
    }
    LocatedBlock locatedBlock =
      namesystem.getAdditionalBlock(src, clientName, previous, excludedNodesSet);
    if (locatedBlock != null)
      metrics.incrAddBlockOps();
    return locatedBlock;
  }

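Back on the client (apparently DFSClient): callAppend issues the append RPC and unwraps the RemoteException into the checked exceptions callers expect; the excerpt ends before the stream construction.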

  /** Method to get stream returned by append call */
  private DFSOutputStream callAppend(HdfsFileStatus stat, String src,
      int buffersize, Progressable progress) throws IOException {
    LocatedBlock lastBlock = null;
    try {
      lastBlock = namenode.append(src, clientName);
    } catch(RemoteException re) {
      throw re.unwrapRemoteException(AccessControlException.class,
                                     FileNotFoundException.class);
    }
    // Excerpt truncated: the original unwraps more exception types and then
    // builds the DFSOutputStream that resumes writing after lastBlock.

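From the client's file-checksum path (apparently DFSClient.getFileChecksum): for each LocatedBlock, the client connects to the block's datanodes in turn, optionally wraps the connection in encrypted streams, and issues a BLOCK_CHECKSUM operation authenticated by the block's access token.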
          throw new FileNotFoundException("File does not exist: " + src);
        }
        locatedblocks = blockLocations.getLocatedBlocks();
        refetchBlocks = false;
      }
      LocatedBlock lb = locatedblocks.get(i);
      final ExtendedBlock block = lb.getBlock();
      final DatanodeInfo[] datanodes = lb.getLocations();
     
      //try each datanode location of the block
      final int timeout = 3000 * datanodes.length + socketTimeout;
      boolean done = false;
      for(int j = 0; !done && j < datanodes.length; j++) {
        Socket sock = null;
        DataOutputStream out = null;
        DataInputStream in = null;
       
        try {
          //connect to a datanode
          sock = socketFactory.createSocket();
          String dnAddr = datanodes[j].getXferAddr(connectToDnViaHostname);
          if (LOG.isDebugEnabled()) {
            LOG.debug("Connecting to datanode " + dnAddr);
          }
          NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
          sock.setSoTimeout(timeout);

          OutputStream unbufOut = NetUtils.getOutputStream(sock);
          InputStream unbufIn = NetUtils.getInputStream(sock);
          if (encryptionKey != null) {
            IOStreamPair encryptedStreams =
                DataTransferEncryptor.getEncryptedStreams(
                    unbufOut, unbufIn, encryptionKey);
            unbufOut = encryptedStreams.out;
            unbufIn = encryptedStreams.in;
          }
          out = new DataOutputStream(new BufferedOutputStream(unbufOut,
              HdfsConstants.SMALL_BUFFER_SIZE));
          in = new DataInputStream(unbufIn);

          if (LOG.isDebugEnabled()) {
            LOG.debug("write to " + datanodes[j] + ": "
                + Op.BLOCK_CHECKSUM + ", block=" + block);
          }
          // get block MD5
          new Sender(out).blockChecksum(block, lb.getBlockToken());

          final BlockOpResponseProto reply =
            BlockOpResponseProto.parseFrom(HdfsProtoUtil.vintPrefixed(in));

          if (reply.getStatus() != Status.SUCCESS) {
            // Excerpt truncated: the remainder handles the reply status,
            // reads the block's MD5 digest on success, and closes the
            // streams before moving on to the next block.

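Also in the client: the excerpt opens with the tail of a short-circuit-read check, then shows reportChecksumFailure wrapping the bad block and its datanode into a one-element LocatedBlock array for the NameNode.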
    return shortCircuitLocalReads && isLocalAddress(targetAddr);
  }

  void reportChecksumFailure(String file, ExtendedBlock blk, DatanodeInfo dn) {
    DatanodeInfo [] dnArr = { dn };
    LocatedBlock [] lblocks = { new LocatedBlock(blk, dnArr) };
    reportChecksumFailure(file, lblocks);
  }

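Finally, pipeline recovery in the client's output stream (apparently DFSOutputStream's DataStreamer): when a node fails, getAdditionalDatanode returns a LocatedBlock whose locations include one new datanode; the code finds the newcomer and transfers a replica to it under the block token.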
        return;
      }

      //get a new datanode
      final DatanodeInfo[] original = nodes;
      final LocatedBlock lb = dfsClient.namenode.getAdditionalDatanode(
          src, block, nodes, failed.toArray(new DatanodeInfo[failed.size()]),
          1, dfsClient.clientName);
      nodes = lb.getLocations();

      //find the new datanode
      final int d = findNewDatanode(original);

      //transfer replica
      final DatanodeInfo src = d == 0? nodes[1]: nodes[d - 1];
      final DatanodeInfo[] targets = {nodes[d]};
      transfer(src, targets, lb.getBlockToken());
    }
