Package org.apache.giraph.comm.requests

Examples of org.apache.giraph.comm.requests.WritableRequest
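org.apache.giraph.comm.requests.WritableRequest is the common base type for the requests that Giraph's Netty client and server exchange. The snippets on this page show the three places it turns up: producers that wrap cached messages, edges, or vertex mutations into concrete subclasses (SendWorkerMessagesRequest, SendWorkerEdgesRequest, SendPartitionMutationsRequest) and hand them to doRequest(); tests that push requests through a NettyClient with sendWritableRequest() and waitAllRequests(); and the Netty handlers that encode each request as a length-prefixed frame and decode it back through its RequestType. Short JDK-only sketches of the underlying patterns, clearly marked as illustrations rather than Giraph's own classes, follow the relevant snippets.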


      int partitionMutationCount) {
    // Send a request if enough mutations have accumulated for this partition
    if (partitionMutationCount >= maxMutationsPerPartition) {
      Map<I, VertexMutations<I, V, E, M>> partitionMutations =
          sendMutationsCache.removePartitionMutations(partitionId);
      WritableRequest writableRequest =
          new SendPartitionMutationsRequest<I, V, E, M>(
              partitionId, partitionMutations);
      doRequest(partitionOwner.getWorkerInfo(), writableRequest);
    }
  }


    PairList<WorkerInfo,
        PairList<Integer, ByteArrayVertexIdMessages<I, M>>>.Iterator
        iterator = remainingMessageCache.getIterator();
    while (iterator.hasNext()) {
      iterator.next();
      WritableRequest writableRequest =
          new SendWorkerMessagesRequest<I, M>(
              iterator.getCurrentSecond());
      doRequest(iterator.getCurrentFirst(), writableRequest);
    }

    // Execute the remaining edge sends (if any)
    PairList<WorkerInfo, PairList<Integer,
        ByteArrayVertexIdEdges<I, E>>>
        remainingEdgeCache = sendEdgeCache.removeAllEdges();
    PairList<WorkerInfo,
        PairList<Integer, ByteArrayVertexIdEdges<I, E>>>.Iterator
        edgeIterator = remainingEdgeCache.getIterator();
    while (edgeIterator.hasNext()) {
      edgeIterator.next();
      WritableRequest writableRequest =
          new SendWorkerEdgesRequest<I, E>(
              edgeIterator.getCurrentSecond());
      doRequest(edgeIterator.getCurrentFirst(), writableRequest);
    }

    // Execute the remaining mutation sends (if any)
    Map<Integer, Map<I, VertexMutations<I, V, E, M>>> remainingMutationsCache =
        sendMutationsCache.removeAllPartitionMutations();
    for (Map.Entry<Integer, Map<I, VertexMutations<I, V, E, M>>> entry :
        remainingMutationsCache.entrySet()) {
      WritableRequest writableRequest =
          new SendPartitionMutationsRequest<I, V, E, M>(
              entry.getKey(), entry.getValue());
      PartitionOwner partitionOwner =
          serviceWorker.getVertexPartitionOwner(
              entry.getValue().keySet().iterator().next());

    client = new NettyClient(context, conf, new WorkerInfo());
    client.connectAllAddresses(
        Lists.<WorkerInfo>newArrayList(workerInfo));

    // Send the request twice; it should only be processed once
    WritableRequest request1 = getRequest();
    WritableRequest request2 = getRequest();
    client.sendWritableRequest(workerInfo.getTaskId(), request1);
    client.sendWritableRequest(workerInfo.getTaskId(), request2);
    client.waitAllRequests();

    // Stop the service

        new MockExceptionHandler());
    client.connectAllAddresses(
        Lists.<WorkerInfo>newArrayList(workerInfo));

    // Send the request twice; it should only be processed once
    WritableRequest request1 = getRequest();
    WritableRequest request2 = getRequest();
    client.sendWritableRequest(workerInfo.getTaskId(), request1);
    client.sendWritableRequest(workerInfo.getTaskId(), request2);
    client.waitAllRequests();

    // Stop the service
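The two client tests above send what is effectively the same request twice and expect it to be processed only once. The handler snippets further down expose getClientId() and getRequestId() on every request; a minimal sketch of once-only processing keyed on that pair follows. The keying and the class below are illustrative assumptions, not Giraph's actual bookkeeping.

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

/** Illustrative only: apply a request at most once per (clientId, requestId) pair. */
public class OnceOnlyProcessorSketch {
  private final Set<String> seen = ConcurrentHashMap.newKeySet();

  /** Runs doRequest on first delivery; silently drops duplicates (e.g. a client resend). */
  public boolean processIfNew(int clientId, long requestId, Runnable doRequest) {
    if (seen.add(clientId + ":" + requestId)) {
      doRequest.run();   // first delivery: execute the request
      return true;
    }
    return false;        // duplicate delivery: skip
  }
}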

    if (LOG.isDebugEnabled()) {
      LOG.debug("messageReceived: Got " + msg.getClass());
    }

    WritableRequest writableRequest = (WritableRequest) msg;
    // Simulate a closed connection on the first request (if desired)
    // TODO: Move out into a separate, dedicated handler.
    if (closeFirstRequest && !ALREADY_CLOSED_FIRST_REQUEST) {
      LOG.info("messageReceived: Simulating closing channel on first " +
          "request " + writableRequest.getRequestId() + " from " +
          writableRequest.getClientId());
      setAlreadyClosedFirstRequest();
      ctx.close();
      return;
    }

    if (writableRequest.getType() == RequestType.SASL_TOKEN_MESSAGE_REQUEST) {
      // initialize server-side SASL functionality, if we haven't yet
      // (in which case we are looking at the first SASL message from the
      // client).
      SaslNettyServer saslNettyServer =
          ctx.attr(NettyServer.CHANNEL_SASL_NETTY_SERVERS).get();

    // Decode the request
    ByteBuf buf = (ByteBuf) msg;
    int enumValue = buf.readByte();
    RequestType type = RequestType.values()[enumValue];
    Class<? extends WritableRequest> requestClass = type.getRequestClass();
    WritableRequest request =
        ReflectionUtils.newInstance(requestClass, conf);
    request = RequestUtils.decodeWritableRequest(buf, request);

    if (LOG.isDebugEnabled()) {
      LOG.debug("decode: Client " + request.getClientId() +
          ", requestId " + request.getRequestId() +
          ", " +  request.getType() + ", with size " +
          buf.writerIndex() + " took " +
          Times.getNanosSince(TIME, startDecodingNanoseconds) + " ns");
    }
    ReferenceCountUtil.release(buf);
    // fire writableRequest object to upstream handlers
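The decoder above turns the first byte of the frame body back into a RequestType via its ordinal, looks up the matching request class, and instantiates it reflectively before letting it read its own fields. Below is a minimal sketch of that dispatch step using plain JDK reflection instead of Hadoop's ReflectionUtils; the enum and stub classes are hypothetical stand-ins.

/** Illustrative only: ordinal-on-the-wire to request-object dispatch, as in the decoder above. */
public class RequestFactorySketch {
  /** Hypothetical request bodies; the real ones are WritableRequest subclasses. */
  public static class StubMessagesRequest { }
  public static class StubEdgesRequest { }

  /** Hypothetical stand-in for RequestType: each constant knows its request class. */
  public enum Kind {
    SEND_MESSAGES(StubMessagesRequest.class),
    SEND_EDGES(StubEdgesRequest.class);

    private final Class<?> requestClass;
    Kind(Class<?> requestClass) { this.requestClass = requestClass; }
    public Class<?> getRequestClass() { return requestClass; }
  }

  /** Mirrors RequestType.values()[buf.readByte()] followed by reflective instantiation. */
  public static Object newRequest(byte typeByte) throws ReflectiveOperationException {
    return Kind.values()[typeByte].getRequestClass().getDeclaredConstructor().newInstance();
  }
}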

    if (LOG.isDebugEnabled()) {
      startEncodingNanoseconds = TIME.getNanoseconds();
    }

    ByteBuf buf;
    WritableRequest request = (WritableRequest) msg;
    int requestSize = request.getSerializedSize();
    if (requestSize == WritableRequest.UNKNOWN_SIZE) {
      buf = ctx.alloc().buffer(bufferStartingSize);
    } else {
      requestSize += SIZE_OF_INT + SIZE_OF_BYTE;
      buf = ctx.alloc().buffer(requestSize);
    }
    ByteBufOutputStream output = new ByteBufOutputStream(buf);

    // This will later be filled in with the correct size of the serialized request
    output.writeInt(0);
    output.writeByte(request.getType().ordinal());
    try {
      request.write(output);
    } catch (IndexOutOfBoundsException e) {
      LOG.error("write: Most likely the size of request was not properly " +
          "specified (this buffer is too small) - see getSerializedSize() " +
          "in " + request.getType().getRequestClass());
      throw new IllegalStateException(e);
    }
    output.flush();
    output.close();

    // Set the correct size at the end
    buf.setInt(0, buf.writerIndex() - SIZE_OF_INT);
    if (LOG.isDebugEnabled()) {
      LOG.debug("write: Client " + request.getClientId() + ", " +
          "requestId " + request.getRequestId() +
          ", size = " + buf.readableBytes() + ", " +
          request.getType() + " took " +
          Times.getNanosSince(TIME, startEncodingNanoseconds) + " ns");
    }
    ctx.write(buf, promise);
  }
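The encoder above reserves an int at the front of the buffer, writes the RequestType ordinal and the serialized request, and then backfills the reserved slot with buf.writerIndex() - SIZE_OF_INT, so the size field counts everything after itself. Here is a minimal sketch of the same frame layout using java.nio.ByteBuffer in place of Netty's ByteBuf; the payload and names are hypothetical.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

/** Illustrative only: [int size][byte type ordinal][body], with the size backfilled last. */
public class FrameLayoutSketch {
  private static final int SIZE_OF_INT = 4;

  public static ByteBuffer encode(byte typeOrdinal, byte[] body) {
    ByteBuffer buf = ByteBuffer.allocate(SIZE_OF_INT + 1 + body.length);
    buf.putInt(0);             // placeholder, filled in below with the real size
    buf.put(typeOrdinal);      // which request type this frame carries
    buf.put(body);             // the serialized request itself
    buf.putInt(0, buf.position() - SIZE_OF_INT);   // backfill: everything after the size field
    buf.flip();
    return buf;
  }

  public static void main(String[] args) {
    ByteBuffer frame = encode((byte) 2, "hello".getBytes(StandardCharsets.UTF_8));
    System.out.println("frame size field = " + frame.getInt(0));   // prints 6 (1 type byte + 5 body bytes)
  }
}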

      throw new IllegalArgumentException(
          "encode: cannot encode message of type " + msg.getClass() +
              " since it is not an instance of an implementation of " +
              " WritableRequest.");
    }
    @SuppressWarnings("unchecked")
    WritableRequest writableRequest = (WritableRequest) msg;

    ByteBuf buf = ctx.alloc().buffer(10);
    ByteBufOutputStream output = new ByteBufOutputStream(buf);

    if (LOG.isDebugEnabled()) {
      LOG.debug("encode: Encoding a message of type " + msg.getClass());
    }

    // Space is reserved now, to be filled later with the serialized request size
    output.writeInt(0);
    // Write the type of the object.
    output.writeByte(writableRequest.getType().ordinal());
    // Write the object itself.
    writableRequest.write(output);

    output.flush();
    output.close();

    // Set the correct size at the end.
    buf.setInt(0, buf.writerIndex() - SIZE_OF_INT);

    if (LOG.isDebugEnabled()) {
      LOG.debug("encode: Encoded message of type " + msg.getClass() +
          ", size = " + buf.readableBytes());
    }
    ctx.write(buf, promise);
/*if[HADOOP_NON_SECURE]
else[HADOOP_NON_SECURE]*/
    if (writableRequest.getType() == RequestType.SASL_COMPLETE_REQUEST) {
      // We are sending to the client a SASL_COMPLETE response (created by
      // the SaslServer handler). The SaslServer handler has removed itself
      // from the pipeline after creating this response, and now it's time for
      // the ResponseEncoder to remove itself also.
      if (LOG.isDebugEnabled()) {
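The comment above describes a handler that takes itself out of the pipeline once the final handshake response has been written. Below is a minimal Netty sketch of that self-removal idea, assuming a hypothetical HandshakeDoneMsg marker in place of the SASL_COMPLETE request.

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOutboundHandlerAdapter;
import io.netty.channel.ChannelPromise;

/** Illustrative only: an outbound handler that removes itself after the last handshake write. */
public class SelfRemovingEncoderSketch extends ChannelOutboundHandlerAdapter {
  /** Hypothetical marker for the final handshake message (SASL_COMPLETE in the snippet above). */
  public static final class HandshakeDoneMsg { }

  @Override
  public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise)
      throws Exception {
    ctx.write(msg, promise);            // forward the message as usual
    if (msg instanceof HandshakeDoneMsg) {
      ctx.pipeline().remove(this);      // handshake is over; this handler is no longer needed
    }
  }
}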

    // Send a request if the cache of outgoing messages to
    // the remote worker 'workerInfo' is large enough to be flushed
    if (workerMessageSize >= maxMessagesSizePerWorker) {
      PairList<Integer, VertexIdMessages<I, M>>
        workerMessages = removeWorkerMessages(workerInfo);
      WritableRequest writableRequest =
        new SendWorkerMessagesRequest<I, M>(workerMessages);
      totalMsgBytesSentInSuperstep += writableRequest.getSerializedSize();
      clientProcessor.doRequest(workerInfo, writableRequest);
      // Notify sending
      getServiceWorker().getGraphTaskManager().notifySentMessages();
    }
  }
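This snippet (and the mutation-flush snippet at the top of the page) applies a single batching rule: accumulate outgoing data per destination, and once the cached size for a destination crosses a configured threshold, pull that destination's entries out of the cache and send them as one request. A generic sketch of the rule follows; the cache, threshold, and send hook are hypothetical stand-ins for Giraph's caches and doRequest() calls, not its actual classes.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;

/** Illustrative only: flush a destination's batch once its cached size crosses a threshold. */
public class ThresholdFlushSketch<D, T> {
  private final Map<D, List<T>> cache = new HashMap<>();
  private final Map<D, Integer> cachedBytes = new HashMap<>();
  private final int maxBytesPerDestination;
  private final BiConsumer<D, List<T>> sendRequest;   // stand-in for doRequest(workerInfo, request)

  public ThresholdFlushSketch(int maxBytesPerDestination, BiConsumer<D, List<T>> sendRequest) {
    this.maxBytesPerDestination = maxBytesPerDestination;
    this.sendRequest = sendRequest;
  }

  /** Cache one item; if the destination's batch is now large enough, flush it as one request. */
  public void add(D destination, T item, int itemBytes) {
    cache.computeIfAbsent(destination, d -> new ArrayList<>()).add(item);
    int size = cachedBytes.merge(destination, itemBytes, Integer::sum);
    if (size >= maxBytesPerDestination) {
      List<T> batch = cache.remove(destination);      // like removeWorkerMessages(workerInfo)
      cachedBytes.remove(destination);
      sendRequest.accept(destination, batch);         // like doRequest(workerInfo, writableRequest)
    }
  }

  /** Flush whatever is left, mirroring the "remaining cache" loops in the snippets above. */
  public void flushAll() {
    cache.forEach(sendRequest);
    cache.clear();
    cachedBytes.clear();
  }
}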

    PairList<WorkerInfo, PairList<
        Integer, VertexIdMessages<I, M>>>.Iterator
    iterator = remainingMessageCache.getIterator();
    while (iterator.hasNext()) {
      iterator.next();
      WritableRequest writableRequest =
        new SendWorkerMessagesRequest<I, M>(
          iterator.getCurrentSecond());
      totalMsgBytesSentInSuperstep += writableRequest.getSerializedSize();
      clientProcessor.doRequest(
        iterator.getCurrentFirst(), writableRequest);
    }
  }
