// Non-cached path: open a brand-new socket to the datanode for this read.
Socket sock = dfsClient.socketFactory.createSocket();
// Disable Nagle's algorithm so small request/status messages are not
// delayed behind the previous write (see the detailed note in the
// cached-socket path of this file).
sock.setTcpNoDelay(true);
// Both the connect and subsequent reads use the client's configured timeout.
NetUtils.connect(sock, dnAddr, dfsClient.socketTimeout);
sock.setSoTimeout(dfsClient.socketTimeout);
// Construct the reader; this sends the read request over the new socket.
// NOTE(review): the boolean literal here is 'false' while the cached-socket
// path passes 'true' — presumably a connection-reuse/keepalive flag on
// BlockReader.newBlockReader; confirm against its signature.
BlockReader reader =
BlockReader.newBlockReader(protocolVersion, namespaceId, sock, src,
blockId, generationStamp, startOffset, len, buffersize,
verifyChecksum, clientName, bytesToCheckReadSpeed,
minReadSpeedBps, false, cliData, options);
return reader;
}
// Allow retry since there is no way of knowing whether the cached socket
// is good until we actually use it. The loop exits early (single pass)
// once 'fromCache' becomes false, i.e. once we are on a freshly created
// socket rather than a possibly-stale cached one.
for (int retries = 0; retries <= nCachedConnRetry && fromCache; ++retries) {
// Try to reuse a pooled connection to this datanode address.
Socket sock = socketCache.get(dnAddr);
if (sock == null) {
// Cache miss: fall back to a new connection and stop retrying
// (a fresh socket failing is a real error, not staleness).
fromCache = false;
sock = dfsClient.socketFactory.createSocket();
/**
 * TCP_NODELAY is crucial here because of bad interactions between
 * Nagle's algorithm and delayed ACKs. With connection keepalive
 * between the client and DN, the conversation looks like:
 * 1. Client -> DN: Read block X
 * 2. DN -> client: data for block X;
 * 3. Client -> DN: Status OK (successful read)
 * 4. Client -> DN: Read block Y
 *
 * The fact that step #3 and #4 are both in the client -> DN direction
 * triggers Nagling. If the DN is using delayed ACKs, this results in
 * a delay of 40ms or more.
 *
 * TCP_NODELAY disables Nagling and thus avoids this performance
 * disaster.
 */
sock.setTcpNoDelay(true);
NetUtils.connect(sock, dnAddr, dfsClient.socketTimeout);
sock.setSoTimeout(dfsClient.socketTimeout);
}
try {
// The OP_READ_BLOCK request is sent as we make the BlockReader.
// NOTE(review): the 'true' flag here differs from the 'false' used in
// the non-cached path earlier in this file — presumably it marks the
// connection as reusable/kept-alive; confirm against
// BlockReader.newBlockReader. The matching catch/cleanup for a stale
// cached socket is past the end of this view.
BlockReader reader =
BlockReader.newBlockReader(protocolVersion, namespaceId, sock, src,
blockId, generationStamp, startOffset, len, buffersize,
verifyChecksum, clientName, bytesToCheckReadSpeed,
minReadSpeedBps, true, cliData, options);
return reader;