/**
 * Accepts the next incoming connection on the underlying domain socket and
 * wraps it in a {@link DomainPeer}.
 *
 * @return the connected peer
 * @throws IOException if the accept or peer construction fails
 * @throws SocketTimeoutException if the accept times out
 */
@Override
public Peer accept() throws IOException, SocketTimeoutException {
  DomainSocket connSock = sock.accept();
  Peer peer = null;
  boolean success = false;
  try {
    peer = new DomainPeer(connSock);
    success = true;
    return peer;
  } finally {
    if (!success) {
      // Clean up on failure. Closing the peer also closes the wrapped
      // socket, so close exactly one of them: the original closed both
      // (double close), and an exception from peer.close() would have
      // skipped connSock.close() entirely.
      if (peer != null) {
        peer.close();
      } else {
        connSock.close();
      }
    }
  }
}
NewShmInfo shmInfo = null; boolean success = false; DomainSocket sock = peer.getDomainSocket(); try { if (sock == null) {
peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout); InputStream input = socketIn; try { "at {}. Perhaps the client " + "is running an older version of Hadoop which does not support " + "encryption", peer.getRemoteAddressString(), imne); } else { LOG.info("Failed to read expected SASL data transfer protection " + ". Perhaps the client is running an older version of Hadoop " + "which does not support SASL data transfer protection", peer.getRemoteAddressString(), imne); if (opsProcessed != 0) { assert dnConf.socketKeepaliveTimeout > 0; peer.setReadTimeout(dnConf.socketKeepaliveTimeout); } else { peer.setReadTimeout(dnConf.socketTimeout); peer.setReadTimeout(dnConf.socketTimeout); ++opsProcessed; } while ((peer != null) && (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0)); } catch (Throwable t) { String s = datanode.getDisplayName() + ":DataXceiver error processing "
private DataXceiver(Peer peer, DataNode datanode, DataXceiverServer dataXceiverServer) throws IOException { super(datanode.getTracer()); this.peer = peer; this.dnConf = datanode.getDnConf(); this.socketIn = peer.getInputStream(); this.socketOut = peer.getOutputStream(); this.datanode = datanode; this.dataXceiverServer = dataXceiverServer; this.connectToDnViaHostname = datanode.getDnConf().connectToDnViaHostname; this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(datanode.getConf()); this.smallBufferSize = DFSUtilClient.getSmallBufferSize(datanode.getConf()); remoteAddress = peer.getRemoteAddressString(); final int colonIdx = remoteAddress.indexOf(':'); remoteAddressWithoutPort = (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx); localAddress = peer.getLocalAddressString(); LOG.debug("Number of active connections is: {}", datanode.getXceiverCount()); }
|| stage == BlockConstructionStage.TRANSFER_FINALIZED; allowLazyPersist = allowLazyPersist && (dnConf.getAllowNonLocalLazyPersist() || peer.isLocal()); long size = 0; isDatanode, isClient, isTransfer); LOG.debug("writeBlock receive buf size {} tcp no delay {}", peer.getReceiveBufferSize(), peer.getTcpNoDelay()); peer.getRemoteAddressString(), peer.getLocalAddressString(), stage, latestGenerationStamp, minBytesRcvd, maxBytesRcvd, clientname, srcDataNode, datanode, requestedChecksum, datanode.getMetrics().incrWritesFromClient(peer.isLocal(), size);
/**
 * Removes and returns a cached, still-open peer for the given datanode,
 * closing and discarding any expired entries encountered along the way.
 *
 * @param dnId the datanode whose cached peers to search
 * @param isDomain whether to look for domain-socket peers
 * @return a non-expired, open peer, or null if none is cached
 */
private synchronized Peer getInternal(DatanodeID dnId, boolean isDomain) {
  List<Value> sockStreamList = multimap.get(new Key(dnId, isDomain));
  if (sockStreamList == null) {
    return null;
  }
  Iterator<Value> iter = sockStreamList.iterator();
  while (iter.hasNext()) {
    Value candidate = iter.next();
    iter.remove();
    long ageMs = Time.monotonicNow() - candidate.getTime();
    Peer peer = candidate.getPeer();
    if (ageMs >= expiryPeriod) {
      try {
        peer.close();
      } catch (IOException e) {
        // Include the exception: the original dropped it, hiding the
        // failure cause from the log.
        LOG.warn("got IOException closing stale peer " + peer +
            ", which is " + ageMs + " ms old", e);
      }
    } else if (!peer.isClosed()) {
      return peer;
    }
  }
  return null;
}
/** * Sends client SASL negotiation for a peer if required. * * @param peer connection peer * @param encryptionKeyFactory for creation of an encryption key * @param accessToken connection block access token * @param datanodeId ID of destination DataNode * @return new pair of streams, wrapped after SASL negotiation * @throws IOException for any error */ public Peer peerSend(Peer peer, DataEncryptionKeyFactory encryptionKeyFactory, Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId) throws IOException { IOStreamPair ios = checkTrustAndSend(getPeerAddress(peer), peer.getOutputStream(), peer.getInputStream(), encryptionKeyFactory, accessToken, datanodeId); // TODO: Consider renaming EncryptedPeer to SaslPeer. return ios != null ? new EncryptedPeer(peer, ios) : peer; }
/** * When the reader reaches end of the read, it sends a status response * (e.g. CHECKSUM_OK) to the DN. Failure to do so could lead to the DN * closing our connection (which we will re-open), but won't affect * data correctness. */ void sendReadResult(Peer peer, Status statusCode) { assert !sentStatusCode : "already sent status code to " + peer; try { RemoteBlockReader2.writeReadResult(peer.getOutputStream(), statusCode); sentStatusCode = true; } catch (IOException e) { // It's ok not to be able to send this. But something is probably wrong. LOG.info("Could not send read status (" + statusCode + ") to datanode " + peer.getRemoteAddressString() + ": " + e.getMessage()); } }
/**
 * Reports whether the wrapped peer has been closed.
 *
 * @return true if the enclosed peer is closed
 */
@Override
public boolean isClosed() {
  final Peer inner = enclosedPeer;
  return inner.isClosed();
}
assertTrue(peer.getDomainSocket() != null); peers.remove(peer); peer = cache.get(dnId, false); assertTrue(peer != null); assertTrue(!peer.isClosed()); peers.remove(peer);
cipherSuites, peer.getRemoteAddressString()); peer.getRemoteAddressString());
LOG.warn("Client {} did not send a valid status code " + "after reading. Will close connection.", peer.getRemoteAddressString()); IOUtils.closeStream(out); datanode.metrics.incrReadsFromClient(peer.isLocal(), read);
/**
 * Validates the response to an OP_READ_BLOCK request, throwing an
 * IOException with contextual detail if the operation failed.
 *
 * @param status the block-op response to check
 * @param peer the connection the response arrived on
 * @param block the block that was requested
 * @param file the file the block belongs to
 * @throws IOException if the response indicates failure
 */
static void checkSuccess(
    BlockOpResponseProto status, Peer peer,
    ExtendedBlock block, String file)
    throws IOException {
  // Assemble the diagnostic context incrementally; the resulting text is
  // identical to the previous concatenation-based form.
  StringBuilder context = new StringBuilder("for OP_READ_BLOCK");
  context.append(", self=").append(peer.getLocalAddressString());
  context.append(", remote=").append(peer.getRemoteAddressString());
  context.append(", for file ").append(file);
  context.append(", for pool ").append(block.getBlockPoolId());
  context.append(" block ").append(block.getBlockId());
  context.append("_").append(block.getGenerationStamp());
  DataTransferProtoUtil.checkBlockOpStatus(status, context.toString());
}
if (peer.isLocal()) { this.isLocal = true;
@Test(timeout=10000) public void testDFSClientPeerTimeout() throws IOException { final int timeout = 1000; final Configuration conf = new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout); // only need cluster to create a dfs client to get a peer final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); try { cluster.waitActive(); DistributedFileSystem dfs = cluster.getFileSystem(); // use a dummy socket to ensure the read timesout ServerSocket socket = new ServerSocket(0); Peer peer = dfs.getClient().newConnectedPeer( (InetSocketAddress) socket.getLocalSocketAddress(), null, null); long start = Time.now(); try { peer.getInputStream().read(); Assert.fail("should timeout"); } catch (SocketTimeoutException ste) { long delta = Time.now() - start; Assert.assertTrue("timedout too soon", delta >= timeout*0.9); Assert.assertTrue("timedout too late", delta <= timeout*1.1); } catch (Throwable t) { Assert.fail("wrong exception:"+t); } } finally { cluster.shutdown(); } }
/**
 * Returns the receive buffer size of the wrapped peer.
 *
 * @return the enclosed peer's receive buffer size
 * @throws IOException if the underlying query fails
 */
@Override
public int getReceiveBufferSize() throws IOException {
  final Peer inner = enclosedPeer;
  return inner.getReceiveBufferSize();
}
/**
 * Returns the local address string of the wrapped peer.
 *
 * @return the enclosed peer's local address string
 */
@Override
public String getLocalAddressString() {
  final Peer inner = enclosedPeer;
  return inner.getLocalAddressString();
}
/**
 * Returns the TCP_NODELAY setting of the wrapped peer.
 *
 * @return the enclosed peer's TCP_NODELAY flag
 * @throws IOException if the underlying query fails
 */
@Override
public boolean getTcpNoDelay() throws IOException {
  final Peer inner = enclosedPeer;
  return inner.getTcpNoDelay();
}
+ ", isClient=" + isClient + ", isTransfer=" + isTransfer); LOG.debug("writeBlock receive buf size " + peer.getReceiveBufferSize() + " tcp no delay " + peer.getTcpNoDelay()); peer.getRemoteAddressString(), peer.getLocalAddressString(), stage, latestGenerationStamp, minBytesRcvd, maxBytesRcvd, clientname, srcDataNode, datanode, requestedChecksum, datanode.metrics.incrWritesFromClient(peer.isLocal(), size);
private DataXceiver(Peer peer, DataNode datanode, DataXceiverServer dataXceiverServer) throws IOException { this.peer = peer; this.dnConf = datanode.getDnConf(); this.socketIn = peer.getInputStream(); this.socketOut = peer.getOutputStream(); this.datanode = datanode; this.dataXceiverServer = dataXceiverServer; this.connectToDnViaHostname = datanode.getDnConf().connectToDnViaHostname; remoteAddress = peer.getRemoteAddressString(); final int colonIdx = remoteAddress.indexOf(':'); remoteAddressWithoutPort = (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx); localAddress = peer.getLocalAddressString(); if (LOG.isDebugEnabled()) { LOG.debug("Number of active connections is: " + datanode.getXceiverCount()); } }