this.dataDirs = dataDirectories; this.dnConf = new DNConf(this); checkSecureConfig(dnConf, getConf(), resources); dnConf.maxLockedMemory); int volFailuresTolerated = dnConf.getVolFailuresTolerated(); int volsConfigured = dnConf.getVolsConfigured(); if (volFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT || volFailuresTolerated >= volsConfigured) { saslClient = new SaslDataTransferClient(dnConf.getConf(), dnConf.saslPropsResolver, dnConf.trustedChannelResolver); saslServer = new SaslDataTransferServer(dnConf, blockPoolTokenSecretManager);
private void checkNNVersion(NamespaceInfo nsInfo) throws IncorrectVersionException { // build and layout versions should match String nnVersion = nsInfo.getSoftwareVersion(); String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion(); if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) { IncorrectVersionException ive = new IncorrectVersionException( minimumNameNodeVersion, nnVersion, "NameNode", "DataNode"); LOG.warn(ive.getMessage()); throw ive; } String dnVersion = VersionInfo.getVersion(); if (!nnVersion.equals(dnVersion)) { LOG.info("Reported NameNode version '" + nnVersion + "' does not match " + "DataNode version '" + dnVersion + "' but is within acceptable " + "limits. Note: This is normal during a rolling upgrade."); } }
long bpReadyTimeout = dnConf.getBpReadyTimeout(); StopWatch sw = new StopWatch(); sw.start();
OutputStream underlyingOut, InputStream underlyingIn) throws IOException { if (peer.hasSecureChannel() || dnConf.getTrustedChannelResolver().isTrusted(getPeerAddress(peer))) { return new IOStreamPair(underlyingIn, underlyingOut); dnConf.getEncryptionAlgorithm()); dnConf.getEncryptionAlgorithm());
InputStream underlyingIn, int xferPort, DatanodeID datanodeId) throws IOException { if (dnConf.getEncryptDataTransfer()) { LOG.debug( "SASL server doing encrypted handshake for peer = {}, datanodeId = {}", + "peer = {}, datanodeId = {}", peer, datanodeId); return new IOStreamPair(underlyingIn, underlyingOut); } else if (dnConf.getSaslPropsResolver() != null) { LOG.debug( "SASL server doing general handshake for peer = {}, datanodeId = {}", peer, datanodeId); return getSaslStreams(peer, underlyingOut, underlyingIn); } else if (dnConf.getIgnoreSecurePortsForTesting()) {
if (dnConf.getIgnoreSecurePortsForTesting()) { return; SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver(); if (saslPropsResolver != null && DFSUtil.getHttpPolicy(conf) == HttpConfig.Policy.HTTPS_ONLY) {
/**
 * Receives SASL negotiation for general-purpose handshake.
 *
 * @param peer connection peer
 * @param underlyingOut connection output stream
 * @param underlyingIn connection input stream
 * @return new pair of streams, wrapped after SASL negotiation
 * @throws IOException for any error
 */
private IOStreamPair getSaslStreams(Peer peer, OutputStream underlyingOut,
    InputStream underlyingIn) throws IOException {
  // A channel that is already secure, or whose remote address is explicitly
  // trusted, needs no SASL wrapping: hand back the raw streams untouched.
  boolean alreadyProtected = peer.hasSecureChannel()
      || dnConf.getTrustedChannelResolver().isTrusted(getPeerAddress(peer));
  if (alreadyProtected) {
    return new IOStreamPair(underlyingIn, underlyingOut);
  }
  // Resolve per-address SASL properties for the server side of the handshake.
  SaslPropertiesResolver resolver = dnConf.getSaslPropsResolver();
  Map<String, String> serverSaslProps =
      resolver.getServerProperties(getPeerAddress(peer));
  // Callback handler that answers password lookups during negotiation.
  CallbackHandler serverCallbacks = new SaslServerCallbackHandler(
      new PasswordFunction() {
        @Override
        public char[] apply(String userName) throws IOException {
          return buildServerPassword(userName);
        }
      });
  return doSaslHandshake(peer, underlyingOut, underlyingIn, serverSaslProps,
      serverCallbacks);
}
if (magicNumber != SASL_TRANSFER_MAGIC_NUMBER) { throw new InvalidMagicNumberException(magicNumber, dnConf.getEncryptDataTransfer()); if (sasl.isNegotiatedQopPrivacy()) { Configuration conf = dnConf.getConf(); cipherOption = negotiateCipherOption(conf, cipherOptions); if (LOG.isDebugEnabled()) { dnConf.getConf(), cipherOption, underlyingOut, underlyingIn, true) : sasl.createStreamPair(out, in); } catch (IOException ioe) {
/**
 * Creates a dummy DataNode for testing purpose.
 */
@VisibleForTesting
@InterfaceAudience.LimitedPrivate("HDFS")
DataNode(final Configuration conf) throws DiskErrorException {
  super(conf);
  // Tracing and file-I/O instrumentation are still wired up so code paths
  // that touch them do not NPE under test.
  this.tracer = createTracer(conf);
  this.tracerConfigurationManager =
      new TracerConfigurationManager(DATANODE_HTRACE_PREFIX, conf);
  this.fileIoProvider = new FileIoProvider(conf, this);
  // Fields below are deliberately zeroed/nulled: this constructor builds a
  // bare-bones instance with no real storage or cluster connectivity.
  this.fileDescriptorPassingDisabledReason = null;
  this.maxNumberOfBlocksToLog = 0;
  this.confVersion = null;
  this.usersWithLocalPathAccess = null;
  this.connectToDnViaHostname = false;
  this.blockScanner = new BlockScanner(this, this.getConf());
  this.pipelineSupportECN = false;
  this.socketFactory = NetUtils.getDefaultSocketFactory(conf);
  // DNConf reads its settings back through this DataNode's getConf().
  this.dnConf = new DNConf(this);
  initOOBTimeout();
  // No storage locations to check in the dummy instance, but the volume
  // checker is real so health-check plumbing still functions.
  storageLocationChecker = null;
  volumeChecker = new DatasetVolumeChecker(conf, new Timer());
}
volFailuresTolerated = datanode.getDnConf().getVolFailuresTolerated(); dataLocations, storage); volsConfigured = datanode.getDnConf().getVolsConfigured(); int volsFailed = volumeFailureInfos.size(); datanode.getDnConf().getMaxLockedMemory() > 0) { lazyWriter = new Daemon(new LazyWriter(conf)); lazyWriter.start();
public FsDatasetCache(FsDatasetImpl dataset) {
  this.dataset = dataset;
  // Upper bound on the bytes this cache may pin in memory, taken from the
  // DataNode's locked-memory configuration (dfs.datanode.max.locked.memory).
  this.maxBytes = dataset.datanode.getDnConf().getMaxLockedMemory();
  // Daemon worker threads so cache work never blocks JVM shutdown.
  ThreadFactory workerFactory = new ThreadFactoryBuilder()
      .setDaemon(true)
// Snapshots DataNode configuration values at construction time. The owning
// Configurable (the DataNode) supplies the Configuration via getConf().
public DNConf(final Configurable dn) {
  this.dn = dn;
  // Socket timeouts for client reads/writes and keepalive of reused sockets.
  socketTimeout = getConf().getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
      HdfsConstants.READ_TIMEOUT);
  socketWriteTimeout = getConf().getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
      HdfsConstants.WRITE_TIMEOUT);
  socketKeepaliveTimeout = getConf().getInt(
      DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
      DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT);
  // Socket buffer sizes for block transfer connections.
  this.transferSocketSendBufferSize = getConf().getInt(
      DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_SEND_BUFFER_SIZE_KEY,
      DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_SEND_BUFFER_SIZE_DEFAULT);
  this.transferSocketRecvBufferSize = getConf().getInt(
      DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_KEY,
      DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_DEFAULT);
  this.tcpNoDelay = getConf().getBoolean(
      DFSConfigKeys.DFS_DATA_TRANSFER_SERVER_TCPNODELAY,
      DFSConfigKeys.DFS_DATA_TRANSFER_SERVER_TCPNODELAY_DEFAULT);
  // Whether zero-copy transferTo() may be used when serving blocks.
  transferToAllowed = getConf().getBoolean(
      DFS_DATANODE_TRANSFERTO_ALLOWED_KEY,
      DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT);
  // Readahead hint (bytes) applied to block files during reads.
  readaheadLength = getConf().getLong(
      HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY,
      HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
  // Cap on IPC message payload size.
  maxDataLength = getConf().getInt(DFSConfigKeys.IPC_MAXIMUM_DATA_LENGTH,
      DFSConfigKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
  dropCacheBehindWrites = getConf().getBoolean(
      DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY,
InputStream underlyingIn, int xferPort, DatanodeID datanodeId) throws IOException { if (dnConf.getEncryptDataTransfer()) { LOG.debug( "SASL server doing encrypted handshake for peer = {}, datanodeId = {}", + "peer = {}, datanodeId = {}", peer, datanodeId); return new IOStreamPair(underlyingIn, underlyingOut); } else if (dnConf.getSaslPropsResolver() != null) { LOG.debug( "SASL server doing general handshake for peer = {}, datanodeId = {}", peer, datanodeId); return getSaslStreams(peer, underlyingOut, underlyingIn); } else if (dnConf.getIgnoreSecurePortsForTesting()) {
/**
 * Receives SASL negotiation for general-purpose handshake.
 *
 * @param peer connection peer
 * @param underlyingOut connection output stream
 * @param underlyingIn connection input stream
 * @return new pair of streams, wrapped after SASL negotiation
 * @throws IOException for any error
 */
private IOStreamPair getSaslStreams(Peer peer, OutputStream underlyingOut,
    InputStream underlyingIn) throws IOException {
  // Secure or explicitly trusted channels need no SASL wrapping.
  if (peer.hasSecureChannel()
      || dnConf.getTrustedChannelResolver().isTrusted(getPeerAddress(peer))) {
    return new IOStreamPair(underlyingIn, underlyingOut);
  }
  // Per-address SASL properties for the server side of the handshake.
  SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();
  Map<String, String> saslProps = saslPropsResolver.getServerProperties(
      getPeerAddress(peer));
  // Callback handler that answers password lookups during negotiation.
  CallbackHandler callbackHandler = new SaslServerCallbackHandler(
      new PasswordFunction() {
        @Override
        public char[] apply(String userName) throws IOException {
          return buildServerPassword(userName);
        }
      });
  // NOTE(review): another copy of this method in this source passes `peer` as
  // the first argument to doSaslHandshake; confirm which overload of
  // doSaslHandshake is current and reconcile the two call sites.
  return doSaslHandshake(underlyingOut, underlyingIn, saslProps,
      callbackHandler);
}
OutputStream underlyingOut, InputStream underlyingIn) throws IOException { if (peer.hasSecureChannel() || dnConf.getTrustedChannelResolver().isTrusted(getPeerAddress(peer))) { return new IOStreamPair(underlyingIn, underlyingOut); dnConf.getEncryptionAlgorithm()); dnConf.getEncryptionAlgorithm());
return; SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver(); if (resources != null && saslPropsResolver == null) { return; if (dnConf.getIgnoreSecurePortsForTesting()) { return;
if (magicNumber != SASL_TRANSFER_MAGIC_NUMBER) { throw new InvalidMagicNumberException(magicNumber, dnConf.getEncryptDataTransfer()); if (sasl.isNegotiatedQopPrivacy()) { cipherOption = negotiateCipherOption(dnConf.getConf(), cipherOptions); if (cipherOption != null) { if (LOG.isDebugEnabled()) { dnConf.getConf(), cipherOption, underlyingOut, underlyingIn, true) : sasl.createStreamPair(out, in); } catch (IOException ioe) {
@Before public void setupMocks() throws Exception { mockNN1 = setupNNMock(0); mockNN2 = setupNNMock(1); // Set up a mock DN with the bare-bones configuration // objects, etc. mockDn = Mockito.mock(DataNode.class); Mockito.doReturn(true).when(mockDn).shouldRun(); Configuration conf = new Configuration(); File dnDataDir = new File(new File(TEST_BUILD_DATA, "dfs"), "data"); conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString()); Mockito.doReturn(conf).when(mockDn).getConf(); Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf(); Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn")) .when(mockDn).getMetrics(); // Set up a simulated dataset with our fake BP mockFSDataset = Mockito.spy(new SimulatedFSDataset(null, conf)); mockFSDataset.addBlockPool(FAKE_BPID, conf); // Wire the dataset to the DN. Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset(); }
public FsDatasetCache(FsDatasetImpl dataset) {
  this.dataset = dataset;
  // Upper bound on the bytes this cache may pin in memory, taken from the
  // DataNode's locked-memory configuration (dfs.datanode.max.locked.memory).
  this.maxBytes = dataset.datanode.getDnConf().getMaxLockedMemory();
  // Daemon worker threads so cache work never blocks JVM shutdown.
  ThreadFactory workerFactory = new ThreadFactoryBuilder()
      .setDaemon(true)
InputStream underlyingIn, int xferPort, DatanodeID datanodeId) throws IOException { if (dnConf.getEncryptDataTransfer()) { LOG.debug( "SASL server doing encrypted handshake for peer = {}, datanodeId = {}", + "peer = {}, datanodeId = {}", peer, datanodeId); return new IOStreamPair(underlyingIn, underlyingOut); } else if (dnConf.getSaslPropsResolver() != null) { LOG.debug( "SASL server doing general handshake for peer = {}, datanodeId = {}", peer, datanodeId); return getSaslStreams(peer, underlyingOut, underlyingIn); } else if (dnConf.getIgnoreSecurePortsForTesting()) {