How to use DNConf in org.apache.hadoop.hdfs.server.datanode

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.DNConf (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

this.dataDirs = dataDirectories;
this.dnConf = new DNConf(this);
checkSecureConfig(dnConf, getConf(), resources);
// ...
  dnConf.maxLockedMemory);
int volFailuresTolerated = dnConf.getVolFailuresTolerated();
int volsConfigured = dnConf.getVolsConfigured();
if (volFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT
    || volFailuresTolerated >= volsConfigured) {
  // ...
}
// ...
saslClient = new SaslDataTransferClient(dnConf.getConf(),
    dnConf.saslPropsResolver, dnConf.trustedChannelResolver);
saslServer = new SaslDataTransferServer(dnConf, blockPoolTokenSecretManager);
origin: org.apache.hadoop/hadoop-hdfs

private void checkNNVersion(NamespaceInfo nsInfo)
  throws IncorrectVersionException {
 // build and layout versions should match
 String nnVersion = nsInfo.getSoftwareVersion();
 String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
 if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
  IncorrectVersionException ive = new IncorrectVersionException(
    minimumNameNodeVersion, nnVersion, "NameNode", "DataNode");
  LOG.warn(ive.getMessage());
  throw ive;
 }
 String dnVersion = VersionInfo.getVersion();
 if (!nnVersion.equals(dnVersion)) {
  LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
    "DataNode version '" + dnVersion + "' but is within acceptable " +
    "limits. Note: This is normal during a rolling upgrade.");
 }
}
origin: org.apache.hadoop/hadoop-hdfs

long bpReadyTimeout = dnConf.getBpReadyTimeout();
StopWatch sw = new StopWatch();
sw.start();
origin: org.apache.hadoop/hadoop-hdfs

  OutputStream underlyingOut, InputStream underlyingIn) throws IOException {
 if (peer.hasSecureChannel() ||
     dnConf.getTrustedChannelResolver().isTrusted(getPeerAddress(peer))) {
  return new IOStreamPair(underlyingIn, underlyingOut);
 }
 // ...
   dnConf.getEncryptionAlgorithm());
 // ...
   dnConf.getEncryptionAlgorithm());
origin: org.apache.hadoop/hadoop-hdfs

 InputStream underlyingIn, int xferPort, DatanodeID datanodeId)
 throws IOException {
if (dnConf.getEncryptDataTransfer()) {
 LOG.debug(
   "SASL server doing encrypted handshake for peer = {}, datanodeId = {}",
   peer, datanodeId);
 // ...
 return new IOStreamPair(underlyingIn, underlyingOut);
} else if (dnConf.getSaslPropsResolver() != null) {
 LOG.debug(
  "SASL server doing general handshake for peer = {}, datanodeId = {}",
  peer, datanodeId);
 return getSaslStreams(peer, underlyingOut, underlyingIn);
} else if (dnConf.getIgnoreSecurePortsForTesting()) {
origin: org.apache.hadoop/hadoop-hdfs

if (dnConf.getIgnoreSecurePortsForTesting()) {
 return;
}
// ...
SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();
if (saslPropsResolver != null &&
    DFSUtil.getHttpPolicy(conf) == HttpConfig.Policy.HTTPS_ONLY) {
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Receives SASL negotiation for general-purpose handshake.
 *
 * @param peer connection peer
 * @param underlyingOut connection output stream
 * @param underlyingIn connection input stream
 * @return new pair of streams, wrapped after SASL negotiation
 * @throws IOException for any error
 */
private IOStreamPair getSaslStreams(Peer peer, OutputStream underlyingOut,
  InputStream underlyingIn) throws IOException {
 if (peer.hasSecureChannel() ||
   dnConf.getTrustedChannelResolver().isTrusted(getPeerAddress(peer))) {
  return new IOStreamPair(underlyingIn, underlyingOut);
 }
 SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();
 Map<String, String> saslProps = saslPropsResolver.getServerProperties(
  getPeerAddress(peer));
 CallbackHandler callbackHandler = new SaslServerCallbackHandler(
  new PasswordFunction() {
   @Override
   public char[] apply(String userName) throws IOException {
    return buildServerPassword(userName);
   }
 });
 return doSaslHandshake(peer, underlyingOut, underlyingIn, saslProps,
   callbackHandler);
}
origin: org.apache.hadoop/hadoop-hdfs

if (magicNumber != SASL_TRANSFER_MAGIC_NUMBER) {
 throw new InvalidMagicNumberException(magicNumber,
   dnConf.getEncryptDataTransfer());
}
// ...
if (sasl.isNegotiatedQopPrivacy()) {
 Configuration conf = dnConf.getConf();
 cipherOption = negotiateCipherOption(conf, cipherOptions);
 if (LOG.isDebugEnabled()) {
  // ...
 }
}
// ...
  dnConf.getConf(), cipherOption, underlyingOut, underlyingIn, true) :
   sasl.createStreamPair(out, in);
} catch (IOException ioe) {
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Creates a dummy DataNode for testing purpose.
 */
@VisibleForTesting
@InterfaceAudience.LimitedPrivate("HDFS")
DataNode(final Configuration conf) throws DiskErrorException {
 super(conf);
 this.tracer = createTracer(conf);
 this.tracerConfigurationManager =
   new TracerConfigurationManager(DATANODE_HTRACE_PREFIX, conf);
 this.fileIoProvider = new FileIoProvider(conf, this);
 this.fileDescriptorPassingDisabledReason = null;
 this.maxNumberOfBlocksToLog = 0;
 this.confVersion = null;
 this.usersWithLocalPathAccess = null;
 this.connectToDnViaHostname = false;
 this.blockScanner = new BlockScanner(this, this.getConf());
 this.pipelineSupportECN = false;
 this.socketFactory = NetUtils.getDefaultSocketFactory(conf);
 this.dnConf = new DNConf(this);
 initOOBTimeout();
 storageLocationChecker = null;
 volumeChecker = new DatasetVolumeChecker(conf, new Timer());
}
origin: org.apache.hadoop/hadoop-hdfs

volFailuresTolerated = datanode.getDnConf().getVolFailuresTolerated();
// ...
  dataLocations, storage);
volsConfigured = datanode.getDnConf().getVolsConfigured();
int volsFailed = volumeFailureInfos.size();
// ...
  datanode.getDnConf().getMaxLockedMemory() > 0) {
 lazyWriter = new Daemon(new LazyWriter(conf));
 lazyWriter.start();
origin: org.apache.hadoop/hadoop-hdfs

public FsDatasetCache(FsDatasetImpl dataset) {
 this.dataset = dataset;
 this.maxBytes = dataset.datanode.getDnConf().getMaxLockedMemory();
 ThreadFactory workerFactory = new ThreadFactoryBuilder()
   .setDaemon(true)
origin: org.apache.hadoop/hadoop-hdfs

public DNConf(final Configurable dn) {
 this.dn = dn;
 socketTimeout = getConf().getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
   HdfsConstants.READ_TIMEOUT);
 socketWriteTimeout = getConf().getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
   HdfsConstants.WRITE_TIMEOUT);
 socketKeepaliveTimeout = getConf().getInt(
   DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
   DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT);
 this.transferSocketSendBufferSize = getConf().getInt(
   DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_SEND_BUFFER_SIZE_KEY,
   DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_SEND_BUFFER_SIZE_DEFAULT);
 this.transferSocketRecvBufferSize = getConf().getInt(
   DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_KEY,
   DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_DEFAULT);
 this.tcpNoDelay = getConf().getBoolean(
   DFSConfigKeys.DFS_DATA_TRANSFER_SERVER_TCPNODELAY,
   DFSConfigKeys.DFS_DATA_TRANSFER_SERVER_TCPNODELAY_DEFAULT);
 transferToAllowed = getConf().getBoolean(
   DFS_DATANODE_TRANSFERTO_ALLOWED_KEY,
   DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT);
 readaheadLength = getConf().getLong(
   HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY,
   HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
 maxDataLength = getConf().getInt(DFSConfigKeys.IPC_MAXIMUM_DATA_LENGTH,
   DFSConfigKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
 dropCacheBehindWrites = getConf().getBoolean(
   DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY,
origin: ch.cern.hadoop/hadoop-hdfs

 InputStream underlyingIn, int xferPort, DatanodeID datanodeId)
 throws IOException {
if (dnConf.getEncryptDataTransfer()) {
 LOG.debug(
   "SASL server doing encrypted handshake for peer = {}, datanodeId = {}",
   peer, datanodeId);
 // ...
 return new IOStreamPair(underlyingIn, underlyingOut);
} else if (dnConf.getSaslPropsResolver() != null) {
 LOG.debug(
  "SASL server doing general handshake for peer = {}, datanodeId = {}",
  peer, datanodeId);
 return getSaslStreams(peer, underlyingOut, underlyingIn);
} else if (dnConf.getIgnoreSecurePortsForTesting()) {
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Receives SASL negotiation for general-purpose handshake.
 *
 * @param peer connection peer
 * @param underlyingOut connection output stream
 * @param underlyingIn connection input stream
 * @return new pair of streams, wrapped after SASL negotiation
 * @throws IOException for any error
 */
private IOStreamPair getSaslStreams(Peer peer, OutputStream underlyingOut,
  InputStream underlyingIn) throws IOException {
 if (peer.hasSecureChannel() ||
   dnConf.getTrustedChannelResolver().isTrusted(getPeerAddress(peer))) {
  return new IOStreamPair(underlyingIn, underlyingOut);
 }
 SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();
 Map<String, String> saslProps = saslPropsResolver.getServerProperties(
  getPeerAddress(peer));
 CallbackHandler callbackHandler = new SaslServerCallbackHandler(
  new PasswordFunction() {
   @Override
   public char[] apply(String userName) throws IOException {
    return buildServerPassword(userName);
   }
 });
 return doSaslHandshake(underlyingOut, underlyingIn, saslProps,
   callbackHandler);
}
origin: ch.cern.hadoop/hadoop-hdfs

  OutputStream underlyingOut, InputStream underlyingIn) throws IOException {
 if (peer.hasSecureChannel() ||
     dnConf.getTrustedChannelResolver().isTrusted(getPeerAddress(peer))) {
  return new IOStreamPair(underlyingIn, underlyingOut);
 }
 // ...
   dnConf.getEncryptionAlgorithm());
 // ...
   dnConf.getEncryptionAlgorithm());
origin: io.prestosql.hadoop/hadoop-apache

 return;
}
SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();
if (resources != null && saslPropsResolver == null) {
 return;
}
// ...
if (dnConf.getIgnoreSecurePortsForTesting()) {
 return;
origin: ch.cern.hadoop/hadoop-hdfs

if (magicNumber != SASL_TRANSFER_MAGIC_NUMBER) {
 throw new InvalidMagicNumberException(magicNumber,
   dnConf.getEncryptDataTransfer());
}
// ...
if (sasl.isNegotiatedQopPrivacy()) {
 cipherOption = negotiateCipherOption(dnConf.getConf(), cipherOptions);
 if (cipherOption != null) {
  if (LOG.isDebugEnabled()) {
   // ...
  }
 }
}
// ...
  dnConf.getConf(), cipherOption, underlyingOut, underlyingIn, true) :
   sasl.createStreamPair(out, in);
} catch (IOException ioe) {
origin: ch.cern.hadoop/hadoop-hdfs

@Before
public void setupMocks() throws Exception {
 mockNN1 = setupNNMock(0);
 mockNN2 = setupNNMock(1);
 // Set up a mock DN with the bare-bones configuration
 // objects, etc.
 mockDn = Mockito.mock(DataNode.class);
 Mockito.doReturn(true).when(mockDn).shouldRun();
 Configuration conf = new Configuration();
 File dnDataDir = new File(new File(TEST_BUILD_DATA, "dfs"), "data");
 conf.set(DFS_DATANODE_DATA_DIR_KEY, dnDataDir.toURI().toString());
 Mockito.doReturn(conf).when(mockDn).getConf();
 Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
 Mockito.doReturn(DataNodeMetrics.create(conf, "fake dn"))
 .when(mockDn).getMetrics();
 // Set up a simulated dataset with our fake BP
 mockFSDataset = Mockito.spy(new SimulatedFSDataset(null, conf));
 mockFSDataset.addBlockPool(FAKE_BPID, conf);
 // Wire the dataset to the DN.
 Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
}
origin: ch.cern.hadoop/hadoop-hdfs

public FsDatasetCache(FsDatasetImpl dataset) {
 this.dataset = dataset;
 this.maxBytes = dataset.datanode.getDnConf().getMaxLockedMemory();
 ThreadFactory workerFactory = new ThreadFactoryBuilder()
   .setDaemon(true)
origin: io.prestosql.hadoop/hadoop-apache

 InputStream underlyingIn, int xferPort, DatanodeID datanodeId)
 throws IOException {
if (dnConf.getEncryptDataTransfer()) {
 LOG.debug(
   "SASL server doing encrypted handshake for peer = {}, datanodeId = {}",
   peer, datanodeId);
 // ...
 return new IOStreamPair(underlyingIn, underlyingOut);
} else if (dnConf.getSaslPropsResolver() != null) {
 LOG.debug(
  "SASL server doing general handshake for peer = {}, datanodeId = {}",
  peer, datanodeId);
 return getSaslStreams(peer, underlyingOut, underlyingIn);
} else if (dnConf.getIgnoreSecurePortsForTesting()) {
org.apache.hadoop.hdfs.server.datanode.DNConf

Javadoc

Simple class encapsulating all of the configuration that the DataNode loads at startup time.

Most used methods

  • <init>
  • getMinimumNameNodeVersion
  • getBpReadyTimeout
  • getConf
    Returns the configuration.
  • getEncryptDataTransfer
    Returns true if encryption enabled for DataTransferProtocol.
  • getEncryptionAlgorithm
    Returns encryption algorithm configured for DataTransferProtocol, or null if not configured.
  • getIgnoreSecurePortsForTesting
    Returns true if configuration is set to skip checking for proper port configuration in a secured cluster.
  • getMaxLockedMemory
  • getSaslPropsResolver
    Returns the SaslPropertiesResolver configured for use with DataTransferProtocol, or null if not configured.
  • getTrustedChannelResolver
    Returns the TrustedChannelResolver configured for use with DataTransferProtocol, or null if not configured.
  • getXceiverStopTimeout
  • getTransferSocketRecvBufferSize
  • getTransferSocketSendBufferSize
  • getAllowNonLocalLazyPersist
  • getConnectToDnViaHostname
  • getDataTransferServerTcpNoDelay
  • getLifelineIntervalMs
  • getMaxDataLength
  • getSlowIoWarningThresholdMs
  • getSocketTimeout
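
For orientation, the sketch below shows how the pieces above fit together: it builds a DNConf from a plain Configuration (the 2.x-style DNConf(Configuration) constructor seen in the TestBPOfferService snippet; 3.x builds DNConf from the DataNode itself as a Configurable) and reads a few of the most used getters. This is a minimal illustration, not code from the Hadoop sources; the dfs.encrypt.data.transfer key is the standard configuration name, but exact constructor signatures and getter availability vary across the Hadoop versions indexed above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.server.datanode.DNConf;
import org.apache.hadoop.security.SaslPropertiesResolver;

public class DNConfExample {
 public static void main(String[] args) {
  // Assumption: 2.x-style DNConf(Configuration) constructor; in 3.x the
  // DataNode itself (a Configurable) is passed instead.
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean("dfs.encrypt.data.transfer", true);

  DNConf dnConf = new DNConf(conf);

  // Timeouts and memory limits that the DataNode resolves once at startup.
  int socketTimeout = dnConf.getSocketTimeout();
  long maxLockedMemory = dnConf.getMaxLockedMemory();
  long bpReadyTimeout = dnConf.getBpReadyTimeout();

  // Security settings consulted by the SASL/encryption snippets above.
  boolean encrypt = dnConf.getEncryptDataTransfer();
  SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();    // may be null
  TrustedChannelResolver trustedResolver = dnConf.getTrustedChannelResolver(); // may be null

  System.out.println("socketTimeout=" + socketTimeout
    + ", maxLockedMemory=" + maxLockedMemory
    + ", bpReadyTimeout=" + bpReadyTimeout
    + ", encryptDataTransfer=" + encrypt
    + ", saslPropsResolver=" + saslPropsResolver
    + ", trustedChannelResolver=" + trustedResolver);
 }
}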
