Tabnine Logo
ExportedBlockKeys.isBlockTokenEnabled
Code Index — Add Tabnine to your IDE (free)

How to use
isBlockTokenEnabled
method
in
org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys

Best Java code snippets using org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys.isBlockTokenEnabled (Showing top 10 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

if (!hasAnyBlockPoolRegistered) {
 hasAnyBlockPoolRegistered = true;
 isBlockTokenEnabled = keys.isBlockTokenEnabled();
} else {
 if (isBlockTokenEnabled != keys.isBlockTokenEnabled()) {
  throw new RuntimeException("Inconsistent configuration of block access"
    + " tokens. Either all block pools must be configured to use block"
origin: org.apache.hadoop/hadoop-hdfs

this.isBlockTokenEnabled = keys.isBlockTokenEnabled();
if (isBlockTokenEnabled) {
 long updateInterval = keys.getKeyUpdateInterval();
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Translates an {@code ExportedBlockKeys} bean into its protobuf wire form.
 * Copies the block-token-enabled flag, the key update interval, the token
 * lifetime, the current key, and every key in the full key set.
 */
public static ExportedBlockKeysProto convert(ExportedBlockKeys keys) {
 ExportedBlockKeysProto.Builder b = ExportedBlockKeysProto.newBuilder();
 b.setIsBlockTokenEnabled(keys.isBlockTokenEnabled());
 b.setKeyUpdateInterval(keys.getKeyUpdateInterval());
 b.setTokenLifeTime(keys.getTokenLifetime());
 b.setCurrentKey(convert(keys.getCurrentKey()));
 // Each BlockKey is converted individually and appended to the repeated field.
 for (BlockKey key : keys.getAllKeys()) {
  b.addAllKeys(convert(key));
 }
 return b.build();
}
origin: ch.cern.hadoop/hadoop-hdfs

if (!hasAnyBlockPoolRegistered) {
 hasAnyBlockPoolRegistered = true;
 isBlockTokenEnabled = keys.isBlockTokenEnabled();
} else {
 if (isBlockTokenEnabled != keys.isBlockTokenEnabled()) {
  throw new RuntimeException("Inconsistent configuration of block access"
    + " tokens. Either all block pools must be configured to use block"
origin: io.prestosql.hadoop/hadoop-apache

if (!hasAnyBlockPoolRegistered) {
 hasAnyBlockPoolRegistered = true;
 isBlockTokenEnabled = keys.isBlockTokenEnabled();
} else {
 if (isBlockTokenEnabled != keys.isBlockTokenEnabled()) {
  throw new RuntimeException("Inconsistent configuration of block access"
    + " tokens. Either all block pools must be configured to use block"
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Builds a KeyManager for one block pool, pulling the current block keys
 * from the NameNode. When block tokens are enabled on the NameNode, a
 * {@link BlockTokenSecretManager} is created and a background updater is
 * scheduled to re-sync keys at a quarter of the NameNode's update interval;
 * otherwise both are left null and the updater never runs.
 */
public KeyManager(String blockpoolID, NamenodeProtocol namenode,
  boolean encryptDataTransfer, Configuration conf) throws IOException {
 this.namenode = namenode;
 this.encryptDataTransfer = encryptDataTransfer;
 this.timer = new Timer();
 final ExportedBlockKeys exportedKeys = namenode.getBlockKeys();
 this.isBlockTokenEnabled = exportedKeys.isBlockTokenEnabled();
 if (!isBlockTokenEnabled) {
  // Tokens disabled: no secret manager, no key refresh thread.
  this.blockTokenSecretManager = null;
  this.blockKeyUpdater = null;
 } else {
  final long keyUpdateInterval = exportedKeys.getKeyUpdateInterval();
  final long lifetime = exportedKeys.getTokenLifetime();
  LOG.info("Block token params received from NN: update interval="
    + StringUtils.formatTime(keyUpdateInterval)
    + ", token lifetime=" + StringUtils.formatTime(lifetime));
  final String encryptionAlgorithm = conf.get(
    DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  this.blockTokenSecretManager = new BlockTokenSecretManager(
    keyUpdateInterval, lifetime, blockpoolID, encryptionAlgorithm);
  this.blockTokenSecretManager.addKeys(exportedKeys);
  // Refresh 4x faster than the NN rotates keys so we never hold stale keys.
  this.blockKeyUpdater = new BlockKeyUpdater(keyUpdateInterval / 4);
  this.shouldRun = true;
 }
}

origin: io.prestosql.hadoop/hadoop-apache

/**
 * Constructs the KeyManager for the given block pool. Fetches the exported
 * block keys from the NameNode; if block access tokens are enabled there,
 * wires up a {@link BlockTokenSecretManager} seeded with those keys plus a
 * periodic updater, otherwise leaves both components null.
 */
public KeyManager(String blockpoolID, NamenodeProtocol namenode,
  boolean encryptDataTransfer, Configuration conf) throws IOException {
 this.namenode = namenode;
 this.encryptDataTransfer = encryptDataTransfer;
 this.timer = new Timer();
 final ExportedBlockKeys nnKeys = namenode.getBlockKeys();
 this.isBlockTokenEnabled = nnKeys.isBlockTokenEnabled();
 if (isBlockTokenEnabled) {
  final long interval = nnKeys.getKeyUpdateInterval();
  final long tokenLife = nnKeys.getTokenLifetime();
  LOG.info("Block token params received from NN: update interval="
    + StringUtils.formatTime(interval)
    + ", token lifetime=" + StringUtils.formatTime(tokenLife));
  final String algorithm =
    conf.get(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  this.blockTokenSecretManager = new BlockTokenSecretManager(
    interval, tokenLife, blockpoolID, algorithm);
  this.blockTokenSecretManager.addKeys(nnKeys);
  // Sync block keys with the NN more often than the NN rotates them.
  this.blockKeyUpdater = new BlockKeyUpdater(interval / 4);
  this.shouldRun = true;
 } else {
  // Block tokens are off: nothing to manage, nothing to refresh.
  this.blockTokenSecretManager = null;
  this.blockKeyUpdater = null;
 }
}

origin: io.fabric8/fabric-hadoop

this.isBlockTokenEnabled = keys.isBlockTokenEnabled();
if (isBlockTokenEnabled) {
 long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Converts an {@code ExportedBlockKeys} instance to its protobuf message.
 * All scalar fields, the current key, and the repeated key list are copied.
 */
public static ExportedBlockKeysProto convert(ExportedBlockKeys keys) {
 ExportedBlockKeysProto.Builder proto = ExportedBlockKeysProto.newBuilder()
   .setIsBlockTokenEnabled(keys.isBlockTokenEnabled())
   .setKeyUpdateInterval(keys.getKeyUpdateInterval())
   .setTokenLifeTime(keys.getTokenLifetime())
   .setCurrentKey(convert(keys.getCurrentKey()));
 // Append each block key after converting it to its proto form.
 for (BlockKey blockKey : keys.getAllKeys()) {
  proto.addAllKeys(convert(blockKey));
 }
 return proto.build();
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Serializes {@code ExportedBlockKeys} into the equivalent protobuf type,
 * carrying over the enabled flag, intervals, current key, and all keys.
 */
public static ExportedBlockKeysProto convert(ExportedBlockKeys keys) {
 ExportedBlockKeysProto.Builder out = ExportedBlockKeysProto.newBuilder();
 out.setIsBlockTokenEnabled(keys.isBlockTokenEnabled());
 out.setKeyUpdateInterval(keys.getKeyUpdateInterval());
 out.setTokenLifeTime(keys.getTokenLifetime());
 out.setCurrentKey(convert(keys.getCurrentKey()));
 // Convert and add every key from the exported key set.
 for (BlockKey each : keys.getAllKeys()) {
  out.addAllKeys(convert(each));
 }
 return out.build();
}
org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys.isBlockTokenEnabled

Popular methods of ExportedBlockKeys

  • getKeyUpdateInterval
  • getTokenLifetime
  • <init>
  • getAllKeys
  • getCurrentKey

Popular in Java

  • Creating JSON documents from java classes using gson
  • scheduleAtFixedRate (Timer)
  • scheduleAtFixedRate (ScheduledExecutorService)
  • getSharedPreferences (Context)
  • ObjectMapper (com.fasterxml.jackson.databind)
    ObjectMapper provides functionality for reading and writing JSON, either to and from basic POJOs (Pl
  • Kernel (java.awt.image)
  • URLEncoder (java.net)
    This class is used to encode a string using the format required by application/x-www-form-urlencoded
  • Timestamp (java.sql)
    A Java representation of the SQL TIMESTAMP type. It provides the capability of representing the SQL
  • Collections (java.util)
    This class consists exclusively of static methods that operate on or return collections. It contains
  • LinkedHashMap (java.util)
    LinkedHashMap is an implementation of Map that guarantees iteration order. All optional operations a
  • Top 25 Plugins for Webstorm
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimAtomGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyStudentsTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now