Tabnine Logo
ExportedBlockKeys
Code IndexAdd Tabnine to your IDE (free)

How to use
ExportedBlockKeys
in
org.apache.hadoop.hdfs.security.token.block

Best Java code snippets using org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Serializes an {@link ExportedBlockKeys} object into its protobuf
 * wire representation.
 *
 * @param keys the block keys to convert
 * @return the populated {@code ExportedBlockKeysProto} message
 */
public static ExportedBlockKeysProto convert(ExportedBlockKeys keys) {
 ExportedBlockKeysProto.Builder builder = ExportedBlockKeysProto.newBuilder()
   .setIsBlockTokenEnabled(keys.isBlockTokenEnabled())
   .setKeyUpdateInterval(keys.getKeyUpdateInterval())
   .setTokenLifeTime(keys.getTokenLifetime())
   .setCurrentKey(convert(keys.getCurrentKey()));
 // Each BlockKey is converted individually and appended to the repeated field.
 for (BlockKey key : keys.getAllKeys()) {
  builder.addAllKeys(convert(key));
 }
 return builder.build();
}
origin: org.apache.hadoop/hadoop-hdfs

 // Factory hook: produces an empty ExportedBlockKeys whose fields are
 // expected to be filled in later (e.g. by Writable deserialization) —
 // NOTE(review): usage context not visible here; confirm against caller.
 @Override
 public Writable newInstance() {
  return new ExportedBlockKeys();
 }
});
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Set block keys, only to be used in worker mode
 */
/**
 * Imports block keys received from the master; only meaningful in
 * worker mode. No-op when running as master or when the input is null.
 *
 * @param exportedKeys keys received from the master; may be null
 * @throws IOException declared for interface compatibility
 */
public synchronized void addKeys(ExportedBlockKeys exportedKeys)
  throws IOException {
 if (isMaster || exportedKeys == null) {
  return;
 }
 LOG.info("Setting block keys");
 // Purge stale entries before merging the newly received set.
 removeExpiredKeys();
 this.currentKey = exportedKeys.getCurrentKey();
 for (BlockKey received : exportedKeys.getAllKeys()) {
  if (received != null) {
   this.allKeys.put(received.getKeyId(), received);
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

if (!hasAnyBlockPoolRegistered) {
 hasAnyBlockPoolRegistered = true;
 isBlockTokenEnabled = keys.isBlockTokenEnabled();
} else {
 if (isBlockTokenEnabled != keys.isBlockTokenEnabled()) {
  throw new RuntimeException("Inconsistent configuration of block access"
    + " tokens. Either all block pools must be configured to use block"
 long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
 long blockTokenLifetime = keys.getTokenLifetime();
 LOG.info("Block token params received from NN: " +
   "for block pool {} keyUpdateInterval={} min(s), " +
origin: org.apache.hadoop/hadoop-hdfs

this.isBlockTokenEnabled = keys.isBlockTokenEnabled();
if (isBlockTokenEnabled) {
 long updateInterval = keys.getKeyUpdateInterval();
 long tokenLifetime = keys.getTokenLifetime();
 LOG.info("Block token params received from NN: update interval="
   + StringUtils.formatTime(updateInterval)
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Set block keys, only to be used in slave mode.
 *
 * <p>Replaces the current key and merges every non-null received key
 * into the local key map; does nothing in master mode or for null input.
 *
 * @param exportedKeys keys received from the master; may be null
 * @throws IOException declared for interface compatibility
 */
public synchronized void addKeys(ExportedBlockKeys exportedKeys)
  throws IOException {
 if (isMaster || exportedKeys == null) {
  return;
 }
 LOG.info("Setting block keys");
 removeExpiredKeys();
 this.currentKey = exportedKeys.getCurrentKey();
 BlockKey[] receivedKeys = exportedKeys.getAllKeys();
 for (BlockKey received : receivedKeys) {
  if (received == null) {
   continue;
  }
  this.allKeys.put(received.getKeyId(), received);
 }
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Builds the protobuf message corresponding to the given block keys.
 *
 * @param keys the block keys to serialize
 * @return the resulting {@code ExportedBlockKeysProto}
 */
public static ExportedBlockKeysProto convert(ExportedBlockKeys keys) {
 ExportedBlockKeysProto.Builder builder = ExportedBlockKeysProto.newBuilder();
 builder.setIsBlockTokenEnabled(keys.isBlockTokenEnabled());
 builder.setKeyUpdateInterval(keys.getKeyUpdateInterval());
 builder.setTokenLifeTime(keys.getTokenLifetime());
 builder.setCurrentKey(convert(keys.getCurrentKey()));
 // Append every individual key to the repeated allKeys field.
 for (BlockKey blockKey : keys.getAllKeys()) {
  builder.addAllKeys(convert(blockKey));
 }
 return builder.build();
}
origin: ch.cern.hadoop/hadoop-hdfs

if (!hasAnyBlockPoolRegistered) {
 hasAnyBlockPoolRegistered = true;
 isBlockTokenEnabled = keys.isBlockTokenEnabled();
} else {
 if (isBlockTokenEnabled != keys.isBlockTokenEnabled()) {
  throw new RuntimeException("Inconsistent configuration of block access"
    + " tokens. Either all block pools must be configured to use block"
 long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
 long blockTokenLifetime = keys.getTokenLifetime();
 LOG.info("Block token params received from NN: for block pool " +
   blockPoolId + " keyUpdateInterval="
origin: org.apache.hadoop/hadoop-hdfs

// No-arg constructor: delegates to the main constructor with an empty
// ExportedBlockKeys — presumably so deserialization can populate the
// fields afterwards (NOTE(review): confirm against Writable usage).
KeyUpdateCommand() {
 this(new ExportedBlockKeys());
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Set block keys, only to be used in slave mode.
 *
 * <p>Replaces the current key and merges all received keys into the
 * local key map, skipping null array entries. Does nothing when running
 * as master or when {@code exportedKeys} is null.
 *
 * @param exportedKeys keys received from the master; may be null
 * @throws IOException declared for interface compatibility —
 *     NOTE(review): no direct throw is visible in this body
 */
public synchronized void addKeys(ExportedBlockKeys exportedKeys)
  throws IOException {
 if (isMaster || exportedKeys == null)
  return;
 LOG.info("Setting block keys");
 // Drop stale keys before merging the newly received set.
 removeExpiredKeys();
 this.currentKey = exportedKeys.getCurrentKey();
 BlockKey[] receivedKeys = exportedKeys.getAllKeys();
 for (int i = 0; i < receivedKeys.length; i++) {
  if (receivedKeys[i] == null)
   continue;
  this.allKeys.put(receivedKeys[i].getKeyId(), receivedKeys[i]);
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Translates an {@link ExportedBlockKeys} instance into its protobuf
 * wire form ({@code ExportedBlockKeysProto}).
 *
 * @param keys the block keys to serialize
 * @return the populated protobuf message
 */
public static ExportedBlockKeysProto convert(ExportedBlockKeys keys) {
 ExportedBlockKeysProto.Builder builder = ExportedBlockKeysProto
   .newBuilder();
 builder.setIsBlockTokenEnabled(keys.isBlockTokenEnabled())
   .setKeyUpdateInterval(keys.getKeyUpdateInterval())
   .setTokenLifeTime(keys.getTokenLifetime())
   .setCurrentKey(convert(keys.getCurrentKey()));
 // Each BlockKey is converted and appended to the repeated allKeys field.
 for (BlockKey k : keys.getAllKeys()) {
  builder.addAllKeys(convert(k));
 }
 return builder.build();
}
origin: io.prestosql.hadoop/hadoop-apache

if (!hasAnyBlockPoolRegistered) {
 hasAnyBlockPoolRegistered = true;
 isBlockTokenEnabled = keys.isBlockTokenEnabled();
} else {
 if (isBlockTokenEnabled != keys.isBlockTokenEnabled()) {
  throw new RuntimeException("Inconsistent configuration of block access"
    + " tokens. Either all block pools must be configured to use block"
 long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
 long blockTokenLifetime = keys.getTokenLifetime();
 LOG.info("Block token params received from NN: for block pool " +
   blockPoolId + " keyUpdateInterval="
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Exports the current and historical block keys; only meaningful in
 * master mode.
 *
 * @return a snapshot of all keys, or {@code null} when not master
 */
public synchronized ExportedBlockKeys exportKeys() {
 if (!isMaster) {
  return null;
 }
 LOG.debug("Exporting access keys");
 BlockKey[] keySnapshot = allKeys.values().toArray(new BlockKey[0]);
 return new ExportedBlockKeys(true, keyUpdateInterval, tokenLifetime,
   currentKey, keySnapshot);
}
origin: ch.cern.hadoop/hadoop-hdfs

/** Asserts that two exported key sets are field-for-field identical. */
void compare(ExportedBlockKeys expKeys, ExportedBlockKeys expKeys1) {
 BlockKey[] first = expKeys.getAllKeys();
 BlockKey[] second = expKeys1.getAllKeys();
 assertEquals(first.length, second.length);
 // Compare the key arrays element by element, then the scalar fields.
 for (int i = 0; i < first.length; i++) {
  compare(first[i], second[i]);
 }
 compare(expKeys.getCurrentKey(), expKeys1.getCurrentKey());
 assertEquals(expKeys.getKeyUpdateInterval(), expKeys1.getKeyUpdateInterval());
 assertEquals(expKeys.getTokenLifetime(), expKeys1.getTokenLifetime());
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Creates a KeyManager for the given block pool: fetches the current
 * block keys from the NameNode and, when block tokens are enabled,
 * builds a secret manager and schedules a periodic key-refresh task.
 *
 * @param blockpoolID the block pool this manager serves
 * @param namenode RPC proxy used to fetch block keys
 * @param encryptDataTransfer whether data-transfer encryption is enabled
 * @param conf configuration; read for the data-encryption algorithm key
 * @throws IOException if fetching the block keys from the NameNode fails
 */
public KeyManager(String blockpoolID, NamenodeProtocol namenode,
  boolean encryptDataTransfer, Configuration conf) throws IOException {
 this.namenode = namenode;
 this.encryptDataTransfer = encryptDataTransfer;
 this.timer = new Timer();
 final ExportedBlockKeys keys = namenode.getBlockKeys();
 this.isBlockTokenEnabled = keys.isBlockTokenEnabled();
 if (isBlockTokenEnabled) {
  long updateInterval = keys.getKeyUpdateInterval();
  long tokenLifetime = keys.getTokenLifetime();
  LOG.info("Block token params received from NN: update interval="
    + StringUtils.formatTime(updateInterval)
    + ", token lifetime=" + StringUtils.formatTime(tokenLifetime));
  String encryptionAlgorithm = conf.get(
    DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  this.blockTokenSecretManager = new BlockTokenSecretManager(
    updateInterval, tokenLifetime, blockpoolID, encryptionAlgorithm);
  this.blockTokenSecretManager.addKeys(keys);
  // sync block keys with NN more frequently than NN updates its block keys
  this.blockKeyUpdater = new BlockKeyUpdater(updateInterval / 4);
  this.shouldRun = true;
 } else {
  // Tokens disabled: neither a secret manager nor an updater is needed.
  this.blockTokenSecretManager = null;
  this.blockKeyUpdater = null;
 }
}

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Create a DatanodeRegistration for a specific block pool.
 * @param nsInfo the namespace info from the first part of the NN handshake
 * @return the registration object sent to the NameNode for this pool
 */
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
 StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
 if (storageInfo == null) {
  // it's null in the case of SimulatedDataSet
  storageInfo = new StorageInfo(
    DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
    nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
    NodeType.DATA_NODE);
 }
 // Identity is assembled from this datanode's addresses, UUID and ports.
 DatanodeID dnId = new DatanodeID(
   streamingAddr.getAddress().getHostAddress(), hostName, 
   storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
     infoSecurePort, getIpcPort());
 // Registration carries empty block keys; presumably real keys arrive
 // later from the NN — NOTE(review): confirm against the handshake flow.
 return new DatanodeRegistration(dnId, storageInfo, 
   new ExportedBlockKeys(), VersionInfo.getVersion());
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Constructs a KeyManager for one block pool. Pulls the current block
 * keys from the NameNode; if block tokens are enabled, it seeds a
 * {@code BlockTokenSecretManager} with them and prepares a periodic
 * updater, otherwise both are left null.
 *
 * @param blockpoolID the block pool this manager serves
 * @param namenode RPC proxy used to fetch block keys
 * @param encryptDataTransfer whether data-transfer encryption is enabled
 * @param conf configuration; read for the data-encryption algorithm key
 * @throws IOException if fetching the block keys from the NameNode fails
 */
public KeyManager(String blockpoolID, NamenodeProtocol namenode,
  boolean encryptDataTransfer, Configuration conf) throws IOException {
 this.namenode = namenode;
 this.encryptDataTransfer = encryptDataTransfer;
 this.timer = new Timer();
 final ExportedBlockKeys keys = namenode.getBlockKeys();
 this.isBlockTokenEnabled = keys.isBlockTokenEnabled();
 if (isBlockTokenEnabled) {
  long updateInterval = keys.getKeyUpdateInterval();
  long tokenLifetime = keys.getTokenLifetime();
  LOG.info("Block token params received from NN: update interval="
    + StringUtils.formatTime(updateInterval)
    + ", token lifetime=" + StringUtils.formatTime(tokenLifetime));
  String encryptionAlgorithm = conf.get(
    DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  this.blockTokenSecretManager = new BlockTokenSecretManager(
    updateInterval, tokenLifetime, blockpoolID, encryptionAlgorithm);
  this.blockTokenSecretManager.addKeys(keys);
  // sync block keys with NN more frequently than NN updates its block keys
  this.blockKeyUpdater = new BlockKeyUpdater(updateInterval / 4);
  this.shouldRun = true;
 } else {
  // Block tokens disabled: no secret manager or updater required.
  this.blockTokenSecretManager = null;
  this.blockKeyUpdater = null;
 }
}

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Reconstructs an {@link ExportedBlockKeys} object from its protobuf
 * wire form, converting the current key and the full key list.
 *
 * @param keys the protobuf message to deserialize
 * @return a new ExportedBlockKeys mirroring every field of {@code keys}
 */
public static ExportedBlockKeys convert(ExportedBlockKeysProto keys) {
 return new ExportedBlockKeys(keys.getIsBlockTokenEnabled(),
   keys.getKeyUpdateInterval(), keys.getTokenLifeTime(),
   convert(keys.getCurrentKey()), convertBlockKeys(keys.getAllKeysList()));
}
origin: io.fabric8/fabric-hadoop

this.isBlockTokenEnabled = keys.isBlockTokenEnabled();
if (isBlockTokenEnabled) {
 long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
 long blockTokenLifetime = keys.getTokenLifetime();
 LOG.info("Block token params received from NN: keyUpdateInterval="
   + blockKeyUpdateInterval / (60 * 1000) + " min(s), tokenLifetime="
origin: io.prestosql.hadoop/hadoop-apache

 // Factory hook: returns an empty ExportedBlockKeys to be populated
 // later (e.g. by Writable deserialization) — NOTE(review): enclosing
 // anonymous class is not fully visible here; confirm usage.
 @Override
 public Writable newInstance() {
  return new ExportedBlockKeys();
 }
});
org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys

Javadoc

Object for passing block keys

Most used methods

  • getKeyUpdateInterval
  • getTokenLifetime
  • <init>
  • getAllKeys
  • getCurrentKey
  • isBlockTokenEnabled

Popular in Java

  • Reactive rest calls using spring rest template
  • setRequestProperty (URLConnection)
  • getContentResolver (Context)
  • setScale (BigDecimal)
  • BufferedImage (java.awt.image)
    The BufferedImage subclass describes a java.awt.Image with an accessible buffer of image data. All
  • Path (java.nio.file)
  • ResultSet (java.sql)
    An interface for an object which represents a database table entry, returned as the result of the query.
  • Set (java.util)
    A Set is a data structure which does not allow duplicate elements.
  • Executors (java.util.concurrent)
    Factory and utility methods for Executor, ExecutorService, ScheduledExecutorService, ThreadFactory,
  • Reference (javax.naming)
  • Github Copilot alternatives
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now