Tabnine Logo
ExportedBlockKeys.<init>
Code IndexAdd Tabnine to your IDE (free)

How to use
org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys
constructor

Best Java code snippets using org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys.<init> (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

 // Writable factory callback: returns an empty ExportedBlockKeys whose
 // fields are presumably populated later via readFields — TODO confirm
 // against the WritableFactory registration surrounding this snippet.
 @Override
 public Writable newInstance() {
  return new ExportedBlockKeys();
 }
});
origin: org.apache.hadoop/hadoop-hdfs

// No-arg constructor: delegates to the main constructor with a fresh,
// empty ExportedBlockKeys (likely used for Writable deserialization —
// TODO confirm against the delegated constructor).
KeyUpdateCommand() {
 this(new ExportedBlockKeys());
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Exports the current set of block keys; only meaningful in master mode.
 *
 * @return a snapshot of all keys plus the current key, or {@code null}
 *         when this manager is not the master
 */
public synchronized ExportedBlockKeys exportKeys() {
 if (isMaster) {
  LOG.debug("Exporting access keys");
  BlockKey[] snapshot = allKeys.values().toArray(new BlockKey[0]);
  return new ExportedBlockKeys(true, keyUpdateInterval, tokenLifetime,
    currentKey, snapshot);
 }
 return null;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Create a DatanodeRegistration for a specific block pool.
 * @param nsInfo the namespace info from the first part of the NN handshake
 * @return a registration carrying this datanode's identity, the pool's
 *         storage info, and empty block keys (real keys are presumably
 *         supplied later by the NN — TODO confirm)
 */
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
 StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
 if (storageInfo == null) {
  // it's null in the case of SimulatedDataSet
  storageInfo = new StorageInfo(
    DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
    nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
    NodeType.DATA_NODE);
 }
 // Identity is built from the streaming address plus this node's ports.
 DatanodeID dnId = new DatanodeID(
   streamingAddr.getAddress().getHostAddress(), hostName, 
   storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
     infoSecurePort, getIpcPort());
 return new DatanodeRegistration(dnId, storageInfo, 
   new ExportedBlockKeys(), VersionInfo.getVersion());
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Converts a protobuf {@code ExportedBlockKeysProto} into the Java-side
 * {@code ExportedBlockKeys}, delegating key conversion to the sibling
 * {@code convert(BlockKeyProto)} and {@code convertBlockKeys} helpers.
 * @param keys the protobuf message to convert
 * @return the equivalent ExportedBlockKeys
 */
public static ExportedBlockKeys convert(ExportedBlockKeysProto keys) {
 return new ExportedBlockKeys(keys.getIsBlockTokenEnabled(),
   keys.getKeyUpdateInterval(), keys.getTokenLifeTime(),
   convert(keys.getCurrentKey()), convertBlockKeys(keys.getAllKeysList()));
}
origin: ch.cern.hadoop/hadoop-hdfs

 // Writable factory callback: returns an empty ExportedBlockKeys for
 // deserialization — fields presumably filled via readFields (TODO confirm).
 @Override
 public Writable newInstance() {
  return new ExportedBlockKeys();
 }
});
origin: io.prestosql.hadoop/hadoop-apache

 // Writable factory callback: returns an empty ExportedBlockKeys for
 // deserialization — fields presumably filled via readFields (TODO confirm).
 @Override
 public Writable newInstance() {
  return new ExportedBlockKeys();
 }
});
origin: ch.cern.hadoop/hadoop-hdfs

// No-arg constructor: delegates to the main constructor with a fresh,
// empty ExportedBlockKeys — likely for deserialization (TODO confirm).
KeyUpdateCommand() {
 this(new ExportedBlockKeys());
}
origin: io.prestosql.hadoop/hadoop-apache

// No-arg constructor: delegates to the main constructor with a fresh,
// empty ExportedBlockKeys — likely for deserialization (TODO confirm).
KeyUpdateCommand() {
 this(new ExportedBlockKeys());
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Export block keys, only to be used in master mode.
 *
 * @return the current key plus a snapshot of all keys, or {@code null}
 *         when this manager is not the master
 */
public synchronized ExportedBlockKeys exportKeys() {
 // Braces added on single-statement ifs for consistency with the rest of
 // the codebase (Hadoop style); behavior unchanged.
 if (!isMaster) {
  return null;
 }
 if (LOG.isDebugEnabled()) {
  LOG.debug("Exporting access keys");
 }
 return new ExportedBlockKeys(true, keyUpdateInterval, tokenLifetime,
   currentKey, allKeys.values().toArray(new BlockKey[0]));
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Export block keys, only to be used in master mode.
 *
 * @return the current key plus a snapshot of all keys, or {@code null}
 *         when this manager is not the master
 */
public synchronized ExportedBlockKeys exportKeys() {
 // Braces added on single-statement ifs for consistency with the rest of
 // the codebase (Hadoop style); behavior unchanged.
 if (!isMaster) {
  return null;
 }
 if (LOG.isDebugEnabled()) {
  LOG.debug("Exporting access keys");
 }
 return new ExportedBlockKeys(true, keyUpdateInterval, tokenLifetime,
   currentKey, allKeys.values().toArray(new BlockKey[0]));
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Create a DatanodeRegistration for a specific block pool.
 * @param nsInfo the namespace info from the first part of the NN handshake
 * @return a registration carrying this datanode's identity, the pool's
 *         storage info, and empty block keys (real keys are presumably
 *         supplied later by the NN — TODO confirm)
 */
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
 StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
 if (storageInfo == null) {
  // it's null in the case of SimulatedDataSet
  storageInfo = new StorageInfo(
    DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
    nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
    NodeType.DATA_NODE);
 }
 // Identity is built from the streaming address plus this node's ports.
 DatanodeID dnId = new DatanodeID(
   streamingAddr.getAddress().getHostAddress(), hostName, 
   storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
     infoSecurePort, getIpcPort());
 return new DatanodeRegistration(dnId, storageInfo, 
   new ExportedBlockKeys(), VersionInfo.getVersion());
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Create a DatanodeRegistration for a specific block pool.
 * @param nsInfo the namespace info from the first part of the NN handshake
 * @return a registration carrying this datanode's identity, the pool's
 *         storage info, and empty block keys (real keys are presumably
 *         supplied later by the NN — TODO confirm)
 */
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
 StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
 if (storageInfo == null) {
  // it's null in the case of SimulatedDataSet
  storageInfo = new StorageInfo(
    DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
    nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
    NodeType.DATA_NODE);
 }
 // Identity is built from the streaming address plus this node's ports.
 DatanodeID dnId = new DatanodeID(
   streamingAddr.getAddress().getHostAddress(), hostName, 
   storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
     infoSecurePort, getIpcPort());
 return new DatanodeRegistration(dnId, storageInfo, 
   new ExportedBlockKeys(), VersionInfo.getVersion());
}
origin: ch.cern.hadoop/hadoop-hdfs

// Test helper: builds a DatanodeRegistration for the local datanode with
// bare StorageInfo, empty block keys, and the current software version.
public static DatanodeRegistration getLocalDatanodeRegistration() {
 return new DatanodeRegistration(getLocalDatanodeID(), new StorageInfo(
   NodeType.DATA_NODE), new ExportedBlockKeys(), VersionInfo.getVersion());
}

origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Converts a protobuf {@code ExportedBlockKeysProto} into the Java-side
 * {@code ExportedBlockKeys}, delegating key conversion to the sibling
 * {@code convert(BlockKeyProto)} and {@code convertBlockKeys} helpers.
 * @param keys the protobuf message to convert
 * @return the equivalent ExportedBlockKeys
 */
public static ExportedBlockKeys convert(ExportedBlockKeysProto keys) {
 return new ExportedBlockKeys(keys.getIsBlockTokenEnabled(),
   keys.getKeyUpdateInterval(), keys.getTokenLifeTime(),
   convert(keys.getCurrentKey()), convertBlockKeys(keys.getAllKeysList()));
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Converts a protobuf {@code ExportedBlockKeysProto} into the Java-side
 * {@code ExportedBlockKeys}, delegating key conversion to the sibling
 * {@code convert(BlockKeyProto)} and {@code convertBlockKeys} helpers.
 * @param keys the protobuf message to convert
 * @return the equivalent ExportedBlockKeys
 */
public static ExportedBlockKeys convert(ExportedBlockKeysProto keys) {
 return new ExportedBlockKeys(keys.getIsBlockTokenEnabled(),
   keys.getKeyUpdateInterval(), keys.getTokenLifeTime(),
   convert(keys.getCurrentKey()), convertBlockKeys(keys.getAllKeysList()));
}
origin: ch.cern.hadoop/hadoop-hdfs

 /**
  * Transfer blocks to another data-node.
  * Just report on behalf of the other data-node
  * that the blocks have been received.
  *
  * <p>For every (block, target) pair a synthetic DatanodeRegistration is
  * built for the target and a RECEIVED_BLOCK notification is sent via
  * {@code blockReceivedAndDeleted}, simulating a completed transfer
  * without moving any data.
  *
  * @param blocks blocks whose transfers are being simulated
  * @param xferTargets per-block array of destination datanodes
  * @param targetStorageIDs per-block, per-target storage IDs
  * @return the number of blocks processed
  * @throws IOException if the blockReceivedAndDeleted RPC fails
  */
 private int transferBlocks( Block blocks[], 
               DatanodeInfo xferTargets[][],
               String targetStorageIDs[][]
              ) throws IOException {
  for(int i = 0; i < blocks.length; i++) {
   DatanodeInfo blockTargets[] = xferTargets[i];
   for(int t = 0; t < blockTargets.length; t++) {
    DatanodeInfo dnInfo = blockTargets[t];
    String targetStorageID = targetStorageIDs[i][t];
    DatanodeRegistration receivedDNReg;
    // Impersonate the target datanode: its info, fresh storage, empty keys.
    receivedDNReg = new DatanodeRegistration(dnInfo,
     new DataStorage(nsInfo),
     new ExportedBlockKeys(), VersionInfo.getVersion());
    ReceivedDeletedBlockInfo[] rdBlocks = {
     new ReceivedDeletedBlockInfo(
        blocks[i], ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
        null) };
    StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(
      targetStorageID, rdBlocks) };
    dataNodeProto.blockReceivedAndDeleted(receivedDNReg, bpid, report);
   }
  }
  return blocks.length;
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

// Round-trip test: ExportedBlockKeys -> protobuf -> ExportedBlockKeys must
// preserve every field; equality is verified by the compare() helper.
@Test
public void testConvertExportedBlockKeys() {
 BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
 ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
   getBlockKey(1), keys);
 ExportedBlockKeysProto expKeysProto = PBHelper.convert(expKeys);
 ExportedBlockKeys expKeys1 = PBHelper.convert(expKeysProto);
 compare(expKeys, expKeys1);
}

origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Registers this (benchmark-simulated) datanode with the namenode and
 * sends an initial empty block report.
 * @throws IOException if any namenode RPC fails
 */
void register() throws IOException {
 // get versions from the namenode
 nsInfo = nameNodeProto.versionRequest();
 // Build a registration with a freshly generated UUID, default ports,
 // and empty block keys.
 dnRegistration = new DatanodeRegistration(
   new DatanodeID(DNS.getDefaultIP("default"),
     DNS.getDefaultHost("default", "default"),
     DataNode.generateUuid(), getNodePort(dnIdx),
     DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
     DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
     DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
   new DataStorage(nsInfo),
   new ExportedBlockKeys(), VersionInfo.getVersion());
 // register datanode
 dnRegistration = dataNodeProto.registerDatanode(dnRegistration);
 dnRegistration.setNamespaceInfo(nsInfo);
 //first block reports
 storage = new DatanodeStorage(DatanodeStorage.generateUuid());
 final StorageBlockReport[] reports = {
   new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
 };
 dataNodeProto.blockReport(dnRegistration, bpid, reports,
     new BlockReportContext(1, 0, System.nanoTime()));
}
origin: ch.cern.hadoop/hadoop-hdfs

// Round-trip test: DatanodeRegistration -> protobuf -> DatanodeRegistration
// must preserve storage info, exported keys, identity, and version.
@Test
public void testConvertDatanodeRegistration() {
 DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
 BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
 ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
   getBlockKey(1), keys);
 DatanodeRegistration reg = new DatanodeRegistration(dnId,
   new StorageInfo(NodeType.DATA_NODE), expKeys, "3.0.0");
 DatanodeRegistrationProto proto = PBHelper.convert(reg);
 DatanodeRegistration reg2 = PBHelper.convert(proto);
 compare(reg.getStorageInfo(), reg2.getStorageInfo());
 compare(reg.getExportedKeys(), reg2.getExportedKeys());
 compare(reg, reg2);
 assertEquals(reg.getSoftwareVersion(), reg2.getSoftwareVersion());
}
org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys.<init>

Popular methods of ExportedBlockKeys

  • getKeyUpdateInterval
  • getTokenLifetime
  • getAllKeys
  • getCurrentKey
  • isBlockTokenEnabled

Popular in Java

  • Reading from database using SQL prepared statement
  • onCreateOptionsMenu (Activity)
  • setRequestProperty (URLConnection)
  • runOnUiThread (Activity)
  • BufferedReader (java.io)
    Wraps an existing Reader and buffers the input. Expensive interaction with the underlying reader is
  • Charset (java.nio.charset)
    A charset is a named mapping between Unicode characters and byte sequences. Every Charset can decode
  • Permission (java.security)
    Legacy security code; do not use.
  • Comparator (java.util)
    A Comparator is used to compare two objects to determine their ordering with respect to each other.
  • Executor (java.util.concurrent)
    An object that executes submitted Runnable tasks. This interface provides a way of decoupling task s
  • Join (org.hibernate.mapping)
  • Top Sublime Text plugins
Tabnine Logo
  • Products

    Search for Java code · Search for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now