Tabnine Logo
BlockScanner.<init>
Code IndexAdd Tabnine to your IDE (free)

How to use
org.apache.hadoop.hdfs.server.datanode.BlockScanner
constructor

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.BlockScanner.<init> (Showing top 9 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

  new TracerConfigurationManager(DATANODE_HTRACE_PREFIX, conf);
this.fileIoProvider = new FileIoProvider(conf, this);
this.blockScanner = new BlockScanner(this);
this.lastDiskErrorCheck = 0;
this.maxNumberOfBlocksToLog = conf.getLong(DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Creates a dummy DataNode for testing purposes.
 *
 * <p>Most fields are stubbed to null/zero/false so the instance can be
 * constructed without a full cluster environment; only the pieces needed by
 * tests (conf, tracer, file I/O provider, block scanner, socket factory,
 * DNConf, volume checker) are wired up for real.
 *
 * @param conf configuration used to initialize the parent and helpers
 * @throws DiskErrorException propagated from volume/disk setup
 */
@VisibleForTesting
@InterfaceAudience.LimitedPrivate("HDFS")
DataNode(final Configuration conf) throws DiskErrorException {
 super(conf);
 this.tracer = createTracer(conf);
 this.tracerConfigurationManager =
   new TracerConfigurationManager(DATANODE_HTRACE_PREFIX, conf);
 this.fileIoProvider = new FileIoProvider(conf, this);
 // Test-only stubs: these features are unused in the dummy instance.
 this.fileDescriptorPassingDisabledReason = null;
 this.maxNumberOfBlocksToLog = 0;
 this.confVersion = null;
 this.usersWithLocalPathAccess = null;
 this.connectToDnViaHostname = false;
 // NOTE(review): `this` escapes to BlockScanner before construction
 // completes — presumably safe because scanner threads start later; confirm.
 this.blockScanner = new BlockScanner(this, this.getConf());
 this.pipelineSupportECN = false;
 this.socketFactory = NetUtils.getDefaultSocketFactory(conf);
 // DNConf reads this DataNode's conf, so it must come after super(conf).
 this.dnConf = new DNConf(this);
 initOOBTimeout();
 storageLocationChecker = null;
 volumeChecker = new DatasetVolumeChecker(conf, new Timer());
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Creates a dummy DataNode for testing purposes.
 *
 * <p>Only the block scanner is constructed for real; every other field is
 * stubbed to null/zero/false so tests can instantiate a DataNode without a
 * running cluster.
 *
 * @param conf configuration forwarded to the parent and the block scanner
 */
@VisibleForTesting
@InterfaceAudience.LimitedPrivate("HDFS")
DataNode(final Configuration conf) {
 super(conf);
 // NOTE(review): `this` escapes to BlockScanner before construction
 // completes — presumably safe because scanner threads start later; confirm.
 this.blockScanner = new BlockScanner(this, conf);
 // Test-only stubs: these features are unused in the dummy instance.
 this.fileDescriptorPassingDisabledReason = null;
 this.maxNumberOfBlocksToLog = 0;
 this.confVersion = null;
 this.usersWithLocalPathAccess = null;
 this.connectToDnViaHostname = false;
 this.getHdfsBlockLocationsEnabled = false;
 this.pipelineSupportECN = false;
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Creates a dummy DataNode for testing purposes.
 *
 * <p>Only the block scanner is constructed for real; every other field is
 * stubbed to null/zero/false so tests can instantiate a DataNode without a
 * running cluster.
 *
 * @param conf configuration forwarded to the parent and the block scanner
 */
@VisibleForTesting
@InterfaceAudience.LimitedPrivate("HDFS")
DataNode(final Configuration conf) {
 super(conf);
 // NOTE(review): `this` escapes to BlockScanner before construction
 // completes — presumably safe because scanner threads start later; confirm.
 this.blockScanner = new BlockScanner(this, conf);
 // Test-only stubs: these features are unused in the dummy instance.
 this.fileDescriptorPassingDisabledReason = null;
 this.maxNumberOfBlocksToLog = 0;
 this.confVersion = null;
 this.usersWithLocalPathAccess = null;
 this.connectToDnViaHostname = false;
 this.getHdfsBlockLocationsEnabled = false;
 this.pipelineSupportECN = false;
}
origin: ch.cern.hadoop/hadoop-hdfs

    final SecureResources resources) throws IOException {
super(conf);
this.blockScanner = new BlockScanner(this, conf);
this.lastDiskErrorCheck = 0;
this.maxNumberOfBlocksToLog = conf.getLong(DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Prepares test fixtures: a mocked dataset, a test root directory, and a
 * BlockScanner that is disabled via a negative scan period.
 */
@Before
public void setUp() {
 // A scan period of -1 hours disables periodic volume scanning.
 final Configuration scannerConf = new Configuration();
 scannerConf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);

 dataset = mock(FsDatasetImpl.class);
 baseDir = new FileSystemTestHelper().getTestRootDir();
 // No DataNode is needed here, so pass null for the owner.
 blockScanner = new BlockScanner(null, scannerConf);
}
origin: io.prestosql.hadoop/hadoop-apache

    final SecureResources resources) throws IOException {
super(conf);
this.blockScanner = new BlockScanner(this, conf);
this.lastDiskErrorCheck = 0;
this.maxNumberOfBlocksToLog = conf.getLong(DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Prepares test fixtures: mocked DataNode and DataStorage, a disabled
 * BlockScanner, a ShortCircuitRegistry, and an FsDatasetImpl populated with
 * NUM_INIT_VOLUMES storage directories and the test block pools.
 *
 * <p>Stubbing order matters: the conf keys must be set before DNConf reads
 * them, and all DataNode stubs must be in place before FsDatasetImpl is
 * constructed, since its constructor consults the mocked DataNode.
 */
@Before
public void setUp() throws IOException {
 datanode = mock(DataNode.class);
 storage = mock(DataStorage.class);
 this.conf = new Configuration();
 // A scan period of 0 hours leaves the block scanner disabled.
 this.conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
 final DNConf dnConf = new DNConf(conf);
 when(datanode.getConf()).thenReturn(conf);
 when(datanode.getDnConf()).thenReturn(dnConf);
 final BlockScanner disabledBlockScanner = new BlockScanner(datanode, conf);
 when(datanode.getBlockScanner()).thenReturn(disabledBlockScanner);
 final ShortCircuitRegistry shortCircuitRegistry =
   new ShortCircuitRegistry(conf);
 when(datanode.getShortCircuitRegistry()).thenReturn(shortCircuitRegistry);
 createStorageDirs(storage, conf, NUM_INIT_VOLUMES);
 dataset = new FsDatasetImpl(datanode, storage, conf);
 for (String bpid : BLOCK_POOL_IDS) {
  dataset.addBlockPool(bpid, conf);
 }
 // Sanity-check the freshly built dataset before any test runs.
 assertEquals(NUM_INIT_VOLUMES, dataset.getVolumes().size());
 assertEquals(0, dataset.getNumFailedVolumes());
}
origin: ch.cern.hadoop/hadoop-hdfs

  new RoundRobinVolumeChoosingPolicy<>();
conf.setLong(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
final BlockScanner blockScanner = new BlockScanner(datanode, conf);
final FsVolumeList volumeList = new FsVolumeList(
  Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);
org.apache.hadoop.hdfs.server.datanode.BlockScanner.<init>

Popular methods of BlockScanner

  • isEnabled
    Returns true if the block scanner is enabled. If the block scanner is disabled, no volume scanners will be created.
  • markSuspectBlock
    Mark a block as "suspect." This means that we should try to rescan it soon. Note that the VolumeScan
  • removeAllVolumeScanners
    Stops and removes all volume scanners. This function will block until all the volume scanners have s
  • addVolumeScanner
    Set up a scanner for the given block pool and volume.
  • disableBlockPoolId
    Disable scanning a given block pool id.
  • enableBlockPoolId
    Enable scanning a given block pool id.
  • printStats
  • removeVolumeScanner
    Stops and removes a volume scanner. This function will block until the volume scanner has stopped.
  • getVolumeStats
  • setConf

Popular in Java

  • Creating JSON documents from java classes using gson
  • onRequestPermissionsResult (Fragment)
  • onCreateOptionsMenu (Activity)
  • getOriginalFilename (MultipartFile)
    Return the original filename in the client's filesystem.This may contain path information depending
  • ServerSocket (java.net)
    This class represents a server-side socket that waits for incoming client connections. A ServerSocke
  • Timestamp (java.sql)
    A Java representation of the SQL TIMESTAMP type. It provides the capability of representing the SQL
  • Collections (java.util)
    This class consists exclusively of static methods that operate on or return collections. It contains
  • ConcurrentHashMap (java.util.concurrent)
    A plug-in replacement for JDK1.5 java.util.concurrent.ConcurrentHashMap. This version is based on or
  • JComboBox (javax.swing)
  • JLabel (javax.swing)
  • Github Copilot alternatives
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now