Daemon

How to use Daemon in org.apache.hadoop.util

Best Java code snippets using org.apache.hadoop.util.Daemon (Showing top 20 results out of 315)
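Before the collected snippets, a minimal sketch of the lifecycle they all share: wrap a Runnable in a Daemon (the constructor marks the thread as a daemon), start it, and shut it down with interrupt() followed by a bounded join(). The worker Runnable, thread name, and timeout below are illustrative placeholders, not Hadoop code.

import org.apache.hadoop.util.Daemon;

public class DaemonLifecycleSketch {
 public static void main(String[] args) throws InterruptedException {
  // Hypothetical background task, used only for illustration.
  Runnable worker = () -> {
   try {
    while (!Thread.currentThread().isInterrupted()) {
     Thread.sleep(1000L); // stand-in for periodic work
    }
   } catch (InterruptedException e) {
    // interrupt() is the usual shutdown signal for these daemons
   }
  };

  Daemon daemon = new Daemon(worker); // Daemon calls setDaemon(true) for you
  daemon.setName("example-worker");
  daemon.start();

  // Typical shutdown pattern seen in the snippets below.
  daemon.interrupt();
  daemon.join(3000);
 }
}

The bounded join(3000) mirrors the timeouts used in several of the snippets that follow.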

origin: org.apache.hadoop/hadoop-common

@Override
public Thread newThread(Runnable runnable) {
 return new Daemon(runnable);
}
origin: org.apache.hadoop/hadoop-common

public void shutdown() {
 LOG.info("Stopping HealthMonitor thread");
 shouldRun = false;
 daemon.interrupt();
}
origin: org.apache.hadoop/hadoop-common

void join() throws InterruptedException {
 daemon.join();
}
origin: org.apache.hadoop/hadoop-hdfs

responder = new Daemon(datanode.threadGroup,
  new PacketResponder(replyOut, mirrIn, downstreams));
responder.start(); // start thread to process responses

// Later, on close: stop the responder, then interrupt and wait for it to exit.
((PacketResponder) responder.getRunnable()).close();
responderClosed = true;
responder.interrupt();
responder.join(joinTimeout);
if (responder.isAlive()) {
 String msg = "Join on responder thread " + responder + " timed out";
 LOG.warn(msg);
 responder.interrupt();
}
origin: org.apache.hadoop/hadoop-hdfs

 public Daemon recoverBlocks(final String who,
   final Collection<RecoveringBlock> blocks) {
  Daemon d = new Daemon(datanode.threadGroup, new Runnable() {
   @Override
   public void run() {
    for(RecoveringBlock b : blocks) {
     try {
      logRecoverBlock(who, b);
      if (b.isStriped()) {
       new RecoveryTaskStriped((RecoveringStripedBlock) b).recover();
      } else {
       new RecoveryTaskContiguous(b).recover();
      }
     } catch (IOException e) {
      LOG.warn("recoverBlocks FAILED: " + b, e);
     }
    }
   }
  });
  d.start();
  return d;
 }
origin: org.apache.hadoop/hadoop-hdfs

void stop() {
 fsRunning = false;
 if(timerThread == null) return;
 timerThread.interrupt();
 try {
  timerThread.join(3000);
 } catch (InterruptedException ie) {
 }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Initializes block movement tracker daemon and starts the thread.
 */
private void startMovementTracker() {
 movementTrackerThread = new Daemon(this.blkMovementTracker);
 movementTrackerThread.setName("BlockStorageMovementTracker");
 movementTrackerThread.start();
}
origin: org.apache.hadoop/hadoop-hdfs

Daemon daemon = new Daemon(threadGroup,
  new DataTransfer(targets, targetStorageTypes, targetStorageIds, b,
    stage, client));
daemon.start();
try {
 daemon.join();
} catch (InterruptedException e) {
 throw new IOException(
   "Pipeline recovery for " + b + " is interrupted.", e);
}
origin: org.apache.hadoop/hadoop-hdfs

@Override // FsDatasetSpi
public void shutdown() {
 fsRunning = false;
 if (lazyWriter != null) {
  ((LazyWriter) lazyWriter.getRunnable()).stop();
  lazyWriter.interrupt();
 }
 if (mbeanName != null) {
  MBeans.unregister(mbeanName);
 }
 
 if (asyncDiskService != null) {
  asyncDiskService.shutdown();
 }
 if (asyncLazyPersistService != null) {
  asyncLazyPersistService.shutdown();
 }
 
 if(volumes != null) {
  volumes.shutdown();
 }
 if (lazyWriter != null) {
  try {
   lazyWriter.join();
  } catch (InterruptedException ie) {
   LOG.warn("FsDatasetImpl.shutdown ignoring InterruptedException " +
          "from LazyWriter.join");
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs-nfs

if (dumpThread != null && dumpThread.isAlive()) {
 dumpThread.interrupt();
 try {
  dumpThread.join(3000);
 } catch (InterruptedException ignored) {
 }
}
origin: org.apache.hadoop/hadoop-common

/** Construct a daemon thread. */
public Daemon(Runnable runnable) {
 super(runnable);
 this.runnable = runnable;
 this.setName(((Object)runnable).toString());
}
origin: org.apache.hadoop/hadoop-hdfs

public void activate(Configuration conf, long blockTotal) {
 pendingReconstruction.start();
 datanodeManager.activate(conf);
 this.redundancyThread.setName("RedundancyMonitor");
 this.redundancyThread.start();
 storageInfoDefragmenterThread.setName("StorageInfoMonitor");
 storageInfoDefragmenterThread.start();
 this.blockReportThread.start();
 mxBeanName = MBeans.register("NameNode", "BlockStats", this);
 bmSafeMode.activate(blockTotal);
}
origin: org.apache.hadoop/hadoop-hdfs

leaseManager.stopMonitor();
if (nnrmthread != null) {
 ((NameNodeResourceMonitor) nnrmthread.getRunnable()).stopMonitor();
 nnrmthread.interrupt();
}
if (nnEditLogRoller != null) {
 ((NameNodeEditLogRoller) nnEditLogRoller.getRunnable()).stop();
 nnEditLogRoller.interrupt();
}
if (lazyPersistFileScrubber != null) {
 ((LazyPersistFileScrubber) lazyPersistFileScrubber.getRunnable()).stop();
 lazyPersistFileScrubber.interrupt();
}
origin: org.apache.hadoop/hadoop-common

void start() {
 daemon.start();
}
origin: org.apache.hadoop/hadoop-common

boolean isAlive() {
 return daemon.isAlive();
}
origin: org.apache.hadoop/hadoop-hdfs

@Override // ClientDatanodeProtocol
public long getBalancerBandwidth() {
 DataXceiverServer dxcs =
           (DataXceiverServer) this.dataXceiverServer.getRunnable();
 return dxcs.balanceThrottler.getBandwidth();
}

origin: org.apache.hadoop/hadoop-hdfs

private void startDiskOutlierDetectionThread() {
 slowDiskDetectionDaemon = new Daemon(new Runnable() {
  @Override
  public void run() {
   // ... periodic slow-disk detection loop (body elided in this snippet) ...
  }
 });
 slowDiskDetectionDaemon.start();
}
origin: org.apache.hadoop/hadoop-hdfs

void close() {
 heartbeatThread.interrupt();
 try {
  // This will have no effect if the thread hasn't yet been started.
  heartbeatThread.join(3000);
 } catch (InterruptedException ignored) {
 }
}

origin: org.apache.hadoop/hadoop-hdfs

public void activate() {
 pathIdCollector = new Daemon(pathIDProcessor);
 pathIdCollector.setName("SPSPathIdProcessor");
 pathIdCollector.start();
}
org.apache.hadoop.util.Daemon

Javadoc

A thread that has called Thread#setDaemon(boolean) with true.
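The Javadoc is terse, so here is a hedged sketch of the two patterns the snippets above rely on: constructing the daemon inside a ThreadGroup, and recovering the wrapped task through getRunnable() so a stop method can be called on it before interrupt() and join(). StoppableTask and every name inside it are invented for illustration; real code uses classes such as PacketResponder or LazyWriter.

import org.apache.hadoop.util.Daemon;

public class DaemonGetRunnableSketch {
 // Hypothetical task type standing in for PacketResponder, LazyWriter, etc.
 static class StoppableTask implements Runnable {
  private volatile boolean running = true;

  @Override
  public void run() {
   while (running && !Thread.currentThread().isInterrupted()) {
    try {
     Thread.sleep(500L); // stand-in for background work
    } catch (InterruptedException e) {
     return; // interrupt is the shutdown signal
    }
   }
  }

  void stop() {
   running = false;
  }
 }

 public static void main(String[] args) throws InterruptedException {
  ThreadGroup group = new ThreadGroup("example-group");
  Daemon daemon = new Daemon(group, new StoppableTask());
  daemon.start();

  // Shutdown pattern from the snippets: flag the task, interrupt, then join with a timeout.
  ((StoppableTask) daemon.getRunnable()).stop();
  daemon.interrupt();
  daemon.join(3000);
  if (daemon.isAlive()) {
   System.err.println("Join on " + daemon.getName() + " timed out");
  }
 }
}

The cast through getRunnable() works because Daemon keeps a reference to the Runnable it was constructed with, which is why the snippets above can call task-specific methods such as close() or stopMonitor() on it.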

Most used methods

  • <init>
    Construct a daemon thread to be part of a specified thread group.
  • interrupt
  • join
  • start
  • isAlive
  • setName
  • getRunnable
  • getName
