private String getStatus(long complete, long total) {
    if (total >= 1024) {
        return toKB(complete) + "/" + toKB(total) + " KB ";
    } else if (total >= 0) {
        return complete + "/" + total + " B ";
    } else if (complete >= 1024) {
        // total < 0 means the size is unknown: report only the completed count
        return toKB(complete) + " KB ";
    } else {
        return complete + " B ";
    }
}
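// getStatus() relies on a toKB() helper that is not shown above. A minimal
// sketch, assuming it rounds a byte count up to whole kilobytes; the actual
// implementation may differ:
private static long toKB(long bytes) {
    // Round up so a partial kilobyte still counts as one
    return (bytes + 1023) / 1024;
}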
default String getName() {
    return getMetadata().getName();
}
public void startAll() {
    // Start everything from configuration unless components were registered
    // explicitly via add(); the manual list takes precedence when non-empty.
    if (manualComponentsToStart.isEmpty()) {
        internalStart(componentsToStart);
    } else {
        internalStart(manualComponentsToStart);
    }
}
public ZookeeperBootstrap() {
    if (zookeeperLocalCluster == null) {
        try {
            configuration = HadoopUtils.INSTANCE.loadConfigFile(null);
            loadConfig();
        } catch (BootstrapException e) {
            LOGGER.error("unable to load configuration", e);
        }
    }
}
public HiveMetastoreBootstrap(URL url) {
    if (hiveLocalMetaStore == null) {
        try {
            configuration = HadoopUtils.INSTANCE.loadConfigFile(url);
            loadConfig();
        } catch (BootstrapException e) {
            LOGGER.error("unable to load configuration", e);
        }
    }
}
public HBaseBootstrap(URL url) {
    if (hbaseLocalCluster == null) {
        try {
            configuration = HadoopUtils.INSTANCE.loadConfigFile(url);
            loadConfig();
        } catch (BootstrapException e) {
            LOGGER.error("unable to load configuration", e);
        }
    }
}
public ConfluentKsqlRestBootstrap() {
    try {
        configuration = HadoopUtils.INSTANCE.loadConfigFile(null);
        loadConfig();
    } catch (BootstrapException e) {
        LOGGER.error("unable to load configuration", e);
    }
}
public SolrCloudBootstrap() {
    if (solrServer == null) {
        try {
            configuration = HadoopUtils.INSTANCE.loadConfigFile(null);
            loadConfig();
        } catch (BootstrapException e) {
            LOGGER.error("unable to load configuration", e);
        }
    }
}
public KafkaBootstrap(URL url) {
    if (kafkaLocalCluster == null) {
        try {
            configuration = HadoopUtils.INSTANCE.loadConfigFile(url);
            loadConfig();
        } catch (BootstrapException e) {
            LOGGER.error("unable to load configuration", e);
        }
    }
}
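// The constructors above share one pattern: load a config file (a null URL
// falls back to the default) and populate fields via loadConfig(). A
// hypothetical usage sketch; the resource name and the classpath lookup are
// assumptions, not taken from the original:
URL conf = Thread.currentThread().getContextClassLoader()
        .getResource("my-hadoop-unit.properties"); // hypothetical resource name
KafkaBootstrap kafka = new KafkaBootstrap(conf);   // passing null selects the default config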
@Override
public void execute() throws MojoExecutionException, MojoFailureException {
    if (skipTests) {
        getLog().info("Hadoop Unit's stop goal is skipped");
    } else {
        HadoopBootstrapRemoteUtils utils = new HadoopBootstrapRemoteUtils(project, session, pluginManager);
        hadoopUnitPath = utils.getHadoopUnitPath(hadoopUnitPath, getLog());
        getLog().info("is going to stop hadoop unit with executable " + ((exec == null) ? "./hadoop-unit-standalone" : exec));
        utils.operateRemoteHadoopUnit(hadoopUnitPath, outputFile, "stop", exec);

        // Block until the wrapper confirms shutdown in its log file
        Path hadoopLogFilePath = Paths.get(hadoopUnitPath, "wrapper.log");
        getLog().info("is going to tail log file");
        utils.tailLogFileUntilFind(hadoopLogFilePath, "<-- Wrapper Stopped", getLog());
        getLog().info("hadoop unit stopped");
    }
}
// Fragment of a JVM shutdown hook; the original only showed the run() body
// and its trailing "} });", so the enclosing registration call below is an
// assumption reconstructed from that closer.
Runtime.getRuntime().addShutdownHook(new Thread() {
    @Override
    public void run() {
        LOGGER.info("All services are going to be stopped");
        bootstrap.stopAll();
    }
});
private void loadConfig() throws BootstrapException {
    HadoopUtils.INSTANCE.setHadoopHome();
    host = configuration.getString(HdfsConfig.HDFS_NAMENODE_HOST_KEY);
    port = configuration.getInt(HdfsConfig.HDFS_NAMENODE_PORT_KEY);
    httpPort = configuration.getInt(HdfsConfig.HDFS_NAMENODE_HTTP_PORT_KEY);
    tempDirectory = configuration.getString(HdfsConfig.HDFS_TEMP_DIR_KEY);
    numDatanodes = configuration.getInt(HdfsConfig.HDFS_NUM_DATANODES_KEY);
    enablePermission = configuration.getBoolean(HdfsConfig.HDFS_ENABLE_PERMISSIONS_KEY);
    format = configuration.getBoolean(HdfsConfig.HDFS_FORMAT_KEY);
    enableRunningUserAsProxy = configuration.getBoolean(HdfsConfig.HDFS_ENABLE_RUNNING_USER_AS_PROXY_USER);
    // Replication falls back to 1 when the key is absent from the config file
    replication = configuration.getInt(HdfsConfig.HDFS_REPLICATION_KEY, 1);
}
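// The values assigned above come from a properties file loaded by
// loadConfigFile(). An illustrative snippet; the key names are inferred from
// the HdfsConfig constant names and the values are placeholders, so treat
// both as assumptions:
//
//   hdfs.namenode.host=localhost
//   hdfs.namenode.port=20112
//   hdfs.temp.dir=/tmp/embedded_hdfs
//   hdfs.num.datanodes=1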
public HadoopBootstrap add(String componentName) throws NotFoundServiceException {
    // Register a component by name for manual startup; returns this for chaining
    manualComponentsToStart.add(getService(componentName));
    return this;
}
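// Because add() returns this, components can be registered fluently before
// startAll() dispatches to the manual list. A hypothetical usage sketch; the
// component names and the INSTANCE accessor are assumptions:
HadoopBootstrap.INSTANCE
        .add("ZOOKEEPER")  // hypothetical component name
        .add("HDFS")       // hypothetical component name
        .startAll();       // starts only the manually registered components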
// Fragment of an anonymous Guice module; the original only showed the
// configure() body and its trailing "} });", so the enclosing
// createInjector(...) call below is an assumption reconstructed from that closer.
Injector injector = Guice.createInjector(new AbstractModule() {
    @Override
    protected void configure() {
        bind(ILoggerFactory.class).toInstance(slf4jLoggerFactory);
    }
});
// Same shutdown-hook pattern as above, but calling stopAll() on the enclosing
// instance; the registration call is again an assumption reconstructed from
// the fragment's trailing "} });".
Runtime.getRuntime().addShutdownHook(new Thread() {
    @Override
    public void run() {
        LOGGER.info("All services are going to be stopped");
        stopAll();
    }
});