@Override
public void startActiveServices() throws IOException {
  try {
    // Bring the namesystem into active state, then start the trash emptier
    // that only runs on the active NameNode.
    namesystem.startActiveServices();
    startTrashEmptier(getConf());
  } catch (Throwable t) {
    // Failing to become active is unrecoverable; shut down rather than
    // run in a half-transitioned state.
    doImmediateShutdown(t);
  }
}
@Override
public void startStandbyServices() throws IOException {
  try {
    // Enter standby mode; the configuration drives tailer/checkpointer setup
    // inside the namesystem.
    namesystem.startStandbyServices(getConf());
  } catch (Throwable t) {
    // Any failure to enter standby is fatal for this NameNode instance.
    doImmediateShutdown(t);
  }
}
@Override // ReconfigurableBase protected String reconfigurePropertyImpl(String property, String newVal) throws ReconfigurationException { final DatanodeManager datanodeManager = namesystem.getBlockManager() .getDatanodeManager(); if (property.equals(DFS_HEARTBEAT_INTERVAL_KEY)) { return reconfHeartbeatInterval(datanodeManager, property, newVal); } else if (property.equals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY)) { return reconfHeartbeatRecheckInterval(datanodeManager, property, newVal); } else if (property.equals(FS_PROTECTED_DIRECTORIES)) { return reconfProtectedDirectories(newVal); } else if (property.equals(HADOOP_CALLER_CONTEXT_ENABLED_KEY)) { return reconfCallerContextEnabled(newVal); } else if (property.equals(ipcClientRPCBackoffEnable)) { return reconfigureIPCBackoffEnabled(newVal); } else if (property.equals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY)) { return reconfigureSPSModeEvent(newVal, property); } else { throw new ReconfigurationException(property, newVal, getConf().get( property)); } }
private String reconfHeartbeatInterval(final DatanodeManager datanodeManager, final String property, final String newVal) throws ReconfigurationException { namesystem.writeLock(); try { if (newVal == null) { // set to default datanodeManager.setHeartbeatInterval(DFS_HEARTBEAT_INTERVAL_DEFAULT); return String.valueOf(DFS_HEARTBEAT_INTERVAL_DEFAULT); } else { long newInterval = getConf() .getTimeDurationHelper(DFS_HEARTBEAT_INTERVAL_KEY, newVal, TimeUnit.SECONDS); datanodeManager.setHeartbeatInterval(newInterval); return String.valueOf(datanodeManager.getHeartbeatInterval()); } } catch (NumberFormatException nfe) { throw new ReconfigurationException(property, newVal, getConf().get( property), nfe); } finally { namesystem.writeUnlock(); LOG.info("RECONFIGURE* changed heartbeatInterval to " + datanodeManager.getHeartbeatInterval()); } }
private String reconfHeartbeatRecheckInterval( final DatanodeManager datanodeManager, final String property, final String newVal) throws ReconfigurationException { namesystem.writeLock(); try { if (newVal == null) { // set to default datanodeManager.setHeartbeatRecheckInterval( DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); return String.valueOf(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); } else { datanodeManager.setHeartbeatRecheckInterval(Integer.parseInt(newVal)); return String.valueOf(datanodeManager.getHeartbeatRecheckInterval()); } } catch (NumberFormatException nfe) { throw new ReconfigurationException(property, newVal, getConf().get( property), nfe); } finally { namesystem.writeUnlock(); LOG.info("RECONFIGURE* changed heartbeatRecheckInterval to " + datanodeManager.getHeartbeatRecheckInterval()); } }
// Fragment: reads the automatic-failover flag, then dispatches on the request
// source; the switch cases close outside this view, so behavior per source is
// not verifiable here — NOTE(review): confirm against the full method.
boolean autoHaEnabled = getConf().getBoolean( DFS_HA_AUTO_FAILOVER_ENABLED_KEY, DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT); switch (req.getSource()) {
// Upper bound on inotify events returned in one RPC response, taken from the
// NameNode configuration (fragment; the enclosing method is outside this view).
int maxEventsPerRPC = nn.getConf().getInt( DFSConfigKeys.DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_KEY, DFSConfigKeys.DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_DEFAULT);
// NOTE(review): this span is garbled — two variants of the same
// ReconfigurationException construction are spliced together mid-string
// literal ("...must " followed immediately by getConf().get(property)),
// which cannot compile. Restore this SPS-mode reconfiguration logic from
// version control before editing further; left byte-identical here.
|| StoragePolicySatisfierMode.fromString(newVal) == null) { throw new ReconfigurationException(property, newVal, getConf().get(property), new HadoopIllegalArgumentException( "For enabling or disabling storage policy satisfier, must " getConf().get(property), new HadoopIllegalArgumentException( "Enabling or disabling storage policy satisfier service on " .getSPSManager() != null); if (!spsCreated) { spsCreated = namesystem.getBlockManager().createSPSManager(getConf(), newVal);
// Continuation of a builder-style call chain whose receiver is above this
// view; wires a drop-behind caching strategy, the client cache context, the
// NameNode configuration, and an anonymous RemotePeerFactory.
setCachingStrategy(CachingStrategy.newDropBehind()). setClientCacheContext(dfs.getClientContext()). setConfiguration(namenode.getConf()). setRemotePeerFactory(new RemotePeerFactory() { @Override
// Fragment of NameNode startup: resolves per-nameservice/per-namenode generic
// keys, runs initialize(), then takes the HA-context write lock. Both try
// blocks close outside this view.
try { initializeGenericKeys(conf, nsId, namenodeId); initialize(getConf()); try { haContext.writeLock();
/**
 * Start the RPC server that serves the datanode protocol. When no separate
 * datanode address is configured, client requests and datanode requests
 * share one server instead.
 */
public void startDNServer() throws IOException {
  InetSocketAddress dnAddr = NameNode.getDNProtocolAddress(getConf());
  int defaultHandlers = getConf().getInt("dfs.namenode.handler.count", 10);
  if (dnAddr != null) {
    // Dedicated endpoint for datanodes; handler count may be tuned
    // independently of the client-facing server.
    int dnHandlers =
        getConf().getInt(DATANODE_PROTOCOL_HANDLERS, defaultHandlers);
    this.dnProtocolServer = RPC.getServer(this, dnAddr.getHostName(),
        dnAddr.getPort(), dnHandlers, false, getConf());
    // The configured port may have been ephemeral (0); publish the actual
    // bound address back into the configuration.
    this.dnProtocolAddress = dnProtocolServer.getListenerAddress();
    NameNode.setDNProtocolAddress(getConf(),
        dnProtocolAddress.getHostName() + ":" + dnProtocolAddress.getPort());
    LOG.info("Datanodes endpoint is up at: " + this.dnProtocolAddress);
  }
  if (this.dnProtocolServer == null) {
    // No dedicated datanode server: the client server handles everything.
    this.startServerForClientRequests();
  } else {
    this.dnProtocolServer.start();
  }
  startTrashEmptier(getConf());
}
// NOTE(review): garbled span — service-authorization policy setup, ephemeral
// port-availability probes, and metrics/namesystem/HTTP-server construction
// from at least two different contexts are spliced together (note the
// dangling ".getMethodsSigFingerPrint(...)" call with no receiver). This
// cannot compile as-is; restore from version control. Left byte-identical.
getConf().getBoolean( ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) { PolicyProvider policyProvider = (PolicyProvider)(ReflectionUtils.newInstance( getConf().getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, HDFSPolicyProvider.class, PolicyProvider.class), getConf())); SecurityUtil.setPolicy(new ConfiguredPolicy(getConf(), policyProvider)); InetSocketAddress clientSocket = NameNode.getAddress(getConf()); ServerSocket socket = new ServerSocket(); socket.bind(clientSocket); socket.close(); InetSocketAddress dnSocket = NameNode.getDNProtocolAddress(getConf()); if (dnSocket != null) { socket = new ServerSocket(); .getMethodsSigFingerPrint(ClientProtocol.class, serverVersion); myMetrics = new NameNodeMetrics(getConf(), this); this.clusterName = getConf().get("dfs.cluster.name"); this.namesystem = new FSNamesystem(this, getConf()); startHttpServer(getConf());
/**
 * Build an MXBean helper for the NameNode at the given address, attaching
 * the in-process NameNode when its RPC address matches.
 */
private NamenodeMXBeanHelper getNNHelper(InetSocketAddress isa)
    throws IOException, MalformedObjectNameException {
  if (localnn != null) {
    Configuration runningConf = localnn.getConf();
    // Compare against the local NameNode's advertised RPC address.
    if (NameNode.getAddress(runningConf).equals(isa)) {
      return new NamenodeMXBeanHelper(isa, conf, localnn);
    }
  }
  // Remote (or no local) NameNode: helper without a local handle.
  return new NamenodeMXBeanHelper(isa, conf);
}
/**
public void startServerForClientRequests() throws IOException { if (this.server == null) { InetSocketAddress socAddr = NameNode.getAddress(getConf()); int handlerCount = getConf().getInt("dfs.namenode.handler.count", 10); // create rpc server this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(), handlerCount, false, getConf()); // The rpc-server port can be ephemeral... ensure we have the correct info this.serverAddress = this.server.getListenerAddress(); FileSystem.setDefaultUri(getConf(), getUri(serverAddress)); if (this.httpServer != null) { // This means the server is being started once out of safemode // and jetty is initialized already this.httpServer.setAttribute("name.node.address", getNameNodeAddress()); } LOG.info("Namenode up at: " + this.serverAddress); this.server.start(); } }
/** * {@inheritDoc} */ @Override public void reconfigurePropertyImpl(String property, String newVal) throws ReconfigurationException { // just pass everything to the namesystem if (namesystem.isPropertyReconfigurable(property)) { namesystem.reconfigureProperty(property, newVal); } else if ("fs.trash.interval".equals(property)) { try { if (newVal == null) { // set to default trash.setDeleteInterval(60L * TrashPolicyDefault.MSECS_PER_MINUTE); } else { trash.setDeleteInterval((long)( Float.valueOf(newVal) * TrashPolicyDefault.MSECS_PER_MINUTE)); } LOG.info("RECONFIGURE* changed trash deletion interval to " + newVal); } catch (NumberFormatException e) { throw new ReconfigurationException(property, newVal, getConf().get(property)); } } else { throw new ReconfigurationException(property, newVal, getConf().get(property)); } }
/**
 * Bring up a single-node MiniYARN cluster and a MiniDFS cluster for tests,
 * then capture both configurations with simple-auth fallback enabled.
 *
 * @throws Exception if either mini-cluster fails to start.
 */
public void start() throws Exception {
  YarnConfiguration yarnConf = new YarnConfiguration();
  yarnConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 256);
  yarnConf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
      ResourceScheduler.class);

  HdfsConfiguration hdfsConf = new HdfsConfiguration();
  hdfsConf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  yarnCluster = new MiniYARNCluster("MiniTonY", numNodeManagers, 1, 1);
  // Bug fix: numDataNodes was set twice (1, then REPLICATION); the first
  // call was dead. Set it once to the intended value.
  dfsCluster = new MiniDFSCluster.Builder(hdfsConf)
      .numDataNodes(REPLICATION).build();
  yarnCluster.init(yarnConf);
  yarnCluster.start();
  dfsCluster.waitActive();

  yarnClusterConf = yarnCluster.getConfig();
  hdfsClusterConf = dfsCluster.getNameNode().getConf();
  // Allow SASL clients to fall back to simple auth against these clusters.
  yarnClusterConf.setBoolean("ipc.client.fallback-to-simple-auth-allowed", true);
  hdfsClusterConf.setBoolean("ipc.client.fallback-to-simple-auth-allowed", true);
}