public static synchronized int getAccumuloPersistentVersion(VolumeManager fs) { // It doesn't matter which Volume is used as they should all have the data version stored return getAccumuloPersistentVersion(fs.getVolumes().iterator().next()); }
// NOTE(review): fragment of an upgrade path — reads the on-disk data version and
// enters the upgrade branch when it is older than this release expects. The rest
// of the enclosing method (including the matching '}') is outside this view.
final int accumuloPersistentVersion = ServerUtil.getAccumuloPersistentVersion(fs); if (ServerUtil.persistentVersionNeedsUpgrade(accumuloPersistentVersion)) {
// Fragment: persist the new data version on all volumes, then release anyone
// blocked on the metadata-upgrade latch.
ServerUtil.updateAccumuloVersion(fs, accumuloPersistentVersion); log.info("Upgrade complete"); waitForMetadataUpgrade.countDown();
// Fragment of server startup: verify the stored data version is one this release
// can upgrade from before doing any further work.
// FIX: the original was missing the '}' closing the if-block, which left
// monitorSwappiness(conf) as an unreachable statement after the throw — a Java
// compile error. Restored the brace so monitorSwappiness runs on the success path.
int dataVersion = ServerUtil.getAccumuloPersistentVersion(context.getVolumeManager());
log.info("Data Version {}", dataVersion);
ServerUtil.waitForZookeeperAndHdfs(context);
if (!(canUpgradeFromDataVersion(dataVersion))) {
  throw new RuntimeException("This version of accumulo (" + Constants.VERSION
      + ") is not compatible with files stored using data version " + dataVersion);
}
monitorSwappiness(conf);
// NOTE(review): this span appears garbled by extraction — the dangling
// '+ " save all logs and file a bug.");' string fragment does not attach to any
// visible statement, and the opened try block is unterminated. Restore this
// region from the original source before relying on it; do not edit in place.
final int accumuloPersistentVersion = ServerUtil.getAccumuloPersistentVersion(fs); if (ServerUtil.persistentVersionNeedsUpgrade(accumuloPersistentVersion)) { + " save all logs and file a bug."); ServerUtil.abortIfFateTransactions(getContext()); try { log.info("Upgrading zookeeper");
/**
 * Entry point for the tracer service: parses options, builds the server context,
 * logs in, initializes metrics, runs the server, and closes ZooKeeper on exit.
 *
 * @param args command-line arguments forwarded to {@link ServerOpts}
 * @throws Exception if startup or the server run fails
 */
public static void main(String[] args) throws Exception {
  final String appName = "tracer";
  ServerOpts options = new ServerOpts();
  options.parseArgs(appName, args);
  ServerContext ctx = new ServerContext(options.getSiteConfiguration());
  loginTracer(ctx.getConfiguration());
  MetricsSystemHelper.configure(TraceServer.class.getSimpleName());
  ServerUtil.init(ctx, appName);
  // try-with-resources closes the server even if run() throws; the finally block
  // additionally tears down the ZooKeeper session either way.
  try (TraceServer traceServer = new TraceServer(ctx, options.getAddress())) {
    traceServer.run();
  } finally {
    log.info("tracer stopping");
    ctx.getZooReaderWriter().getZooKeeper().close();
  }
}
/**
 * Utility program that will change the goal state for the master from the command line.
 */
public static void main(String[] args) throws Exception {
  // FIX: MasterGoalState.valueOf never returns null — it throws
  // IllegalArgumentException for an unknown name — so the original
  // 'valueOf(args[0]) == null' check was dead and bad arguments produced an
  // uncaught stack trace instead of the usage message. Validate via try/catch.
  if (args.length != 1 || !isValidGoalState(args[0])) {
    System.err.println(
        "Usage: accumulo " + SetGoalState.class.getName() + " [NORMAL|SAFE_MODE|CLEAN_STOP]");
    System.exit(-1);
  }
  ServerContext context = new ServerContext(new SiteConfiguration());
  SecurityUtil.serverLogin(context.getConfiguration());
  ServerUtil.waitForZookeeperAndHdfs(context);
  // Persist the requested goal state under the master's ZK node, overwriting any
  // previously stored value.
  context.getZooReaderWriter().putPersistentData(
      context.getZooKeeperRoot() + Constants.ZMASTER_GOAL_STATE, args[0].getBytes(UTF_8),
      NodeExistsPolicy.OVERWRITE);
}

/** Returns true iff {@code name} is a valid {@code MasterGoalState} constant. */
private static boolean isValidGoalState(String name) {
  try {
    MasterGoalState.valueOf(name);
    return true;
  } catch (IllegalArgumentException e) {
    return false;
  }
}
/**
 * Builds server-side instance information from the site configuration: switches the
 * singleton manager into SERVER mode, opens the volume manager, resolves the instance
 * ID from the instance-id path in HDFS, and connects a ZooCache to look up the
 * instance name from that ID. Later statements depend on fields set earlier, so the
 * initialization order here is significant.
 *
 * @throws IllegalStateException if the volume manager cannot be created (wraps the
 *         underlying IOException)
 */
ServerInfo(SiteConfiguration config) { SingletonManager.setMode(Mode.SERVER); siteConfig = config; hadoopConf = new Configuration(); try { volumeManager = VolumeManagerImpl.get(siteConfig, hadoopConf); } catch (IOException e) { throw new IllegalStateException(e); } Path instanceIdPath = ServerUtil.getAccumuloInstanceIdPath(volumeManager); instanceID = ZooUtil.getInstanceIDFromHdfs(instanceIdPath, config, hadoopConf); zooKeepers = config.get(Property.INSTANCE_ZK_HOST); zooKeepersSessionTimeOut = (int) config.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT); zooCache = new ZooCacheFactory().getZooCache(zooKeepers, zooKeepersSessionTimeOut); instanceName = InstanceOperationsImpl.lookupInstanceName(zooCache, UUID.fromString(instanceID)); }
/**
 * One-time server bootstrap, in strict order: record the application identity,
 * perform the server (Kerberos) login, log version/instance info, run
 * ServerUtil.init, configure metrics, and enable distributed tracing. When SASL
 * parameters are configured, additionally verifies we are logged in as the
 * expected Kerberos principal.
 *
 * @param appName      application name used for init and tracing
 * @param appClassName class name used to configure the metrics system
 * @param hostname     host this server advertises for tracing
 */
public void setupServer(String appName, String appClassName, String hostname) { applicationName = appName; applicationClassName = appClassName; this.hostname = hostname; SecurityUtil.serverLogin(info.getSiteConfiguration()); log.info("Version " + Constants.VERSION); log.info("Instance " + info.getInstanceID()); ServerUtil.init(this, applicationName); MetricsSystemHelper.configure(applicationClassName); DistributedTrace.enable(hostname, applicationName, getServerConfFactory().getSystemConfiguration()); if (getSaslParams() != null) { // Server-side "client" check to make sure we're logged in as a user we expect to be enforceKerberosLogin(); } }
/**
 * Reads the persisted Accumulo data version stored on a single volume.
 *
 * @param v volume to read from
 * @return the on-disk data version for that volume
 */
public static synchronized int getAccumuloPersistentVersion(Volume v) {
  // Resolve this volume's data-version marker location, then read it through the
  // (FileSystem, Path) overload.
  Path versionLocation = ServerConstants.getDataVersionLocation(v);
  return getAccumuloPersistentVersion(v.getFileSystem(), versionLocation);
}
public static synchronized void updateAccumuloVersion(VolumeManager fs, int oldVersion) { for (Volume volume : fs.getVolumes()) { try { if (getAccumuloPersistentVersion(volume) == oldVersion) { log.debug("Attempting to upgrade {}", volume); Path dataVersionLocation = ServerConstants.getDataVersionLocation(volume); fs.create(new Path(dataVersionLocation, Integer.toString(ServerConstants.DATA_VERSION))) .close(); // TODO document failure mode & recovery if FS permissions cause above to work and below // to fail ACCUMULO-2596 Path prevDataVersionLoc = new Path(dataVersionLocation, Integer.toString(oldVersion)); if (!fs.delete(prevDataVersionLoc)) { throw new RuntimeException("Could not delete previous data version location (" + prevDataVersionLoc + ") for " + volume); } } } catch (IOException e) { throw new RuntimeException("Unable to set accumulo version: an error occurred.", e); } } }
/**
 * Initializes any configured volumes that are not yet initialized. Fixes in this
 * revision: renamed the misspelled local "uinitializedDirs"; the persistent version
 * is now read once and the same value is used in both the compatibility check and
 * the error message (the original recomputed it through a different code path for
 * the message, which could report a value other than the one that failed the check).
 *
 * @param fs volume manager used to create the new directories
 * @param siteConfig site configuration listing the volumes
 * @param hadoopConf Hadoop configuration for filesystem access
 * @throws IOException if the stored data version is incompatible or I/O fails
 */
private static void addVolumes(VolumeManager fs, SiteConfiguration siteConfig,
    Configuration hadoopConf) throws IOException {
  String[] volumeURIs = VolumeConfiguration.getVolumeUris(siteConfig, hadoopConf);

  HashSet<String> initializedDirs = new HashSet<>(
      Arrays.asList(ServerConstants.checkBaseUris(siteConfig, hadoopConf, volumeURIs, true)));

  // Volumes configured but not yet initialized = all volumes minus the initialized set.
  HashSet<String> uninitializedDirs = new HashSet<>(Arrays.asList(volumeURIs));
  uninitializedDirs.removeAll(initializedDirs);

  Path aBasePath = new Path(initializedDirs.iterator().next());
  Path iidPath = new Path(aBasePath, ServerConstants.INSTANCE_ID_DIR);
  Path versionPath = new Path(aBasePath, ServerConstants.VERSION_DIR);

  UUID uuid = UUID.fromString(ZooUtil.getInstanceIDFromHdfs(iidPath, siteConfig, hadoopConf));
  // Warn loudly if the base path we are copying metadata from is itself slated for
  // replacement — data could still be written to it.
  for (Pair<Path,Path> replacementVolume : ServerConstants.getVolumeReplacements(siteConfig,
      hadoopConf)) {
    if (aBasePath.equals(replacementVolume.getFirst())) {
      log.error(
          "{} is set to be replaced in {} and should not appear in {}."
              + " It is highly recommended that this property be removed as data"
              + " could still be written to this volume.",
          aBasePath, Property.INSTANCE_VOLUMES_REPLACEMENTS, Property.INSTANCE_VOLUMES);
    }
  }

  int persistentVersion =
      ServerUtil.getAccumuloPersistentVersion(versionPath.getFileSystem(hadoopConf), versionPath);
  if (persistentVersion != ServerConstants.DATA_VERSION) {
    throw new IOException("Accumulo " + Constants.VERSION + " cannot initialize data version "
        + persistentVersion);
  }

  initDirs(fs, uuid, uninitializedDirs.toArray(new String[0]), true);
}
// NOTE(review): fragment — reads the instance id from HDFS and the persistent data
// version under baseDir/VERSION_DIR; the enclosing try statement and the catch body
// are outside this view.
currentIid = ZooUtil.getInstanceIDFromHdfs(path, conf, hadoopConf); Path vpath = new Path(baseDir, VERSION_DIR); currentVersion = ServerUtil.getAccumuloPersistentVersion(vpath.getFileSystem(hadoopConf), vpath); } catch (Exception e) {