Refine search
/**
 * Looks up the partitioner in use on the remote Cassandra cluster.
 *
 * @return the cluster's {@link IPartitioner} instance
 * @throws TemporaryBackendException if the cluster could not be contacted (retryable)
 * @throws PermanentBackendException if the reported partitioner class is unknown/misconfigured
 */
@Override
@SuppressWarnings("unchecked")
public IPartitioner getCassandraPartitioner() throws BackendException
{
    final Cluster cluster = clusterContext.getClient();
    try
    {
        final String partitionerName = cluster.describePartitioner();
        return FBUtilities.newPartitioner(partitionerName);
    }
    catch (ConnectionException e)
    {
        // transient network failure: the caller may retry
        throw new TemporaryBackendException(e);
    }
    catch (ConfigurationException e)
    {
        // bad/unknown partitioner class: retrying will not help
        throw new PermanentBackendException(e);
    }
}
// NOTE(review): fragment — the enclosing method is not visible from this view.
// A local-variable declaration cannot be the sole statement of a braceless "if"
// in Java, so this line will not compile as written; the braces were probably
// lost when the snippet was flattened. TODO restore the block and widen the
// scope of solrj to wherever it is consumed.
// Presumably this creates a Solr client pointed at the local node's Solandra
// shard endpoint when addr is the local address — confirm against the caller.
if (addr.equals(FBUtilities.getLocalAddress())) CommonsHttpSolrServer solrj = new CommonsHttpSolrServer("http://" + addr.getHostAddress() + ":" + CassandraUtils.port + "/solandra/" + subIndex, new HttpClient(httpConnections));
public Map<String, String> getLoadMap() { Map<String, String> map = new HashMap<>(); for (Map.Entry<InetAddress,Double> entry : LoadBroadcaster.instance.getLoadInfo().entrySet()) { map.put(entry.getKey().getHostAddress(), FileUtils.stringifyFileSize(entry.getValue())); } // gossiper doesn't see its own updates, so we need to special-case the local node map.put(FBUtilities.getBroadcastAddress().getHostAddress(), getLoadString()); return map; }
/**
 * Narrows the split's locations to just the local node when one of them
 * resolves to an address of this machine; otherwise returns the split's
 * locations unchanged.
 */
private String[] getLocations()
{
    // keep the original pairing order: local addresses outer, split locations inner
    for (InetAddress local : FBUtilities.getAllLocalAddresses())
    {
        for (String candidate : split.getLocations())
        {
            final InetAddress candidateAddress;
            try
            {
                candidateAddress = InetAddress.getByName(candidate);
            }
            catch (UnknownHostException e)
            {
                // locations come from split metadata; an unresolvable one is a programming error
                throw new AssertionError(e);
            }
            if (local.equals(candidateAddress))
                return new String[] { candidate };
        }
    }
    return split.getLocations();
}
// NOTE(review): fragment — the enclosing method is not visible from this view
// and the brace structure appears to have been lost when the snippet was
// flattened: an "else" branch cannot consist of a bare local-variable
// declaration, so this will not compile as written. TODO restore the original
// blocks. Also, "addr", "indexName" and "i" used by the trailing statement are
// declared outside this view — their intended scope cannot be determined here.
if(! Schema.instance.getNonSystemTables().contains(CassandraUtils.keySpace) ) throw new IOException("Solandra keyspace is missing, please import then retry"); else List<InetAddress> endpoints = StorageService.instance.getLiveNaturalEndpoints(CassandraUtils.keySpace, subIndex); DatabaseDescriptor.getEndpointSnitch().sortByProximity(FBUtilities.getLocalAddress(), endpoints); String shard = addr.getHostAddress() + ":" + CassandraUtils.port + "/solandra/" + indexName + "~" + i;
public void listen() { callbacks.reset(); // hack to allow tests to stop/restart MS listen(FBUtilities.getLocalAddress()); if (DatabaseDescriptor.shouldListenOnBroadcastAddress() && !FBUtilities.getLocalAddress().equals(FBUtilities.getBroadcastAddress())) { listen(FBUtilities.getBroadcastAddress()); } listenGate.signalAll(); }
private static void hintMutation(Mutation mutation) { String keyspaceName = mutation.getKeyspaceName(); Token token = mutation.key().getToken(); Iterable<InetAddress> endpoints = StorageService.instance.getNaturalAndPendingEndpoints(keyspaceName, token); ArrayList<InetAddress> endpointsToHint = new ArrayList<>(Iterables.size(endpoints)); // local writes can timeout, but cannot be dropped (see LocalMutationRunnable and CASSANDRA-6510), // so there is no need to hint or retry. for (InetAddress target : endpoints) if (!target.equals(FBUtilities.getBroadcastAddress()) && shouldHint(target)) endpointsToHint.add(target); submitHint(mutation, endpointsToHint, null); }
/**
 * @return true when a replace address is configured and it equals this node's
 *         own broadcast address (replacing a node at the same address).
 */
public static boolean isReplacingSameAddress()
{
    final InetAddress replaceAddress = DatabaseDescriptor.getReplaceAddress();
    if (replaceAddress == null)
        return false;
    return replaceAddress.equals(FBUtilities.getBroadcastAddress());
}
/**
 * Populates {@code seeds} with every configured seed address, excluding this
 * node itself so we never treat ourselves as a gossip seed.
 */
@VisibleForTesting
void buildSeedsList()
{
    for (InetAddress seed : DatabaseDescriptor.getSeeds())
    {
        if (!seed.equals(FBUtilities.getBroadcastAddress()))
            seeds.add(seed);
    }
}
/**
 * Computes this node's tokens via the replication-aware allocation strategy
 * of the given keyspace.
 *
 * @param metadata           current ring metadata
 * @param address            the address tokens are being allocated for
 * @param allocationKeyspace keyspace whose replication strategy drives allocation
 * @param numTokens          number of tokens to allocate
 * @param schemaWaitDelay    how long to wait for schema agreement first
 * @throws ConfigurationException if the allocation keyspace cannot be opened
 */
static Collection<Token> allocateTokens(final TokenMetadata metadata,
                                        InetAddress address,
                                        String allocationKeyspace,
                                        int numTokens,
                                        int schemaWaitDelay)
{
    StorageService.instance.waitForSchema(schemaWaitDelay);
    // a loopback broadcast address indicates a single-node/test setup; skip gossip settling there
    if (!InetAddress.getLoopbackAddress().equals(FBUtilities.getBroadcastAddress()))
        Gossiper.waitToSettle();

    final Keyspace keyspace = Keyspace.open(allocationKeyspace);
    if (keyspace == null)
        throw new ConfigurationException("Problem opening token allocation keyspace " + allocationKeyspace);

    final AbstractReplicationStrategy strategy = keyspace.getReplicationStrategy();
    return TokenAllocation.allocateTokens(metadata, strategy, address, numTokens);
}
/**
 * @return the local listen address when this connection targets ourselves
 *         (our broadcast address); otherwise the possibly-reset remote endpoint
 */
public InetAddress endPoint()
{
    return FBUtilities.getBroadcastAddress().equals(id)
         ? FBUtilities.getLocalAddress()
         : resetEndpoint;
}
// NOTE(review): fragment of node-startup preparation — the enclosing method is
// not visible here and the brace structure appears to have been lost when the
// snippet was flattened: the statements between the replacement check and the
// dangling "else" cannot parse as written. TODO restore the braces around the
// replacement branch.
// Visible intent: reject incompatible replace/join settings, prepare
// replacement tokens + hibernate gossip state when replacing, otherwise check
// for endpoint collisions before bootstrap; then register host id / net
// version / schema app states and start messaging + load broadcasting.
if (DatabaseDescriptor.isReplacing() && !(Boolean.parseBoolean(System.getProperty("cassandra.join_ring", "true")))) throw new ConfigurationException("Cannot set both join_ring=false and attempt to replace a node"); if (DatabaseDescriptor.getReplaceTokens().size() > 0 || DatabaseDescriptor.getReplaceNode() != null) throw new RuntimeException("Replace method removed; use cassandra.replace_address instead"); if (DatabaseDescriptor.isReplacing()) if (!DatabaseDescriptor.isAutoBootstrap()) throw new RuntimeException("Trying to replace_address with auto_bootstrap disabled will not work, check your configuration"); bootstrapTokens = prepareReplacementInfo(); appStates.put(ApplicationState.TOKENS, valueFactory.tokens(bootstrapTokens)); appStates.put(ApplicationState.STATUS, valueFactory.hibernate(true)); else if (shouldBootstrap()) checkForEndpointCollision(); getTokenMetadata().updateHostId(localHostId, FBUtilities.getBroadcastAddress()); appStates.put(ApplicationState.NET_VERSION, valueFactory.networkVersion()); appStates.put(ApplicationState.HOST_ID, valueFactory.hostId(localHostId)); gossipSnitchInfo(); Schema.instance.updateVersionAndAnnounce(); // Ensure we know our own actual Schema UUID in preparation for updates MessagingService.instance().listen(FBUtilities.getLocalAddress()); LoadBroadcaster.instance.startBroadcasting();
private void unbootstrap(Runnable onFinish) throws ExecutionException, InterruptedException { Map<String, Multimap<Range<Token>, InetAddress>> rangesToStream = new HashMap<>(); for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces()) { Multimap<Range<Token>, InetAddress> rangesMM = getChangedRangesForLeaving(keyspaceName, FBUtilities.getBroadcastAddress()); if (logger.isDebugEnabled()) logger.debug("Ranges needing transfer are [{}]", StringUtils.join(rangesMM.keySet(), ",")); rangesToStream.put(keyspaceName, rangesMM); } setMode(Mode.LEAVING, "replaying batch log and streaming data to other nodes", true); // Start with BatchLog replay, which may create hints but no writes since this is no longer a valid endpoint. Future<?> batchlogReplay = BatchlogManager.instance.startBatchlogReplay(); Future<StreamState> streamSuccess = streamRanges(rangesToStream); // Wait for batch log to complete before streaming hints. logger.debug("waiting for batch log processing."); batchlogReplay.get(); setMode(Mode.LEAVING, "streaming hints to other nodes", true); Future hintsSuccess = streamHints(); // wait for the transfer runnables to signal the latch. logger.debug("waiting for stream acks."); streamSuccess.get(); hintsSuccess.get(); logger.debug("stream acks all received."); leaveRing(); onFinish.run(); }
/**
 * Fetches the live natural endpoints for {@code pos} in {@code keyspace},
 * sorted closest-first relative to this node.
 */
public static List<InetAddress> getLiveSortedEndpoints(Keyspace keyspace, RingPosition pos)
{
    List<InetAddress> endpoints = StorageService.instance.getLiveNaturalEndpoints(keyspace, pos);
    // sortByProximity sorts the list in place by snitch-measured proximity to us
    DatabaseDescriptor.getEndpointSnitch().sortByProximity(FBUtilities.getBroadcastAddress(), endpoints);
    return endpoints;
}
public void updateSnitch(String epSnitchClassName, Boolean dynamic, Integer dynamicUpdateInterval, Integer dynamicResetInterval, Double dynamicBadnessThreshold) throws ClassNotFoundException { IEndpointSnitch oldSnitch = DatabaseDescriptor.getEndpointSnitch(); // new snitch registers mbean during construction IEndpointSnitch newSnitch; try { newSnitch = FBUtilities.construct(epSnitchClassName, "snitch"); } catch (ConfigurationException e) { throw new ClassNotFoundException(e.getMessage()); } if (dynamic) { DatabaseDescriptor.setDynamicUpdateInterval(dynamicUpdateInterval); DatabaseDescriptor.setDynamicResetInterval(dynamicResetInterval); DatabaseDescriptor.setDynamicBadnessThreshold(dynamicBadnessThreshold); newSnitch = new DynamicEndpointSnitch(newSnitch); } // point snitch references to the new instance DatabaseDescriptor.setEndpointSnitch(newSnitch); for (String ks : Schema.instance.getKeyspaces()) { Keyspace.open(ks).getReplicationStrategy().snitch = newSnitch; } if (oldSnitch instanceof DynamicEndpointSnitch) ((DynamicEndpointSnitch)oldSnitch).unregisterMBean(); updateTopology(); }
/**
 * Selects endpoints to store the batchlog on, preferring nodes in the local
 * datacenter outside the local rack. When no candidate exists, CL.ANY permits
 * falling back to this node itself; any other level is unavailable.
 *
 * @throws UnavailableException when no batchlog endpoint can be chosen
 */
private static BatchlogEndpoints getBatchlogEndpoints(String localDataCenter, ConsistencyLevel consistencyLevel)
throws UnavailableException
{
    TokenMetadata.Topology topology = StorageService.instance.getTokenMetadata().cachedOnlyTokenMap().getTopology();
    Multimap<String, InetAddress> localEndpoints = HashMultimap.create(topology.getDatacenterRacks().get(localDataCenter));
    String localRack = DatabaseDescriptor.getEndpointSnitch().getRack(FBUtilities.getBroadcastAddress());

    Collection<InetAddress> chosen = new BatchlogManager.EndpointFilter(localRack, localEndpoints).filter();
    if (!chosen.isEmpty())
        return new BatchlogEndpoints(chosen);

    // no remote candidates: CL.ANY allows a local-only batchlog
    if (consistencyLevel == ConsistencyLevel.ANY)
        return new BatchlogEndpoints(Collections.singleton(FBUtilities.getBroadcastAddress()));

    throw new UnavailableException(ConsistencyLevel.ONE, 1, 0);
}
/**
 * Returns the partition-ordering type: based on an explicitly requested
 * partitioner when the parser supplies one, otherwise the configured default.
 */
public static AbstractType<?> getInstance(TypeParser parser)
{
    IPartitioner partitioner = DatabaseDescriptor.getPartitioner();
    Iterator<String> params = parser.getKeyValueParameters().keySet().iterator();
    if (params.hasNext())
    {
        partitioner = FBUtilities.newPartitioner(params.next());
        // at most one partitioner parameter is expected
        assert !params.hasNext();
    }
    return partitioner.partitionOrdering();
}
/**
 * Chooses the endpoint that should serve {@code subIndex}: the closest live
 * replica of the index's routing key, preferring the local node whenever it
 * is itself a replica.
 *
 * @throws RuntimeException when no live endpoint exists for the sub-index
 */
private InetAddress getIndexLocation(String subIndex)
{
    // NOTE(review): getBytes() uses the platform default charset, so routing
    // depends on JVM configuration. Switching to an explicit UTF-8 would be
    // safer but changes placement of existing data — needs a migration plan.
    ByteBuffer indexName = CassandraUtils.hashBytes(subIndex.getBytes());

    List<InetAddress> endpoints = StorageService.instance.getLiveNaturalEndpoints(CassandraUtils.keySpace, indexName);
    if (endpoints.isEmpty())
        throw new RuntimeException("Unable to find a live endpoint for: " + subIndex);

    // hoisted: the local address was looked up three times before
    InetAddress local = FBUtilities.getLocalAddress();
    DatabaseDescriptor.getEndpointSnitch().sortByProximity(local, endpoints);

    // serve locally whenever we are a replica, regardless of sort order
    return endpoints.contains(local) ? local : endpoints.get(0);
}
/**
 * Constructs an instance of the given class, which must have a no-arg or default constructor.
 *
 * @param classname Fully qualified classname.
 * @param readable  Descriptive noun for the role the class plays.
 * @throws ConfigurationException If the class cannot be found.
 */
public static <T> T construct(String classname, String readable) throws ConfigurationException
{
    // resolve the class, then delegate to the Class-based overload
    return construct(FBUtilities.<T>classForName(classname, readable), classname, readable);
}
/** * Clear all locally stored schema information and reset schema to initial state. * Called by user (via JMX) who wants to get rid of schema disagreement. */ public static void resetLocalSchema() { logger.info("Starting local schema reset..."); logger.debug("Truncating schema tables..."); SchemaKeyspace.truncate(); logger.debug("Clearing local schema keyspace definitions..."); Schema.instance.clear(); Set<InetAddress> liveEndpoints = Gossiper.instance.getLiveMembers(); liveEndpoints.remove(FBUtilities.getBroadcastAddress()); // force migration if there are nodes around for (InetAddress node : liveEndpoints) { if (shouldPullSchemaFrom(node)) { logger.debug("Requesting schema from {}", node); FBUtilities.waitOnFuture(submitMigrationTask(node)); break; } } logger.info("Local schema reset is complete."); }