/**
 * Returns the Solr client for the current site, connecting to the cluster and
 * lazily creating the site's collection and alias on first use.
 *
 * @return the shared {@link CloudSolrClient}, connected, with the site's
 *         collection and alias ensured to exist when an alias name resolves
 */
public SolrClient getSiteServer() {
    BroadleafRequestContext ctx = BroadleafRequestContext.getBroadleafRequestContext();
    Site site = ctx.getNonPersistentSite();

    CloudSolrClient client = (CloudSolrClient) primaryServer;
    // connect() must run before any ZooKeeper-backed admin operations below.
    client.connect();

    String aliasName = getSiteAliasName(site);
    if (aliasName != null) {
        String collectionName = getSiteCollectionName(site);
        createCollectionIfNotExist(client, collectionName);
        // BUGFIX: create the alias under aliasName; the original passed
        // collectionName twice, leaving the computed alias name unused.
        createAliasIfNotExist(client, collectionName, aliasName);
    }

    return client;
}
/**
 * Returns the Solr client used for reindexing the current site, connecting to
 * the cluster and lazily creating the reindex collection and alias on first use.
 *
 * @return the shared {@link CloudSolrClient}, connected, with the site's
 *         reindex collection and alias ensured to exist when an alias resolves
 */
public SolrClient getSiteReindexServer() {
    BroadleafRequestContext ctx = BroadleafRequestContext.getBroadleafRequestContext();
    Site site = ctx.getNonPersistentSite();

    CloudSolrClient client = (CloudSolrClient) primaryServer;
    // connect() must run before any ZooKeeper-backed admin operations below.
    client.connect();

    String aliasName = getSiteReindexAliasName(site);
    if (aliasName != null) {
        String collectionName = getSiteReindexCollectionName(site);
        createCollectionIfNotExist(client, collectionName);
        // BUGFIX: create the alias under aliasName; the original passed
        // collectionName twice, leaving the computed alias name unused.
        createAliasIfNotExist(client, collectionName, aliasName);
    }

    return client;
}
public SolrIndex(final Configuration config) throws BackendException { Preconditions.checkArgument(config!=null); configuration = config; mode = Mode.parse(config.get(SOLR_MODE)); dynFields = config.get(DYNAMIC_FIELDS); keyFieldIds = parseKeyFieldsForCollections(config); maxResults = config.get(INDEX_MAX_RESULT_SET_SIZE); ttlField = config.get(TTL_FIELD); waitSearcher = config.get(WAIT_SEARCHER); if (mode==Mode.CLOUD) { String zookeeperUrl = config.get(SolrIndex.ZOOKEEPER_URL); CloudSolrClient cloudServer = new CloudSolrClient(zookeeperUrl, true); cloudServer.connect(); solrClient = cloudServer; } else if (mode==Mode.HTTP) { HttpClient clientParams = HttpClientUtil.createClient(new ModifiableSolrParams() {{ add(HttpClientUtil.PROP_ALLOW_COMPRESSION, config.get(HTTP_ALLOW_COMPRESSION).toString()); add(HttpClientUtil.PROP_CONNECTION_TIMEOUT, config.get(HTTP_CONNECTION_TIMEOUT).toString()); add(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, config.get(HTTP_MAX_CONNECTIONS_PER_HOST).toString()); add(HttpClientUtil.PROP_MAX_CONNECTIONS, config.get(HTTP_GLOBAL_MAX_CONNECTIONS).toString()); }}); solrClient = new LBHttpSolrClient(clientParams, config.get(HTTP_URLS)); } else { throw new IllegalArgumentException("Unsupported Solr operation mode: " + mode); } }
// NOTE(review): fragment — the try block opened here is closed outside this view.
// Cast the configured reindex server to its cloud implementation so cluster
// alias metadata can be read through ZooKeeper.
CloudSolrClient reindexCloudClient = (CloudSolrClient) solrConfiguration.getReindexServer();
try {
    // connect() initializes the primary client's ZK state reader before use.
    primaryCloudClient.connect();
    Aliases aliases = primaryCloudClient.getZkStateReader().getAliases();
    // Alias name -> collection name(s) mapping as reported by the cluster.
    Map<String, String> aliasCollectionMap = aliases.getCollectionAliasMap();
// Establish the cluster connection up front: connect() initializes the
// client's internal ZooKeeper-backed state before it is dereferenced later.
// NOTE(review): the original comment said this prevents an NPE — presumably a
// state reader accessed before connect(); confirm against the calling code.
primary.connect();
@Override public boolean isValid(int timeout) throws SQLException { // check that the connection isn't closed and able to connect within the timeout try { if(!isClosed()) { this.client.connect(timeout, TimeUnit.SECONDS); return true; } } catch (InterruptedException|TimeoutException ignore) { // Ignore error since connection is not valid } return false; }
/**
 * Returns a cached {@link CloudSolrClient} for the given ZooKeeper host,
 * creating and connecting a new one on the first request for that host.
 *
 * @param zkHost the ZooKeeper connect string, used as the cache key
 * @return a connected client for {@code zkHost}
 */
public synchronized CloudSolrClient getCloudSolrClient(String zkHost) {
    if (solrClients.containsKey(zkHost)) {
        return (CloudSolrClient) solrClients.get(zkHost);
    }
    CloudSolrClient created = new CloudSolrClient(zkHost);
    created.connect();
    solrClients.put(zkHost, created);
    return created;
}
/** * Connect to a cluster. If the cluster is not ready, retry connection up to a given timeout. * @param duration the timeout * @param timeUnit the units of the timeout * @throws TimeoutException if the cluster is not ready after the timeout * @throws InterruptedException if the wait is interrupted */ public void connect(long duration, TimeUnit timeUnit) throws TimeoutException, InterruptedException { log.info("Waiting for {} {} for cluster at {} to be ready", duration, timeUnit, zkHost); long timeout = System.nanoTime() + timeUnit.toNanos(duration); while (System.nanoTime() < timeout) { try { connect(); log.info("Cluster at {} ready", zkHost); return; } catch (RuntimeException e) { // not ready yet, then... } TimeUnit.MILLISECONDS.sleep(250); } throw new TimeoutException("Timed out waiting for cluster"); }
private boolean connectToZK(CloudSolrClient cloudSolrServer) { log.debug("connecting to {}", cloudSolrServer.getZkHost()); boolean connected = false; for (int i = 0; i < 3; i++) { try { cloudSolrServer.connect(); connected = true; break; } catch (Exception e) { log.warn("could not connect to ZK", e); try { Thread.sleep(3000); } catch (InterruptedException e1) { // do nothing } } } return connected; }
private boolean connectToZK(CloudSolrClient cloudSolrServer) { log.debug("connecting to {}", cloudSolrServer.getZkHost()); boolean connected = false; for (int i = 0; i < 3; i++) { try { cloudSolrServer.connect(); connected = true; break; } catch (Exception e) { log.warn("could not connect to ZK", e); try { Thread.sleep(3000); } catch (InterruptedException e1) { // do nothing } } } return connected; }
/**
 * Initializes the cloud client field, preferring a cached instance when a
 * client cache is available; otherwise builds and connects a fresh client
 * against {@code zkHost}.
 */
private void setCloudSolrClient() {
    if (this.cache == null) {
        final List<String> zkHosts = new ArrayList<>();
        zkHosts.add(zkHost);
        this.cloudSolrClient = new Builder(zkHosts, Optional.empty()).build();
        // A freshly built client must be connected before use.
        this.cloudSolrClient.connect();
    } else {
        this.cloudSolrClient = this.cache.getCloudSolrClient(zkHost);
    }
}
/**
 * Returns a cached {@link CloudSolrClient} for the given ZooKeeper host,
 * building (optionally with the shared HTTP client) and connecting a new one
 * on the first request for that host.
 *
 * @param zkHost the ZooKeeper connect string, used as the cache key
 * @return a connected client for {@code zkHost}
 */
public synchronized CloudSolrClient getCloudSolrClient(String zkHost) {
    if (solrClients.containsKey(zkHost)) {
        return (CloudSolrClient) solrClients.get(zkHost);
    }
    final List<String> zkHosts = new ArrayList<String>();
    zkHosts.add(zkHost);
    CloudSolrClient.Builder builder = new CloudSolrClient.Builder(zkHosts, Optional.empty());
    if (httpClient != null) {
        // Reuse the shared HTTP client when one has been configured.
        builder = builder.withHttpClient(httpClient);
    }
    CloudSolrClient created = builder.build();
    created.connect();
    solrClients.put(zkHost, created);
    return created;
}
/**
 * Download a named config from Zookeeper to a location on the filesystem
 * @param configName the name of the config
 * @param downloadPath the path to write config files to
 * @throws IOException if an I/O exception occurs
 */
public void downloadConfig(String configName, Path downloadPath) throws IOException {
    // connect() ensures zkStateReader is initialized before it is used below.
    connect();
    zkStateReader.getConfigManager().downloadConfigDir(configName, downloadPath);
}
/**
 * Reads the current cluster state through the wrapped cloud client, connecting
 * it first so its ZK state reader is initialized.
 *
 * @param authorizedSolrClient wrapper holding the cloud client to query
 * @return the cluster state reported by ZooKeeper
 */
static ClusterState getClusterState(AuthorizedSolrClient<CloudSolrClient> authorizedSolrClient) {
    CloudSolrClient client = authorizedSolrClient.solrClient;
    client.connect();
    return client.getZkStateReader().getClusterState();
}
/**
 * Upload a set of config files to Zookeeper and give it a name
 *
 * NOTE: You should only allow trusted users to upload configs. If you
 * are allowing client access to zookeeper, you should protect the
 * /configs node against unauthorised write access.
 *
 * @param configPath {@link java.nio.file.Path} to the config files
 * @param configName the name of the config
 * @throws IOException if an IO error occurs
 */
public void uploadConfig(Path configPath, String configName) throws IOException {
    // connect() ensures zkStateReader is initialized before it is used below.
    connect();
    zkStateReader.getConfigManager().uploadConfigDir(configPath, configName);
}
/**
 * Opens the CloudSolrStream: resets per-open state, obtains (or builds and
 * connects) the cloud client, then constructs and opens the shard streams.
 *
 * @throws IOException if a stream cannot be constructed or opened
 */
public void open() throws IOException {
    // IDIOM: diamond operator instead of raw-type constructions, so the
    // fields' generic element types are preserved without unchecked warnings.
    this.tuples = new TreeSet<>();
    this.solrStreams = new ArrayList<>();
    this.eofTuples = Collections.synchronizedMap(new HashMap<>());
    if (this.cache != null) {
        this.cloudSolrClient = this.cache.getCloudSolrClient(zkHost);
    } else {
        this.cloudSolrClient = new CloudSolrClient(zkHost);
        // A freshly built client must be connected before use.
        this.cloudSolrClient.connect();
    }
    constructStreams();
    openStreams();
}
/**
 * One-time cloud test setup: creates the cloud client for the default
 * collection, connects it, and wires a ChaosMonkey over the cluster state.
 *
 * @throws Exception if client creation or connection fails
 */
protected void initCloud() throws Exception {
    assert !cloudInit;
    cloudInit = true;

    cloudClient = createCloudClient(DEFAULT_COLLECTION);
    cloudClient.connect();

    ZkStateReader stateReader = cloudClient.getZkStateReader();
    chaosMonkey = new ChaosMonkey(zkServer, stateReader, DEFAULT_COLLECTION, shardToJetty, shardToLeaderJetty);
}
/**
 * Lazily creates and caches the shared cloud client used by tests. The client
 * is bound to the default collection and connected before being published.
 *
 * @return the shared, connected {@link CloudSolrClient}
 */
protected CloudSolrClient getCommonCloudSolrClient() {
    synchronized (this) {
        if (commonCloudSolrClient == null) {
            CloudSolrClient created =
                getCloudSolrClient(zkServer.getZkAddress(), random().nextBoolean(), 5000, 120000);
            created.setDefaultCollection(DEFAULT_COLLECTION);
            created.connect();
            log.info("Created commonCloudSolrClient with updatesToLeaders={} and parallelUpdates={}",
                created.isUpdatesToLeaders(), created.isParallelUpdates());
            commonCloudSolrClient = created;
        }
    }
    return commonCloudSolrClient;
}
// NOTE(review): example snippet — connects a cloud client, reads the cluster
// state for "collection1", then always closes the client.
final CloudSolrClient server = new CloudSolrClient("localhost:2181");
try {
    // connect() is required before the ZK state reader can be used.
    server.connect();
    final ClusterState clusterState = server.getZkStateReader().getClusterState();
    // The DocCollection describes the collection's topology.
    final DocCollection collection = clusterState.getCollection("collection1");
    // Resolve the leader replica of one shard from the same cluster state.
    Replica leader = clusterState.getLeader("collection1", "shard1");
} catch (Exception e) {
    // NOTE(review): broad catch that swallows all errors — acceptable for a
    // demo, but real code should at least log the exception.
} finally {
    server.close();
}
/**
 * Returns the ZK client backing the cluster's Solr client, connecting the
 * Solr client first if its state reader has not been initialized yet.
 *
 * @return the underlying {@link SolrZkClient}
 */
protected static SolrZkClient zkClient() {
    if (cluster.getSolrClient().getZkStateReader() == null) {
        // Not connected yet: connect() initializes the state reader.
        cluster.getSolrClient().connect();
    }
    return cluster.getSolrClient().getZkStateReader().getZkClient();
}