public SolrIndex(final Configuration config) throws BackendException { Preconditions.checkArgument(config!=null); configuration = config; mode = Mode.parse(config.get(SOLR_MODE)); dynFields = config.get(DYNAMIC_FIELDS); keyFieldIds = parseKeyFieldsForCollections(config); maxResults = config.get(INDEX_MAX_RESULT_SET_SIZE); ttlField = config.get(TTL_FIELD); waitSearcher = config.get(WAIT_SEARCHER); if (mode==Mode.CLOUD) { String zookeeperUrl = config.get(SolrIndex.ZOOKEEPER_URL); CloudSolrClient cloudServer = new CloudSolrClient(zookeeperUrl, true); cloudServer.connect(); solrClient = cloudServer; } else if (mode==Mode.HTTP) { HttpClient clientParams = HttpClientUtil.createClient(new ModifiableSolrParams() {{ add(HttpClientUtil.PROP_ALLOW_COMPRESSION, config.get(HTTP_ALLOW_COMPRESSION).toString()); add(HttpClientUtil.PROP_CONNECTION_TIMEOUT, config.get(HTTP_CONNECTION_TIMEOUT).toString()); add(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, config.get(HTTP_MAX_CONNECTIONS_PER_HOST).toString()); add(HttpClientUtil.PROP_MAX_CONNECTIONS, config.get(HTTP_GLOBAL_MAX_CONNECTIONS).toString()); }}); solrClient = new LBHttpSolrClient(clientParams, config.get(HTTP_URLS)); } else { throw new IllegalArgumentException("Unsupported Solr operation mode: " + mode); } }
/**
 * Creates a {@link LBHttpSolrClient} based on this builder's configuration.
 *
 * <p>NOTE(review): the javadoc previously referenced {@code HttpSolrClient},
 * which did not match the declared return type.
 *
 * @return a new load-balancing Solr client built from this builder.
 */
public LBHttpSolrClient build() {
    return new LBHttpSolrClient(this);
}
private SolrClient getStandaloneQuerySolrClient() { LBHttpSolrClient lbHttpSolrClient = null; try { if (StringUtils.isEmpty(solrSlaves) && StringUtils.isNotEmpty(solrMaster)) { LOG.debug("Creating LBHttpSolrClient using solrMaster {}", solrMaster); lbHttpSolrClient = new LBHttpSolrClient(solrMaster); } else if (StringUtils.isNotEmpty(solrSlaves)) { LOG.debug("Creating LBHttpSolrClient using solrSlaves {}", solrSlaves); lbHttpSolrClient = new LBHttpSolrClient(solrSlaves); if (solrAllowMasterQueriesEnabled && StringUtils.isNotEmpty(solrMaster)) { LOG.debug("Adding solrMaster {} to the LBHttpSolrClient", solrSlaves); lbHttpSolrClient.addSolrServer(solrMaster); } } else if (StringUtils.isEmpty(solrSlaves) && StringUtils.isEmpty(solrMaster)) { // unexpected throw new AEMSolrSearchException("Initialization failed. " + "Either 'solr.master' or 'solr.slaves' properties are missing for Standalone mode."); } else { // Do nothing } } catch (MalformedURLException e) { LOG.error("Error for malformed URL.", e); } catch (AEMSolrSearchException e) { LOG.error("Solr client initialization failed.", e); } lbHttpSolrClient.setParser(new XMLResponseParser()); return lbHttpSolrClient; }
solrServer = new LBHttpSolrClient( conf.getString( CONF_HTTP_SERVERS).split(","));
solrServer = new LBHttpSolrClient( conf.getString( CONF_HTTP_SERVERS).split(","));
/**
 * Builds a new SolRDF proxy instance.
 *
 * @return a new SolRDF proxy instance.
 * @throws UnableToBuildSolRDFClientException in case of build failure.
 */
public SolRDF build() throws UnableToBuildSolRDFClientException {
    // Fall back to the default endpoint when the caller configured none.
    if (endpoints.isEmpty()) {
        endpoints.add(DEFAULT_ENDPOINT);
    }
    // FIXME: for DatasetAccessor and (HTTP) query execution service we also need something like LBHttpSolrServer
    final String firstEndpointAddress = endpoints.iterator().next();
    try {
        // Client selection (nested ternary below):
        //   - zkHost set            -> CloudSolrClient (SolrCloud via ZooKeeper)
        //   - exactly one endpoint  -> plain HttpSolrClient
        //   - several endpoints     -> load-balancing LBHttpSolrClient
        // The dataset accessor and SPARQL endpoint always use the FIRST
        // endpoint only (see FIXME above).
        return new SolRDF(
            DatasetAccessorFactory.createHTTP(
                firstEndpointAddress + graphStoreProtocolEndpointPath),
            firstEndpointAddress + sparqlEndpointPath,
            zkHost != null
                ? new CloudSolrClient(zkHost)
                : (endpoints.size() == 1)
                    ? new HttpSolrClient(endpoints.iterator().next(), httpClient)
                    : new LBHttpSolrClient(httpClient, endpoints.toArray(new String[endpoints.size()])));
    } catch (final Exception exception) {
        throw new UnableToBuildSolRDFClientException(exception);
    }
}
}
// Track whether this instance created the HTTP client (and therefore
// owns its lifecycle) or is borrowing one supplied by the caller.
if (httpClient == null) {
    this.clientIsInternal = true;
    this.myClient = HttpClientUtil.createClient(null);
} else {
    this.clientIsInternal = false;
    this.myClient = httpClient;
}
// Load-balanced client configured for Solr's binary (javabin) wire format.
this.lbClient = new LBHttpSolrClient(myClient);
this.lbClient.setRequestWriter(new BinaryRequestWriter());
this.lbClient.setParser(new BinaryResponseParser());
/**
 * Creates a SolrCloud-aware client.
 *
 * @param zkHost a ZooKeeper client endpoint.
 * @param updatesToLeaders when {@code true}, updates are sent only to shard leaders.
 * @param httpClient the {@link HttpClient} used for all requests, or {@code null}
 *                   to let this client create (and own) its own; a supplied client
 *                   should use a multi-threaded connection manager.
 * @see #CloudSolrClient(String) for full description and details on zkHost
 */
public CloudSolrClient(String zkHost, boolean updatesToLeaders, HttpClient httpClient) {
    this.zkHost = zkHost;
    // A null httpClient means we create our own and are responsible for it.
    if (httpClient == null) {
        this.clientIsInternal = true;
        this.myClient = HttpClientUtil.createClient(null);
    } else {
        this.clientIsInternal = false;
        this.myClient = httpClient;
    }
    // Inner load balancer uses Solr's binary (javabin) request/response format.
    this.lbClient = new LBHttpSolrClient(myClient);
    this.lbClient.setRequestWriter(new BinaryRequestWriter());
    this.lbClient.setParser(new BinaryResponseParser());
    this.updatesToLeaders = updatesToLeaders;
    shutdownLBHttpSolrServer = true;
    lbClient.addQueryParams(STATE_VERSION);
}
// Split the configured Solr URL(s) into individual endpoints.
String[] solrUrlElements = StringUtils.split(SolrClientUrl);
// Admin-level client balanced across all configured endpoints.
// (Previous revision was syntactically broken: the second assignment sat
// inside the first catch block, after a throw, followed by an orphan catch.)
try {
    this.adminServer = new LBHttpSolrClient(solrUrlElements);
} catch (MalformedURLException e) {
    throw new GoraException(e);
}
// Per-core client. NOTE: the old code concatenated the String[] itself
// ("solrUrlElements + \"/\" + core"), which produces "[Ljava.lang.String;@..."
// — the URL string was clearly intended here.
try {
    this.server = new LBHttpSolrClient(SolrClientUrl + "/" + mapping.getCoreName());
} catch (MalformedURLException e) {
    throw new GoraException(e);
}
this.clientIsInternal = true; this.myClient = HttpClientUtil.createClient(null); this.lbClient = new LBHttpSolrClient(myClient); this.lbClient.setRequestWriter(new BinaryRequestWriter()); this.lbClient.setParser(new BinaryResponseParser());
this.clientIsInternal = httpClient == null; this.myClient = httpClient == null ? HttpClientUtil.createClient(null) : httpClient; this.lbClient = new LBHttpSolrClient(myClient); this.lbClient.setRequestWriter(new BinaryRequestWriter()); this.lbClient.setParser(new BinaryResponseParser());
}}); // closes the double-brace ModifiableSolrParams initializer started above (HTTP client config)
// Load-balancing client distributing requests across the configured HTTP URLs.
solrClient = new LBHttpSolrClient(clientParams, config.get(HTTP_URLS));
}}); // end of the anonymous ModifiableSolrParams block begun earlier (outside this view)
// Build the load-balanced Solr client over every URL listed under HTTP_URLS.
solrClient = new LBHttpSolrClient(clientParams, config.get(HTTP_URLS));