@Override public DataMovementManager newDataMovementManager() { DataMovementManagerImpl moveMgr = new DataMovementManagerImpl(this); return moveMgr; } @Override
/**
 * Sets the forest configuration this batcher distributes work across.
 * A gateway connection manages forest routing internally, so in that
 * case only the internal ForestConfigurationImpl type is accepted.
 *
 * @param forestConfig the configuration to use; must not be null
 * @return this batcher, for call chaining
 * @throws IllegalArgumentException if forestConfig is null, or if an
 *         external configuration is supplied on a gateway connection
 */
@Override
public Batcher withForestConfig(ForestConfiguration forestConfig) {
  if (forestConfig == null) {
    throw new IllegalArgumentException("forestConfig must not be null");
  }
  boolean gatewayConnection =
    moveMgr.getConnectionType() == DatabaseClient.ConnectionType.GATEWAY;
  if (gatewayConnection && !(forestConfig instanceof ForestConfigurationImpl)) {
    throw new IllegalArgumentException("cannot change internal forestConfig when using a gateway");
  }
  this.forestConfig = forestConfig;
  return this;
}
/**
 * Creates a QueryBatcher that draws document URIs from the given iterator.
 *
 * @param iterator the source of URIs; must not be null
 * @return a new QueryBatcher using this manager's forest configuration
 * @throws IllegalArgumentException if iterator is null
 */
@Override
public QueryBatcher newQueryBatcher(Iterator<String> iterator) {
  if (iterator == null) {
    throw new IllegalArgumentException("iterator must not be null");
  }
  QueryBatcherImpl batcherImpl = new QueryBatcherImpl(iterator, this, getForestConfig());
  return newQueryBatcher(batcherImpl);
}
// NOTE(review): fragment — this span begins mid-method (the enclosing
// declaration is not visible in this chunk) and its braces are unbalanced
// here; comments below are hedged where context is missing.
// Look up the forest for this host and attach a client for it.
Forest forest = hosts.get(host);
newHostInfos[i].client = getMoveMgr().getForestClient(forest);
// Per-host client rotation only applies to DIRECT connections; a gateway
// presumably routes through a single endpoint — confirm against the class.
if (getMoveMgr().getConnectionType() == DatabaseClient.ConnectionType.DIRECT) {
  logger.info("Adding DatabaseClient on port {} for host \"{}\" to the rotation", newHostInfos[i].client.getPort(), host);
  String primaryHost = moveMgrImpl.getPrimaryClient().getHost();
  // If the primary client's host was removed, pick a deterministic
  // replacement from the new host list, keyed by the old host's hash.
  // NOTE(review): Math.abs(Integer.MIN_VALUE) is still negative, so this
  // index can be negative for a pathological hashCode — same pattern is
  // fixed elsewhere with Math.floorMod.
  if ( removedHostInfos.containsKey(primaryHost) ) {
    int randomPos = Math.abs(primaryHost.hashCode()) % newHostInfos.length;
    moveMgrImpl.setPrimaryClient(newHostInfos[randomPos].client);
/**
 * Applies a new forest configuration to this query batcher: registers
 * per-forest tracking state for forests not seen before, rebuilds the
 * per-host client list, and — when the batcher has already started —
 * reconciles in-flight work against forests that were added, restarted,
 * or removed.
 *
 * <p>Idiom cleanups: {@code get()==null ? put(...)} replaced with
 * {@link java.util.Map#putIfAbsent}, {@code started == true} simplified,
 * host iteration uses {@code hosts.values()} directly.
 *
 * @param forestConfig the new forest configuration; null-checked by the superclass
 * @return this batcher, for call chaining
 * @throws IllegalStateException if any forest has no preferred host
 */
@Override
public synchronized QueryBatcher withForestConfig(ForestConfiguration forestConfig) {
  super.withForestConfig(forestConfig);
  Forest[] forests = forestConfig.listForests();
  // snapshot of the forests we knew about before this update
  Set<Forest> oldForests = new HashSet<>(forestResults.keySet());
  Map<String,Forest> hosts = new HashMap<>();
  for (Forest forest : forests) {
    if (forest.getPreferredHost() == null) {
      throw new IllegalStateException("Hostname must not be null for any forest");
    }
    hosts.put(forest.getPreferredHost(), forest);
    // lazily register tracking state for forests we haven't seen before
    forestResults.putIfAbsent(forest, new AtomicLong());
    forestIsDone.putIfAbsent(forest, new AtomicBoolean(false));
    retryForestMap.putIfAbsent(forest, new AtomicInteger(0));
  }
  // NOTE(review): forests[0] throws if the configuration lists no forests,
  // matching the original behavior
  logger.info("(withForestConfig) Using forests on {} hosts for \"{}\"",
    hosts.keySet(), forests[0].getDatabaseName());
  // one client per distinct preferred host
  List<DatabaseClient> newClientList = new ArrayList<>();
  for (Forest forest : hosts.values()) {
    newClientList.add(getMoveMgr().getForestClient(forest));
  }
  clientList.set(newClientList);
  // if already running, reconcile existing tasks with the new topology
  boolean started = (threadPool != null);
  if (started && !oldForests.isEmpty()) {
    calucluateDeltas(oldForests, forests);
  }
  return this;
}
private synchronized void calucluateDeltas(Set<Forest> oldForests, Forest[] forests) { // the forests we haven't known about yet Set<Forest> addedForests = new HashSet<>(); // the forests that we knew about but they were black-listed and are no longer black-listed Set<Forest> restartedForests = new HashSet<>(); // any known forest might now be black-listed Set<Forest> blackListedForests = new HashSet<>(oldForests); for ( Forest forest : forests ) { if ( ! oldForests.contains(forest) ) { // we need to do special handling since we're adding this new forest after we're started addedForests.add(forest); } // if we have blackListedTasks for this forest, let's restart them if ( blackListedTasks.get(forest) != null ) restartedForests.add(forest); // this forest is not black-listed blackListedForests.remove(forest); } if ( blackListedForests.size() > 0 ) { DataMovementManagerImpl moveMgrImpl = getMoveMgr(); String primaryHost = moveMgrImpl.getPrimaryClient().getHost(); if ( getHostNames(blackListedForests).contains(primaryHost) ) { int randomPos = Math.abs(primaryHost.hashCode()) % clientList.get().size(); moveMgrImpl.setPrimaryClient(clientList.get().get(randomPos)); } } cleanupExistingTasks(addedForests, restartedForests, blackListedForests); }
/**
 * Returns a DatabaseClient connected to the given forest's preferred host,
 * creating and caching one client per host on first use. A gateway
 * connection always returns the primary client, since the gateway does
 * its own routing.
 *
 * <p>Caching uses a check / lock / re-check sequence to avoid building
 * redundant clients under contention.
 * NOTE(review): the first, unsynchronized read assumes clientMap tolerates
 * concurrent reads (e.g. a ConcurrentHashMap) — confirm its declaration.
 *
 * @param forest the forest to obtain a client for; must not be null
 * @return a cached or newly created client for the forest's preferred host
 * @throws IllegalArgumentException if forest is null
 */
public DatabaseClient getForestClient(Forest forest) {
  if ( forest == null ) throw new IllegalArgumentException("forest must not be null");
  // gateway routes to the correct host itself, so one client suffices
  if (getConnectionType() == DatabaseClient.ConnectionType.GATEWAY) {
    return getPrimaryClient();
  }
  String hostName = forest.getPreferredHost();
  String key = hostName;
  // fast path: cached client for this host
  DatabaseClient client = clientMap.get(key);
  if ( client != null ) return client;
  // since this is shared across threads, let's get an exclusive lock on it before updating it
  synchronized(clientMap) {
    // just to avoid creating unnecessary DatabaseClient instances, let's check one more time if
    // another thread just barely inserted an instance that matches
    client = clientMap.get(key);
    if ( client != null ) return client;
    // same port and security context as the primary client, but pointed
    // at this forest's host and database
    client = DatabaseClientFactory.newClient(
      hostName,
      primaryClient.getPort(),
      forest.getDatabaseName(),
      primaryClient.getSecurityContext()
    );
    clientMap.put(key, client);
  }
  return client;
}
@Override public QueryBatcher newQueryBatcher(RawCombinedQueryDefinition query) { return newQueryBatcherImpl(query); } @Override
/**
 * Returns the primary DatabaseClient of the data-movement manager that
 * drives this batcher. The cast is required because the accessor here
 * exposes the interface type.
 *
 * @return the move manager's primary client
 */
@Override
public DatabaseClient getPrimaryClient() {
  DataMovementManagerImpl managerImpl = (DataMovementManagerImpl) getMoveMgr();
  return managerImpl.getPrimaryClient();
}
/**
 * Returns the configured forest configuration, falling back to reading
 * it from the server when none has been set explicitly.
 *
 * @return the active forest configuration
 */
private ForestConfiguration getForestConfig() {
  return (forestConfig != null) ? forestConfig : readForestConfig();
}
/**
 * Constructs a manager bound to the given client. The client becomes the
 * primary client and seeds the per-host client cache.
 *
 * @param client the client to use as the primary connection
 */
public DataMovementManagerImpl(DatabaseClient client) {
  setPrimaryClient(client);
  // cache the primary client under its host so getForestClient can reuse it
  clientMap.put(primaryClient.getHost(), primaryClient);
}
/**
 * Creates a WriteBatcher pre-wired with the default listeners: host
 * availability handling, job reporting, and no-response handling.
 *
 * @return a new WriteBatcher using this manager's forest configuration
 */
@Override
public WriteBatcher newWriteBatcher() {
  WriteBatcherImpl writeBatcher = new WriteBatcherImpl(this, getForestConfig());
  WriteJobReportListener reportListener = new WriteJobReportListener();
  // failure listeners, registered in the original order
  writeBatcher.onBatchFailure(new HostAvailabilityListener(this));
  writeBatcher.onBatchFailure(reportListener);
  writeBatcher.onBatchFailure(new NoResponseListener(this));
  // the report listener also tracks successful batches
  writeBatcher.onBatchSuccess(reportListener);
  return writeBatcher;
}
// NOTE(review): fragment — this span begins mid-method (the condition
// guarding this early return is not visible in this chunk) and ends
// mid-statement on the builder chain.
return;
// Obtain a client for this forest's host and record when the query started.
DatabaseClient client = ((DataMovementManagerImpl) moveMgr).getForestClient(forest);
Calendar queryStart = Calendar.getInstance();
QueryBatchImpl batch = new QueryBatchImpl()
/**
 * Creates a QueryBatcher over a raw structured query.
 *
 * @param query the raw structured query definition
 * @return a new QueryBatcher for the query
 */
@Override
public QueryBatcher newQueryBatcher(RawStructuredQueryDefinition query) {
  return newQueryBatcherImpl(query);
}
/**
 * Returns the primary DatabaseClient of the data-movement manager that
 * drives this batcher.
 *
 * @return the move manager's primary client
 */
@Override
public DatabaseClient getPrimaryClient() {
  return getMoveMgr().getPrimaryClient();
}
/**
 * Verifies that per-forest clients created by getForestClient share the
 * primary client's connection parameters (port, database, security
 * context) and that the test forests look as expected. Skipped for
 * gateway connections, which do not use per-forest clients.
 */
@Test
public void testArgs() throws Exception {
  if (moveMgr.getConnectionType() == DatabaseClient.ConnectionType.GATEWAY) return;
  int defaultPort = client.getPort();
  Class<?> defaultAuthContext = client.getSecurityContext().getClass();
  ForestConfiguration forestConfig = moveMgr.readForestConfig();
  Forest[] forests = forestConfig.listForests();
  String defaultDatabase = forests[0].getDatabaseName();
  // expect three forests per node
  assertTrue(forests.length % 3 == 0);
  for ( Forest forest : forests ) {
    DatabaseClient forestClient = ((DataMovementManagerImpl) moveMgr).getForestClient(forest);
    // not all forests for a database are on the same host, so all we
    // can check is that the hostname is not null
    assertNotNull(forest.getHost());
    // not all hosts have the original REST server, but all hosts have the uber port
    assertEquals(defaultPort, forestClient.getPort());
    assertEquals(defaultDatabase, forest.getDatabaseName());
    assertEquals(defaultAuthContext, forestClient.getSecurityContext().getClass());
    assertEquals(true, forest.isUpdateable());
    if ( forest.getForestName() == null || ! forest.getForestName().startsWith("java-unittest-") ) {
      fail("Unexpected forestName \"" + forest.getForestName() + "\"");
    }
  }
}
// NOTE(review): closing brace of the enclosing test class (the class
// declaration is outside this chunk)
}
/**
 * Shared implementation behind the typed newQueryBatcher overloads.
 *
 * @param query the query to batch over; must not be null
 * @return a new QueryBatcher for the query
 * @throws IllegalArgumentException if query is null
 */
private QueryBatcher newQueryBatcherImpl(QueryDefinition query) {
  if (query == null) {
    throw new IllegalArgumentException("query must not be null");
  }
  QueryBatcherImpl batcherImpl = new QueryBatcherImpl(query, this, getForestConfig());
  return newQueryBatcher(batcherImpl);
}
/**
 * Creates a QueryBatcher over a raw CTS query.
 *
 * @param query the raw CTS query definition
 * @return a new QueryBatcher for the query
 */
@Override
public QueryBatcher newQueryBatcher(RawCtsQueryDefinition query) {
  return newQueryBatcherImpl(query);
}
@Override public void retryListener(QueryBatch batch, QueryBatchListener queryBatchListener) { // We get the batch and modify the client alone in order to make use // of the new forest client in case if the original host is unavailable. DatabaseClient client = null; Forest[] forests = batch.getBatcher().getForestConfig().listForests(); for(Forest forest : forests) { if(forest.equals(batch.getForest())) client = getMoveMgr().getForestClient(forest); } QueryBatchImpl retryBatch = new QueryBatchImpl() .withClient( client ) .withBatcher( batch.getBatcher() ) .withTimestamp( batch.getTimestamp() ) .withServerTimestamp( batch.getServerTimestamp() ) .withItems( batch.getItems() ) .withJobTicket( batch.getJobTicket() ) .withJobBatchNumber( batch.getJobBatchNumber() ) .withJobResultsSoFar( batch.getJobResultsSoFar() ) .withForestBatchNumber( batch.getForestBatchNumber() ) .withForestResultsSoFar( batch.getForestResultsSoFar() ) .withForest( batch.getForest() ) .withJobTicket( batch.getJobTicket() ); queryBatchListener.processEvent(retryBatch); }
/**
 * Creates a QueryBatcher over a structured query.
 *
 * @param query the structured query definition
 * @return a new QueryBatcher for the query
 */
@Override
public QueryBatcher newQueryBatcher(StructuredQueryDefinition query) {
  return newQueryBatcherImpl(query);
}