/** Returns the primary {@link DatabaseClient}, delegating to the data movement manager. */
@Override
public DatabaseClient getPrimaryClient() {
  DatabaseClient primary = getMoveMgr().getPrimaryClient();
  return primary;
}
/**
 * Installs a new forest configuration: records per-forest counters for any
 * forest not seen before, rebuilds the per-host client list, and — if the job
 * is already running — reconciles the old and new forest sets.
 *
 * @param forestConfig the forest configuration to use; must list at least one forest
 * @return this batcher, for fluent chaining
 * @throws IllegalStateException if no forests are listed or any forest has a null preferred host
 */
@Override
public synchronized QueryBatcher withForestConfig(ForestConfiguration forestConfig) {
  super.withForestConfig(forestConfig);
  Forest[] forests = forestConfig.listForests();
  // guard: the original code dereferenced forests[0] unconditionally below,
  // which would throw ArrayIndexOutOfBoundsException on an empty list
  if ( forests == null || forests.length == 0 ) {
    throw new IllegalStateException("forestConfig must list at least one forest");
  }
  Set<Forest> oldForests = new HashSet<>(forestResults.keySet());
  Map<String,Forest> hosts = new HashMap<>();
  for ( Forest forest : forests ) {
    if ( forest.getPreferredHost() == null ) {
      throw new IllegalStateException("Hostname must not be null for any forest");
    }
    hosts.put(forest.getPreferredHost(), forest);
    // lazily initialize per-forest tracking state the first time a forest appears
    forestResults.computeIfAbsent(forest, f -> new AtomicLong());
    forestIsDone.computeIfAbsent(forest, f -> new AtomicBoolean(false));
    retryForestMap.computeIfAbsent(forest, f -> new AtomicInteger(0));
  }
  logger.info("(withForestConfig) Using forests on {} hosts for \"{}\"",
    hosts.keySet(), forests[0].getDatabaseName());
  // one client per distinct preferred host
  List<DatabaseClient> newClientList = new ArrayList<>();
  for ( Forest forest : hosts.values() ) {
    newClientList.add(getMoveMgr().getForestClient(forest));
  }
  clientList.set(newClientList);
  boolean started = (threadPool != null);
  // if already running, reconcile added / restarted / black-listed forests
  if ( started && ! oldForests.isEmpty() ) calucluateDeltas(oldForests, forests);
  return this;
}
/**
 * Reconciles an old forest set against a newly installed forest list,
 * partitioning forests into added / restarted / black-listed groups and
 * handing them to {@code cleanupExistingTasks}. Called only after the job has
 * started (see {@code withForestConfig}).
 *
 * NOTE(review): method name is misspelled ("calucluateDeltas") but is kept as-is
 * because the caller uses this exact name.
 *
 * @param oldForests the forests known before the configuration change
 * @param forests    the forests in the new configuration
 */
private synchronized void calucluateDeltas(Set<Forest> oldForests, Forest[] forests) {
  // the forests we haven't known about yet
  Set<Forest> addedForests = new HashSet<>();
  // the forests that we knew about but they were black-listed and are no longer black-listed
  Set<Forest> restartedForests = new HashSet<>();
  // any known forest might now be black-listed; start with all old forests and
  // remove each one that reappears in the new list
  Set<Forest> blackListedForests = new HashSet<>(oldForests);
  for ( Forest forest : forests ) {
    if ( ! oldForests.contains(forest) ) {
      // we need to do special handling since we're adding this new forest after we're started
      addedForests.add(forest);
    }
    // if we have blackListedTasks for this forest, let's restart them
    if ( blackListedTasks.get(forest) != null ) restartedForests.add(forest);
    // this forest is not black-listed
    blackListedForests.remove(forest);
  }
  if ( blackListedForests.size() > 0 ) {
    DataMovementManagerImpl moveMgrImpl = getMoveMgr();
    String primaryHost = moveMgrImpl.getPrimaryClient().getHost();
    if ( getHostNames(blackListedForests).contains(primaryHost) ) {
      // the primary host itself is black-listed, so deterministically pick a
      // replacement client from the current client list (hash of the host name,
      // not actually random despite the variable name)
      int randomPos = Math.abs(primaryHost.hashCode()) % clientList.get().size();
      moveMgrImpl.setPrimaryClient(clientList.get().get(randomPos));
    }
  }
  cleanupExistingTasks(addedForests, restartedForests, blackListedForests);
}
threadPool.execute(new QueryTask(getMoveMgr(), this, forest, query, 1, 1));
/**
 * Re-runs the query for the forest recorded in {@code queryEvent}, resuming at
 * the position after the results already retrieved for that forest. Runs the
 * retry synchronously on the calling thread (not on the thread pool).
 *
 * @param queryEvent        the event describing the forest/batch position to retry from
 * @param callFailListeners whether failure listeners fire if the retry also fails
 * @throws IllegalStateException if the event's forest is no longer in the current forest config
 */
private void retry(QueryEvent queryEvent, boolean callFailListeners) {
  if ( isStopped() == true ) {
    logger.warn("Job is now stopped, aborting the retry");
    return;
  }
  // look the forest up in the *current* config rather than trusting the event's copy
  Forest retryForest = null;
  for ( Forest forest : getForestConfig().listForests() ) {
    if ( forest.equals(queryEvent.getForest()) ) {
      // while forest and queryEvent.getForest() have equivalent forest id,
      // we expect forest to have the currently available host info
      retryForest = forest;
      break;
    }
  }
  if ( retryForest == null ) {
    throw new IllegalStateException("Forest for queryEvent (" + queryEvent.getForest().getForestName() +
      ") is not in current getForestConfig()");
  }
  // we're obviously not done with this forest
  forestIsDone.get(retryForest).set(false);
  retryForestMap.get(retryForest).incrementAndGet();
  // resume one past the results already seen for this forest
  long start = queryEvent.getForestResultsSoFar() + 1;
  logger.trace("retryForest {} on retryHost {} at start {}", retryForest.getForestName(),
    retryForest.getPreferredHost(), start);
  QueryTask runnable = new QueryTask(getMoveMgr(), this, retryForest, query,
    queryEvent.getForestBatchNumber(), start, queryEvent.getJobBatchNumber(), callFailListeners);
  // run in-line on this thread, not via threadPool.execute
  runnable.run();
}
/*
private synchronized void startQuerying() { boolean consistentSnapshotFirstQueryHasRun = false; for ( Forest forest : getForestConfig().listForests() ) { QueryTask runnable = new QueryTask(getMoveMgr(), this, forest, query, 1, 1); if ( consistentSnapshot == true && consistentSnapshotFirstQueryHasRun == false ) { // let's run this first time in-line so we'll have the serverTimestamp set // before we launch all the parallel threads runnable.run(); consistentSnapshotFirstQueryHasRun = true; } else { threadPool.execute(runnable); } } }
@Override public void retryListener(QueryBatch batch, QueryBatchListener queryBatchListener) { // We get the batch and modify the client alone in order to make use // of the new forest client in case if the original host is unavailable. DatabaseClient client = null; Forest[] forests = batch.getBatcher().getForestConfig().listForests(); for(Forest forest : forests) { if(forest.equals(batch.getForest())) client = getMoveMgr().getForestClient(forest); } QueryBatchImpl retryBatch = new QueryBatchImpl() .withClient( client ) .withBatcher( batch.getBatcher() ) .withTimestamp( batch.getTimestamp() ) .withServerTimestamp( batch.getServerTimestamp() ) .withItems( batch.getItems() ) .withJobTicket( batch.getJobTicket() ) .withJobBatchNumber( batch.getJobBatchNumber() ) .withJobResultsSoFar( batch.getJobResultsSoFar() ) .withForestBatchNumber( batch.getForestBatchNumber() ) .withForestResultsSoFar( batch.getForestResultsSoFar() ) .withForest( batch.getForest() ) .withJobTicket( batch.getJobTicket() ); queryBatchListener.processEvent(retryBatch); }