/** * Called by preAllocateIds when we know that a large number of Id's is going * to be needed shortly. */ protected void loadLargeAllocation(final int allocateSize) { // preAllocateIds was called with a relatively large batchSize // so we will just go ahead and load those anyway in background backgroundExecutor.execute(new Runnable() { public void run() { loadMoreIds(allocateSize, null); } }); }
public void periodicTrim(BackgroundExecutor executor) { EvictionRunnable trim = new EvictionRunnable(); // default to trimming the cache every 60 seconds long trimFreqSecs = (trimFrequency == 0) ? 60 : trimFrequency; executor.executePeriodically(trim, trimFreqSecs, TimeUnit.SECONDS); }
/** * Called by preAllocateIds when we know that a large number of Id's is going to * be needed shortly. */ protected void loadLargeAllocation(final int allocateSize) { // preAllocateIds was called with a relatively large batchSize // so we will just go ahead and load those anyway in background backgroundExecutor.execute(new Runnable() { public void run() { loadMoreIds(allocateSize, null); } }); }
/**
 * Register the periodic cache trim task against the server's background executor.
 */
public void init(EbeanServer server) {
  server.getBackgroundExecutor()
      .executePeriodically(new TrimTask(), trimFrequency, TimeUnit.SECONDS);
}
/** * Called by preAllocateIds when we know that a large number of Id's is going * to be needed shortly. */ protected void loadLargeAllocation(final int allocateSize) { // preAllocateIds was called with a relatively large batchSize // so we will just go ahead and load those anyway in background backgroundExecutor.execute(() -> loadMoreIds(allocateSize, null)); }
/**
 * Schedule the periodic trim task using the server's background executor.
 */
public void init(EbeanServer server) {
  final BackgroundExecutor bgExecutor = server.getBackgroundExecutor();
  final TrimTask trimTask = new TrimTask();
  bgExecutor.executePeriodically(trimTask, trimFrequency, TimeUnit.SECONDS);
}
/** * Load another batch of Id's using a background thread. */ protected void loadBatchInBackground() { // single threaded processing... synchronized (backgroundLoadMonitor) { if (currentlyBackgroundLoading > 0){ // skip as already background loading if (logger.isLoggable(Level.FINE)){ logger.log(Level.FINE, "... skip background sequence load (another load in progress)"); } return; } currentlyBackgroundLoading = batchSize; backgroundExecutor.execute(new Runnable() { public void run() { loadMoreIds(batchSize, null); synchronized (backgroundLoadMonitor) { currentlyBackgroundLoading = 0; } } }); } }
/** * Load the query tuning information from it's data store. */ @Override public void startup() { if (queryTuning) { loadTuningFile(); if (isRuntimeTuningUpdates()) { // periodically gather and update query tuning server.getBackgroundExecutor().executePeriodically(new ProfilingUpdate(), profilingUpdateFrequency, TimeUnit.SECONDS); } } }
/** * Load another batch of Id's using a background thread. */ protected void loadBatchInBackground() { // single threaded processing... synchronized (backgroundLoadMonitor) { if (currentlyBackgroundLoading > 0) { // skip as already background loading if (logger.isLoggable(Level.FINE)) { logger.log(Level.FINE, "... skip background sequence load (another load in progress)"); } return; } currentlyBackgroundLoading = batchSize; backgroundExecutor.execute(new Runnable() { public void run() { loadMoreIds(batchSize, null); synchronized (backgroundLoadMonitor) { currentlyBackgroundLoading = 0; } } }); } }
/** * Load another batch of Id's using a background thread. */ protected void loadBatchInBackground() { // single threaded processing... synchronized (backgroundLoadMonitor) { if (currentlyBackgroundLoading > 0) { // skip as already background loading logger.debug("... skip background sequence load (another load in progress)"); return; } currentlyBackgroundLoading = batchSize; backgroundExecutor.execute(() -> { loadMoreIds(batchSize, null); synchronized (backgroundLoadMonitor) { currentlyBackgroundLoading = 0; } }); } }
/** * Prepare and then send/log the changeSet. */ public void sendChangeLog(final ChangeSet changeSet) { // can set userId, userIpAddress & userContext if desired if (changeLogPrepare.prepare(changeSet)) { // call the log method in background backgroundExecutor.execute(() -> changeLogListener.log(changeSet)); } }
/**
 * Process a local committed transaction.
 */
public void notifyOfCommit(SpiTransaction transaction) {
  try {
    if (TXN_LOGGER.isDebugEnabled()) {
      TXN_LOGGER.debug(transaction.getLogPrefix() + "Commit");
    }
    // notify the local L2 cache synchronously, then push the remaining
    // notifications (cluster/listeners) onto a background thread
    final PostCommitProcessing processing =
        new PostCommitProcessing(clusterManager, this, transaction);
    processing.notifyLocalCache();
    backgroundExecutor.execute(processing.backgroundNotify());
  } catch (Exception ex) {
    logger.error("NotifyOfCommit failed. L2 Cache potentially not notified.", ex);
  }
}
/**
 * Process a Transaction that comes from another framework or local code.
 * <p>
 * For cases where raw SQL/JDBC or other frameworks are used this can
 * invalidate the appropriate parts of the cache.
 * </p>
 */
public void externalModification(TransactionEventTable tableEvents) {
  final TransactionEvent event = new TransactionEvent();
  event.add(tableEvents);
  final PostCommitProcessing processing =
      new PostCommitProcessing(clusterManager, this, event);
  // invalidate the local cache synchronously, remaining notifications go async
  processing.notifyLocalCache();
  backgroundExecutor.execute(processing.backgroundNotify());
}
/** * Process a Transaction that comes from another framework or local code. * <p> * For cases where raw SQL/JDBC or other frameworks are used this can * invalidate the appropriate parts of the cache. * </p> */ public void externalModification(TransactionEventTable tableEvents) { TransactionEvent event = new TransactionEvent(); event.add(tableEvents); PostCommitProcessing postCommit = new PostCommitProcessing(clusterManager, this, null, event); // invalidate parts of local cache and index postCommit.notifyLocalCacheIndex(); backgroundExecutor.execute(postCommit.notifyPersistListeners()); }
/** * Process a Transaction that comes from another framework or local code. * <p> * For cases where raw SQL/JDBC or other frameworks are used this can * invalidate the appropriate parts of the cache. * </p> */ public void externalModification(TransactionEventTable tableEvents) { TransactionEvent event = new TransactionEvent(); event.add(tableEvents); PostCommitProcessing postCommit = new PostCommitProcessing(clusterManager, this, null, event); // invalidate parts of local cache and index postCommit.notifyLocalCacheIndex(); backgroundExecutor.execute(postCommit.notifyPersistListeners()); }
/** * Process a local committed transaction. */ public void notifyOfCommit(SpiTransaction transaction) { try { log(transaction.getLogBuffer()); PostCommitProcessing postCommit = new PostCommitProcessing(clusterManager, this, transaction, transaction.getEvent()); postCommit.notifyLocalCacheIndex(); postCommit.notifyCluster(); // cluster and text indexing backgroundExecutor.execute(postCommit.notifyPersistListeners()); for (TransactionEventListener listener : transactionEventListeners) { listener.postTransactionCommit(transaction); } if (commitDebugLevel >= 1) { logger.info("Transaction ["+transaction.getId()+"] commit"); } } catch (Exception ex) { String m = "NotifyOfCommit failed. Cache/Lucene potentially not notified."; logger.log(Level.SEVERE, m, ex); } }
/** * Process a local committed transaction. */ public void notifyOfCommit(SpiTransaction transaction) { try { log(transaction.getLogBuffer()); PostCommitProcessing postCommit = new PostCommitProcessing(clusterManager, this, transaction, transaction.getEvent()); postCommit.notifyLocalCacheIndex(); postCommit.notifyCluster(); // cluster and text indexing backgroundExecutor.execute(postCommit.notifyPersistListeners()); for (TransactionEventListener listener : transactionEventListeners) { listener.postTransactionCommit(transaction); } if (commitDebugLevel >= 1) { logger.info("Transaction ["+transaction.getId()+"] commit"); } } catch (Exception ex) { String m = "NotifyOfCommit failed. Cache/Lucene potentially not notified."; logger.log(Level.SEVERE, m, ex); } }
// Submit the already-constructed future task for asynchronous execution.
// NOTE(review): these are fragments - the enclosing methods that build
// `future` are not visible here; confirm each call site's context.
backgroundExecutor.execute(future);
backgroundExecutor.execute(future);
backgroundExecutor.execute(future);