/**
 * Creates an entry-task scheduler that runs the lock-eviction processor
 * for the given namespace.
 *
 * <p>FOR_EACH scheduling means every scheduled entry gets its own eviction
 * run, rather than collapsing re-schedules into one.
 *
 * @param namespace the namespace whose locks the returned scheduler evicts
 * @return a new FOR_EACH scheduler backed by the global task scheduler
 */
private EntryTaskScheduler<Data, Integer> createScheduler(ObjectNamespace namespace) {
    NodeEngine engine = lockService.getNodeEngine();
    // A single processor instance serves every entry scheduled here.
    LockEvictionProcessor processor = new LockEvictionProcessor(engine, namespace);
    TaskScheduler scheduler = engine.getExecutionService().getGlobalTaskScheduler();
    return EntryTaskSchedulerFactory.newScheduler(scheduler, processor, ScheduleType.FOR_EACH);
}
/**
 * Builds the FOR_EACH lock-eviction scheduler for one namespace, backed by
 * the node's global task scheduler.
 *
 * @param namespace namespace whose expired locks the scheduler will evict
 * @return the newly created entry-task scheduler
 */
private EntryTaskScheduler<Data, Integer> createScheduler(ObjectNamespace namespace) {
    NodeEngine nodeEngine = lockService.getNodeEngine();
    return EntryTaskSchedulerFactory.newScheduler(
            nodeEngine.getExecutionService().getGlobalTaskScheduler(),
            new LockEvictionProcessor(nodeEngine, namespace),
            ScheduleType.FOR_EACH);
}
private void asyncCancelRemoteOperations(final Set<Address> addresses) { final NodeEngine nodeEngine = mapReduceService.getNodeEngine(); TaskScheduler taskScheduler = nodeEngine.getExecutionService().getGlobalTaskScheduler(); taskScheduler.execute(new Runnable() { @Override public void run() { String name = getConfiguration().getName(); String jobId = getConfiguration().getJobId(); for (Address address : addresses) { try { CancelJobSupervisorOperation operation = new CancelJobSupervisorOperation(name, jobId); mapReduceService.processRequest(address, operation); } catch (Exception ignore) { // We can ignore this exception since we just want to cancel the job // and the member may be crashed or unreachable in some way ILogger logger = nodeEngine.getLogger(JobSupervisor.class); logger.finest("Remote node may already be down", ignore); } } } }); }
private void asyncCancelRemoteOperations(final Set<Address> addresses) { final NodeEngine nodeEngine = mapReduceService.getNodeEngine(); TaskScheduler taskScheduler = nodeEngine.getExecutionService().getGlobalTaskScheduler(); taskScheduler.execute(new Runnable() { @Override public void run() { String name = getConfiguration().getName(); String jobId = getConfiguration().getJobId(); for (Address address : addresses) { try { CancelJobSupervisorOperation operation = new CancelJobSupervisorOperation(name, jobId); mapReduceService.processRequest(address, operation); } catch (Exception ignore) { // We can ignore this exception since we just want to cancel the job // and the member may be crashed or unreachable in some way ILogger logger = nodeEngine.getLogger(JobSupervisor.class); logger.finest("Remote node may already be down", ignore); } } } }); }
/**
 * Initializes the service: creates one container per partition and starts
 * the periodic anti-entropy task on the global task scheduler.
 *
 * @param nodeEngine the node engine this service runs on
 * @param properties service initialization properties (unused here)
 */
@Override
public void init(NodeEngine nodeEngine, Properties properties) {
    // Hoisted out of the loop condition: the partition count is loop-invariant,
    // so there is no need to re-query the partition service each iteration.
    int partitionCount = nodeEngine.getPartitionService().getPartitionCount();
    for (int i = 0; i < partitionCount; i++) {
        partitionContainers[i] = new PartitionContainer(this, i);
    }
    // Kick off anti-entropy immediately, then repeat on the configured interval.
    antiEntropyFuture = nodeEngine.getExecutionService().getGlobalTaskScheduler()
            .scheduleWithRepetition(antiEntropyTask, 0, SYNC_INTERVAL_SECONDS, TimeUnit.SECONDS);
}
/**
 * Service bootstrap: builds the per-partition containers and schedules the
 * repeating anti-entropy task (first run immediately, then every
 * {@code SYNC_INTERVAL_SECONDS} seconds).
 *
 * @param nodeEngine node engine providing partition and execution services
 * @param properties init properties (not used by this service)
 */
@Override
public void init(NodeEngine nodeEngine, Properties properties) {
    // Query the loop-invariant partition count once, not on every iteration.
    int partitionCount = nodeEngine.getPartitionService().getPartitionCount();
    for (int partitionId = 0; partitionId < partitionCount; partitionId++) {
        partitionContainers[partitionId] = new PartitionContainer(this, partitionId);
    }
    antiEntropyFuture = nodeEngine.getExecutionService().getGlobalTaskScheduler()
            .scheduleWithRepetition(antiEntropyTask, 0, SYNC_INTERVAL_SECONDS, TimeUnit.SECONDS);
}
/**
 * Creates the base record store for one partition of a replicated map,
 * wiring in the node services, an empty storage instance, and a POSTPONE
 * TTL-eviction scheduler backed by the global task scheduler.
 *
 * @param name                 name of the replicated map
 * @param replicatedMapService owning service, source of config and node engine
 * @param partitionId          the partition this store belongs to
 */
protected AbstractBaseReplicatedRecordStore(String name, ReplicatedMapService replicatedMapService,
                                            int partitionId) {
    this.name = name;
    this.partitionId = partitionId;
    this.replicatedMapService = replicatedMapService;
    this.nodeEngine = replicatedMapService.getNodeEngine();
    this.serializationService = nodeEngine.getSerializationService();
    this.partitionService = nodeEngine.getPartitionService();
    this.eventService = nodeEngine.getEventService();
    this.replicatedMapConfig = replicatedMapService.getReplicatedMapConfig(name);
    // Seed the reference with an empty storage straight away.
    this.storageRef = new AtomicReference<InternalReplicatedMapStorage<K, V>>(
            new InternalReplicatedMapStorage<K, V>());
    this.ttlEvictionScheduler = EntryTaskSchedulerFactory.newScheduler(
            nodeEngine.getExecutionService().getGlobalTaskScheduler(),
            new ReplicatedMapEvictionProcessor(this, nodeEngine, partitionId),
            ScheduleType.POSTPONE);
}
protected AbstractBaseReplicatedRecordStore(String name, ReplicatedMapService replicatedMapService, int partitionId) { this.name = name; this.partitionId = partitionId; this.nodeEngine = replicatedMapService.getNodeEngine(); this.serializationService = nodeEngine.getSerializationService(); this.partitionService = nodeEngine.getPartitionService(); this.eventService = nodeEngine.getEventService(); this.replicatedMapService = replicatedMapService; this.replicatedMapConfig = replicatedMapService.getReplicatedMapConfig(name); this.storageRef = new AtomicReference<InternalReplicatedMapStorage<K, V>>(); this.storageRef.set(new InternalReplicatedMapStorage<K, V>()); this.ttlEvictionScheduler = EntryTaskSchedulerFactory .newScheduler(nodeEngine.getExecutionService().getGlobalTaskScheduler(), new ReplicatedMapEvictionProcessor(this, nodeEngine, partitionId), ScheduleType.POSTPONE); }
/**
 * Wires up replica management for the partition service: migration timeout
 * and replication-parallelism properties, per-partition replica version
 * trackers, and the replica-sync timeout scheduler.
 *
 * @param node             the local node
 * @param partitionService owning internal partition service
 */
PartitionReplicaManager(Node node, InternalPartitionServiceImpl partitionService) {
    this.node = node;
    this.nodeEngine = node.nodeEngine;
    this.logger = node.getLogger(getClass());
    this.partitionService = partitionService;
    partitionStateManager = partitionService.getPartitionStateManager();

    HazelcastProperties props = node.getProperties();
    partitionMigrationTimeout = props.getMillis(GroupProperty.PARTITION_MIGRATION_TIMEOUT);
    maxParallelReplications = props.getInteger(GroupProperty.PARTITION_MAX_PARALLEL_REPLICATIONS);
    // Bounds the number of concurrently running replica syncs.
    replicaSyncSemaphore = new Semaphore(maxParallelReplications);

    int partitionCount = partitionService.getPartitionCount();
    replicaVersions = new PartitionReplicaVersions[partitionCount];
    for (int partitionId = 0; partitionId < replicaVersions.length; partitionId++) {
        replicaVersions[partitionId] = new PartitionReplicaVersions(partitionId);
    }

    TaskScheduler globalScheduler = nodeEngine.getExecutionService().getGlobalTaskScheduler();
    // The reason behind this scheduler to have POSTPONE type is as follows:
    // When a node shifts up in the replica table upon a node failure, it sends a sync request to the partition owner and
    // registers it to the replicaSyncRequests. If another node fails before the already-running sync process completes,
    // the new sync request is simply scheduled to a further time. Again, before the already-running sync process completes,
    // if another node fails for the third time, the already-scheduled sync request should be overwritten with the new one.
    // This is because this node is shifted up to a higher level when the third node failure occurs and its respective sync
    // request will inherently include the backup data that is requested by the previously scheduled sync request.
    replicaSyncTimeoutScheduler = EntryTaskSchedulerFactory.newScheduler(globalScheduler,
            new ReplicaSyncTimeoutProcessor(), ScheduleType.POSTPONE);
    replicaSyncRequests = newSetFromMap(new ConcurrentHashMap<ReplicaFragmentSyncInfo, Boolean>(partitionCount));
}
/**
 * Constructs the replica manager: reads the migration-timeout and
 * max-parallel-replication properties, creates one
 * {@code PartitionReplicaVersions} per partition, and sets up the POSTPONE
 * scheduler used to time out pending replica-sync requests.
 *
 * @param node             local cluster node
 * @param partitionService the partition service that owns this manager
 */
PartitionReplicaManager(Node node, InternalPartitionServiceImpl partitionService) {
    this.node = node;
    this.nodeEngine = node.nodeEngine;
    this.logger = node.getLogger(getClass());
    this.partitionService = partitionService;
    partitionStateManager = partitionService.getPartitionStateManager();

    HazelcastProperties properties = node.getProperties();
    partitionMigrationTimeout = properties.getMillis(GroupProperty.PARTITION_MIGRATION_TIMEOUT);
    maxParallelReplications = properties.getInteger(GroupProperty.PARTITION_MAX_PARALLEL_REPLICATIONS);
    replicaSyncSemaphore = new Semaphore(maxParallelReplications);

    int partitionCount = partitionService.getPartitionCount();
    replicaVersions = new PartitionReplicaVersions[partitionCount];
    for (int i = 0; i < replicaVersions.length; i++) {
        replicaVersions[i] = new PartitionReplicaVersions(i);
    }

    ExecutionService executionService = nodeEngine.getExecutionService();
    TaskScheduler scheduler = executionService.getGlobalTaskScheduler();
    // The reason behind this scheduler to have POSTPONE type is as follows:
    // When a node shifts up in the replica table upon a node failure, it sends a sync request to the partition owner and
    // registers it to the replicaSyncRequests. If another node fails before the already-running sync process completes,
    // the new sync request is simply scheduled to a further time. Again, before the already-running sync process completes,
    // if another node fails for the third time, the already-scheduled sync request should be overwritten with the new one.
    // This is because this node is shifted up to a higher level when the third node failure occurs and its respective sync
    // request will inherently include the backup data that is requested by the previously scheduled sync request.
    replicaSyncTimeoutScheduler = EntryTaskSchedulerFactory.newScheduler(scheduler,
            new ReplicaSyncTimeoutProcessor(), ScheduleType.POSTPONE);
    replicaSyncRequests = newSetFromMap(new ConcurrentHashMap<ReplicaFragmentSyncInfo, Boolean>(partitionCount));
}
/**
 * Creates the queue service, capturing the node's serialization, partition
 * and logging facilities and building the queue-eviction scheduler
 * (POSTPONE type) on top of the global task scheduler.
 *
 * @param nodeEngine the node engine this service belongs to
 */
public QueueService(NodeEngine nodeEngine) {
    this.nodeEngine = nodeEngine;
    this.serializationService = nodeEngine.getSerializationService();
    this.partitionService = nodeEngine.getPartitionService();
    this.logger = nodeEngine.getLogger(QueueService.class);
    // Eviction work is delegated to the shared global task scheduler.
    this.queueEvictionScheduler = EntryTaskSchedulerFactory.newScheduler(
            nodeEngine.getExecutionService().getGlobalTaskScheduler(),
            new QueueEvictionProcessor(nodeEngine), POSTPONE);
}
/**
 * Queue service constructor: stores node services and wires a POSTPONE
 * entry-task scheduler that runs {@link QueueEvictionProcessor} for
 * scheduled queue evictions.
 *
 * @param nodeEngine node engine supplying serialization, partitions and logging
 */
public QueueService(NodeEngine nodeEngine) {
    this.nodeEngine = nodeEngine;
    this.serializationService = nodeEngine.getSerializationService();
    this.partitionService = nodeEngine.getPartitionService();
    this.logger = nodeEngine.getLogger(QueueService.class);
    TaskScheduler scheduler = nodeEngine.getExecutionService().getGlobalTaskScheduler();
    QueueEvictionProcessor evictionProcessor = new QueueEvictionProcessor(nodeEngine);
    this.queueEvictionScheduler = EntryTaskSchedulerFactory.newScheduler(scheduler, evictionProcessor, POSTPONE);
}
/**
 * Creates the expiration manager for the given clear-expired-records task,
 * validating the task period and registering this manager as both a
 * lifecycle listener and a partition-lost listener.
 *
 * @param task       the task that clears expired records; its period must be positive
 * @param nodeEngine the node engine providing the global task scheduler
 */
@SuppressWarnings("checkstyle:magicnumber")
@SuppressFBWarnings({"EI_EXPOSE_REP2"})
public ExpirationManager(ClearExpiredRecordsTask task, NodeEngine nodeEngine) {
    this.task = task;
    this.nodeEngine = nodeEngine;
    this.globalTaskScheduler = nodeEngine.getExecutionService().getGlobalTaskScheduler();
    this.taskPeriodSeconds = checkPositive(task.getTaskPeriodSeconds(),
            "taskPeriodSeconds should be a positive number");
    // NOTE(review): `this` is registered as a listener before construction
    // completes — presumably safe because callbacks cannot fire this early,
    // but worth confirming.
    this.lifecycleService = getHazelcastInstance().getLifecycleService();
    this.lifecycleListenerId = lifecycleService.addLifecycleListener(this);
    this.partitionService = getHazelcastInstance().getPartitionService();
    this.partitionLostListenerId = partitionService.addPartitionLostListener(this);
}
/**
 * Expiration-manager constructor: captures the global task scheduler,
 * checks that the task period is positive, and subscribes this instance to
 * lifecycle and partition-lost events.
 *
 * @param task       clear-expired-records task to be driven by this manager
 * @param nodeEngine node engine of the local member
 */
@SuppressWarnings("checkstyle:magicnumber")
@SuppressFBWarnings({"EI_EXPOSE_REP2"})
public ExpirationManager(ClearExpiredRecordsTask task, NodeEngine nodeEngine) {
    this.task = task;
    this.nodeEngine = nodeEngine;
    this.globalTaskScheduler = nodeEngine.getExecutionService().getGlobalTaskScheduler();
    // Fail fast on a non-positive period rather than silently never running.
    this.taskPeriodSeconds = checkPositive(task.getTaskPeriodSeconds(),
            "taskPeriodSeconds should be a positive number");
    this.lifecycleService = getHazelcastInstance().getLifecycleService();
    this.lifecycleListenerId = lifecycleService.addLifecycleListener(this);
    this.partitionService = getHazelcastInstance().getPartitionService();
    this.partitionLostListenerId = partitionService.addPartitionLostListener(this);
}
/**
 * Builds the near-cache repairing task: a metadata fetcher over the cluster
 * and operation services, combined with the global task scheduler and the
 * local member's UUID.
 *
 * @return a new {@link RepairingTask} for near-cache invalidation repair
 */
private RepairingTask createRepairingInvalidationTask() {
    ClusterService clusterService = nodeEngine.getClusterService();
    OperationService operationService = nodeEngine.getOperationService();
    InvalidationMetaDataFetcher metaDataFetcher = new MemberMapInvalidationMetaDataFetcher(
            clusterService, operationService,
            nodeEngine.getLogger(MemberMapInvalidationMetaDataFetcher.class));
    return new RepairingTask(
            nodeEngine.getProperties(),
            metaDataFetcher,
            nodeEngine.getExecutionService().getGlobalTaskScheduler(),
            serializationService,
            partitionService,
            nodeEngine.getLocalMember().getUuid(),
            nodeEngine.getLogger(RepairingTask.class));
}
/**
 * Creates the repairing task used to keep near-cache invalidation metadata
 * in sync across members.
 *
 * @return a repairing task wired to this node's services
 */
private RepairingTask createRepairingInvalidationTask() {
    ExecutionService executionService = nodeEngine.getExecutionService();
    ClusterService clusterService = nodeEngine.getClusterService();
    OperationService operationService = nodeEngine.getOperationService();
    HazelcastProperties properties = nodeEngine.getProperties();
    String localUuid = nodeEngine.getLocalMember().getUuid();
    // The fetcher pulls invalidation metadata from other members.
    InvalidationMetaDataFetcher fetcher = new MemberMapInvalidationMetaDataFetcher(
            clusterService, operationService,
            nodeEngine.getLogger(MemberMapInvalidationMetaDataFetcher.class));
    return new RepairingTask(properties, fetcher, executionService.getGlobalTaskScheduler(),
            serializationService, partitionService, localUuid, nodeEngine.getLogger(RepairingTask.class));
}
/**
 * Creates the map near-cache manager on top of the base manager, then wires
 * the partition service wrapper, the invalidator and the repairing task.
 *
 * @param mapServiceContext context of the owning map service
 */
public MapNearCacheManager(MapServiceContext mapServiceContext) {
    // getNodeEngine() is called repeatedly here because Java does not allow
    // any statement before the super(...) call.
    super(mapServiceContext.getNodeEngine().getSerializationService(),
            mapServiceContext.getNodeEngine().getExecutionService().getGlobalTaskScheduler(),
            null,
            mapServiceContext.getNodeEngine().getProperties());
    this.mapServiceContext = mapServiceContext;
    this.nodeEngine = mapServiceContext.getNodeEngine();
    this.partitionService = new MemberMinimalPartitionService(nodeEngine.getPartitionService());
    this.partitionCount = partitionService.getPartitionCount();
    this.invalidator = createInvalidator();
    this.repairingTask = createRepairingInvalidationTask();
}
/**
 * Near-cache manager constructor for IMap: initializes the base manager
 * with the node's serialization service, global task scheduler and
 * properties, then builds the invalidation machinery.
 *
 * @param mapServiceContext the map service context providing the node engine
 */
public MapNearCacheManager(MapServiceContext mapServiceContext) {
    // Repeated getNodeEngine() calls are unavoidable: no local variable may
    // be declared before super(...).
    super(mapServiceContext.getNodeEngine().getSerializationService(),
            mapServiceContext.getNodeEngine().getExecutionService().getGlobalTaskScheduler(),
            null,
            mapServiceContext.getNodeEngine().getProperties());
    this.nodeEngine = mapServiceContext.getNodeEngine();
    this.mapServiceContext = mapServiceContext;
    this.partitionService = new MemberMinimalPartitionService(nodeEngine.getPartitionService());
    this.partitionCount = partitionService.getPartitionCount();
    this.invalidator = createInvalidator();
    this.repairingTask = createRepairingInvalidationTask();
}