@Override
public void start() {
  // One dedicated (non-daemon) thread performs the periodic health refreshes.
  executorService = new DelegateHealthStateRefresherExecutorService(
    Executors.newSingleThreadScheduledExecutor(
      new ThreadFactoryBuilder()
        .setDaemon(false)
        .setNameFormat("health_state_refresh-%d")
        .build()));
  // The refresher publishes this node's health into the Hazelcast-shared state.
  SharedHealthStateImpl sharedHealthState = new SharedHealthStateImpl(hzMember);
  healthStateRefresher = new HealthStateRefresher(executorService, nodeHealthProvider, sharedHealthState);
  healthStateRefresher.start();
}
/**
 * Returns the {@link NodeHealth} of cluster members from the replicated map,
 * excluding entries that are stale (older than 30s of cluster time) or that belong
 * to members no longer part of the Hazelcast cluster.
 */
@Override
public Set<NodeHealth> readAll() {
  // Cluster-wide clock, not the local one: timestamps in the map are cluster times.
  long clusterTime = hzMember.getClusterTime();
  // Entries with a timestamp at or before this cutoff are considered too old.
  long timeout = clusterTime - TIMEOUT_30_SECONDS;
  Map<String, TimestampedNodeHealth> sqHealthState = readReplicatedMap();
  Set<String> hzMemberUUIDs = hzMember.getMemberUuids();
  // NOTE(review): despite their names, outOfDate(..) and ofNonExistentMember(..)
  // appear to KEEP fresh entries of current members (the tests expect stale and
  // unknown-member entries to be dropped) — confirm against the predicate definitions.
  Set<NodeHealth> existingNodeHealths = sqHealthState.entrySet().stream()
    .filter(outOfDate(timeout))
    .filter(ofNonExistentMember(hzMemberUUIDs))
    .map(entry -> entry.getValue().getNodeHealth())
    .collect(Collectors.toSet());
  if (LOG.isTraceEnabled()) {
    // Copy into a HashMap so the log renders a plain snapshot of the replicated map.
    LOG.trace("Reading {} and keeping {}", new HashMap<>(sqHealthState), existingNodeHealths);
  }
  // Defensive immutable copy: callers must not mutate the shared result.
  return ImmutableSet.copyOf(existingNodeHealths);
}
@Test
public void readAll_ignores_NodeHealth_of_more_than_30_seconds_before_cluster_time() {
  // An entry stamped strictly earlier than (clusterTime - 30s) must be dropped,
  // even though its member is part of the cluster.
  String staleMemberUuid = randomAlphanumeric(5);
  long staleTimestamp = clusterTime - 30 * 1000 - random.nextInt(99);
  Map<String, TimestampedNodeHealth> healthByUuid = new HashMap<>();
  healthByUuid.put(staleMemberUuid, new TimestampedNodeHealth(randomNodeHealth(), staleTimestamp));
  doReturn(healthByUuid).when(hazelcastMember).getReplicatedMap(MAP_SQ_HEALTH_STATE);
  when(hazelcastMember.getMemberUuids()).thenReturn(healthByUuid.keySet());
  when(hazelcastMember.getClusterTime()).thenReturn(clusterTime);

  assertThat(underTest.readAll()).isEmpty();
}
@Test
public void write_put_arg_into_map_sq_health_state_under_current_client_uuid() {
  // writeMine() must store the health, stamped with the cluster time, under this member's UUID.
  NodeHealth myHealth = randomNodeHealth();
  Map<String, TimestampedNodeHealth> replicatedMap = new HashMap<>();
  doReturn(replicatedMap).when(hazelcastMember).getReplicatedMap(MAP_SQ_HEALTH_STATE);
  long now = random.nextLong();
  String myUuid = randomAlphanumeric(5);
  when(hazelcastMember.getUuid()).thenReturn(myUuid);
  when(hazelcastMember.getClusterTime()).thenReturn(now);

  underTest.writeMine(myHealth);

  assertThat(replicatedMap).hasSize(1);
  assertThat(replicatedMap.get(myUuid)).isEqualTo(new TimestampedNodeHealth(myHealth, now));
  assertThat(logging.getLogs()).isEmpty();
}
@Test
public void clearMine_clears_entry_into_map_sq_health_state_under_current_client_uuid() {
  // clearMine() must do exactly one thing to the replicated map: remove this member's key.
  Map<String, TimestampedNodeHealth> replicatedMap = mock(Map.class);
  doReturn(replicatedMap).when(hazelcastMember).getReplicatedMap(MAP_SQ_HEALTH_STATE);
  String myUuid = randomAlphanumeric(5);
  when(hazelcastMember.getUuid()).thenReturn(myUuid);

  underTest.clearMine();

  verify(replicatedMap).remove(myUuid);
  verifyNoMoreInteractions(replicatedMap);
  assertThat(logging.getLogs()).isEmpty();
}
/** Removes this member's health entry from the replicated map. */
@Override
public void clearMine() {
  Map<String, TimestampedNodeHealth> replicatedState = readReplicatedMap();
  String myUuid = hzMember.getUuid();
  if (LOG.isTraceEnabled()) {
    // Snapshot the map before mutating it so the trace shows the pre-removal content.
    LOG.trace("Reading {} and clearing for {}", new HashMap<>(replicatedState), myUuid);
  }
  replicatedState.remove(myUuid);
}
@Test
public void readAll_ignores_NodeHealth_of_30_seconds_before_cluster_time() {
  // Boundary case: an entry stamped exactly 30s before cluster time is already stale.
  String boundaryMemberUuid = randomAlphanumeric(5);
  Map<String, TimestampedNodeHealth> healthByUuid = new HashMap<>();
  healthByUuid.put(boundaryMemberUuid, new TimestampedNodeHealth(randomNodeHealth(), clusterTime - 30 * 1000));
  doReturn(healthByUuid).when(hazelcastMember).getReplicatedMap(MAP_SQ_HEALTH_STATE);
  when(hazelcastMember.getMemberUuids()).thenReturn(healthByUuid.keySet());
  when(hazelcastMember.getClusterTime()).thenReturn(clusterTime);

  assertThat(underTest.readAll()).isEmpty();
}
@Test
public void write_logs_map_sq_health_state_content_and_NodeHealth_to_be_added_if_TRACE() {
  logging.setLevel(Level.TRACE);
  NodeHealth addedHealth = randomNodeHealth();
  // Pre-existing content whose rendering is asserted in the trace message.
  Map<String, TimestampedNodeHealth> existingContent = new HashMap<>();
  existingContent.put(randomAlphanumeric(4), new TimestampedNodeHealth(randomNodeHealth(), random.nextLong()));
  // Hand a copy to the mock so the expected message can be built from the original map.
  doReturn(new HashMap<>(existingContent)).when(hazelcastMember).getReplicatedMap(MAP_SQ_HEALTH_STATE);
  when(hazelcastMember.getUuid()).thenReturn(randomAlphanumeric(5));

  underTest.writeMine(addedHealth);

  assertThat(logging.getLogs()).hasSize(1);
  assertThat(logging.hasLog(Level.TRACE, "Reading " + existingContent + " and adding " + addedHealth)).isTrue();
}
@Test
public void clearMine_logs_map_sq_health_state_and_current_client_uuid_if_TRACE() {
  logging.setLevel(Level.TRACE);
  // One arbitrary pre-existing entry; its rendering is asserted in the trace message.
  Map<String, TimestampedNodeHealth> existingContent = new HashMap<>();
  existingContent.put(randomAlphanumeric(4), new TimestampedNodeHealth(randomNodeHealth(), random.nextLong()));
  doReturn(existingContent).when(hazelcastMember).getReplicatedMap(MAP_SQ_HEALTH_STATE);
  String myUuid = randomAlphanumeric(5);
  when(hazelcastMember.getUuid()).thenReturn(myUuid);

  underTest.clearMine();

  assertThat(logging.getLogs()).hasSize(1);
  assertThat(logging.hasLog(Level.TRACE, "Reading " + existingContent + " and clearing for " + myUuid)).isTrue();
}
/**
 * Publishes this member's {@link NodeHealth} into the replicated map, stamped with the
 * current cluster time, under this member's UUID.
 *
 * @throws NullPointerException if {@code nodeHealth} is {@code null}
 */
@Override
public void writeMine(NodeHealth nodeHealth) {
  requireNonNull(nodeHealth, "nodeHealth can't be null");
  Map<String, TimestampedNodeHealth> replicatedState = readReplicatedMap();
  if (LOG.isTraceEnabled()) {
    // Snapshot before the put so the trace shows the pre-write content.
    LOG.trace("Reading {} and adding {}", new HashMap<>(replicatedState), nodeHealth);
  }
  String myUuid = hzMember.getUuid();
  replicatedState.put(myUuid, new TimestampedNodeHealth(nodeHealth, hzMember.getClusterTime()));
}
/**
 * Builds the single-threaded scheduler used to periodically refresh this node's health
 * and starts the {@link HealthStateRefresher} that publishes it via the Hazelcast member.
 */
@Override
public void start() {
  executorService = new DelegateHealthStateRefresherExecutorService(
    Executors.newSingleThreadScheduledExecutor(
      new ThreadFactoryBuilder()
        // Non-daemon thread: refreshes keep running until stop() is called explicitly.
        .setDaemon(false)
        .setNameFormat("health_state_refresh-%d")
        .build()));
  healthStateRefresher = new HealthStateRefresher(executorService, nodeHealthProvider, new SharedHealthStateImpl(hzMember));
  healthStateRefresher.start();
}
@Test
public void readAll_logs_map_sq_health_state_content_and_the_content_effectively_returned_if_TRACE() {
  logging.setLevel(Level.TRACE);
  // A single fresh entry belonging to a current member: it is kept, and the trace
  // message must render both the raw map and the retained healths.
  String memberUuid = randomAlphanumeric(44);
  NodeHealth keptHealth = randomNodeHealth();
  Map<String, TimestampedNodeHealth> healthByUuid = new HashMap<>();
  healthByUuid.put(memberUuid, new TimestampedNodeHealth(keptHealth, clusterTime - 1));
  when(hazelcastMember.getClusterTime()).thenReturn(clusterTime);
  when(hazelcastMember.getMemberUuids()).thenReturn(singleton(memberUuid));
  doReturn(healthByUuid).when(hazelcastMember).getReplicatedMap(MAP_SQ_HEALTH_STATE);

  underTest.readAll();

  assertThat(logging.getLogs()).hasSize(1);
  assertThat(logging.hasLog(Level.TRACE, "Reading " + new HashMap<>(healthByUuid) + " and keeping " + singleton(keptHealth))).isTrue();
}
@Test
public void write_fails_with_NPE_if_arg_is_null() {
  // writeMine() rejects null with the exact message produced by requireNonNull.
  expectedException.expect(NullPointerException.class);
  expectedException.expectMessage("nodeHealth can't be null");

  underTest.writeMine(null);
}
@Test
public void readAll_returns_all_NodeHealth_in_map_sq_health_state_for_existing_client_uuids_aged_less_than_30_seconds() {
  // Populate the replicated map with 1..6 healths, all stamped strictly less than 30s
  // before cluster time, and declare a random subset of their members as cluster members.
  NodeHealth[] nodeHealths = IntStream.range(0, 1 + random.nextInt(6)).mapToObj(i -> randomNodeHealth()).toArray(NodeHealth[]::new);
  Map<String, TimestampedNodeHealth> allNodeHealths = new HashMap<>();
  Map<String, NodeHealth> expected = new HashMap<>();
  String randomUuidBase = randomAlphanumeric(5);
  for (int i = 0; i < nodeHealths.length; i++) {
    String memberUuid = randomUuidBase + i;
    TimestampedNodeHealth timestampedNodeHealth = new TimestampedNodeHealth(nodeHealths[i], clusterTime - random.nextInt(30 * 1000));
    allNodeHealths.put(memberUuid, timestampedNodeHealth);
    // Always keep the first entry: if "expected" were randomly empty, containsOnly(...)
    // below would throw on an empty varargs array and the test would be flaky.
    if (i == 0 || random.nextBoolean()) {
      expected.put(memberUuid, nodeHealths[i]);
    }
  }
  doReturn(allNodeHealths).when(hazelcastMember).getReplicatedMap(MAP_SQ_HEALTH_STATE);
  when(hazelcastMember.getMemberUuids()).thenReturn(expected.keySet());
  when(hazelcastMember.getClusterTime()).thenReturn(clusterTime);

  assertThat(underTest.readAll())
    .containsOnly(expected.values().toArray(new NodeHealth[0]));
  assertThat(logging.getLogs()).isEmpty();
}
@Test
public void readAll_logs_message_for_each_timed_out_NodeHealth_ignored_if_TRACE() {
  logging.setLevel(Level.TRACE);
  // Two members of the cluster, both with entries stamped exactly at the 30s cutoff:
  // each must be dropped and produce its own "too old" trace line.
  String memberUuid1 = randomAlphanumeric(44);
  String memberUuid2 = randomAlphanumeric(44);
  Map<String, TimestampedNodeHealth> healthByUuid = new HashMap<>();
  healthByUuid.put(memberUuid1, new TimestampedNodeHealth(randomNodeHealth(), clusterTime - 30 * 1000));
  healthByUuid.put(memberUuid2, new TimestampedNodeHealth(randomNodeHealth(), clusterTime - 30 * 1000));
  doReturn(healthByUuid).when(hazelcastMember).getReplicatedMap(MAP_SQ_HEALTH_STATE);
  when(hazelcastMember.getMemberUuids()).thenReturn(ImmutableSet.of(memberUuid1, memberUuid2));
  when(hazelcastMember.getClusterTime()).thenReturn(clusterTime);

  underTest.readAll();

  assertThat(logging.getLogs()).hasSize(3);
  assertThat(logging.getLogs(Level.TRACE))
    .containsOnly(
      "Reading " + new HashMap<>(healthByUuid) + " and keeping []",
      "Ignoring NodeHealth of member " + memberUuid1 + " because it is too old",
      "Ignoring NodeHealth of member " + memberUuid2 + " because it is too old");
}
@Test
public void readAll_logs_message_for_each_non_existing_member_ignored_if_TRACE() {
  logging.setLevel(Level.TRACE);
  // Two fresh entries, but getMemberUuids() is deliberately NOT stubbed: Mockito
  // returns an empty set, so neither member is part of the cluster and both entries
  // must be dropped with their own "not part of the cluster" trace line.
  String memberUuid1 = randomAlphanumeric(44);
  String memberUuid2 = randomAlphanumeric(44);
  Map<String, TimestampedNodeHealth> healthByUuid = new HashMap<>();
  healthByUuid.put(memberUuid1, new TimestampedNodeHealth(randomNodeHealth(), clusterTime - 1));
  healthByUuid.put(memberUuid2, new TimestampedNodeHealth(randomNodeHealth(), clusterTime - 1));
  when(hazelcastMember.getClusterTime()).thenReturn(clusterTime);
  doReturn(healthByUuid).when(hazelcastMember).getReplicatedMap(MAP_SQ_HEALTH_STATE);

  underTest.readAll();

  assertThat(logging.getLogs()).hasSize(3);
  assertThat(logging.getLogs(Level.TRACE))
    .containsOnly(
      "Reading " + new HashMap<>(healthByUuid) + " and keeping []",
      "Ignoring NodeHealth of member " + memberUuid1 + " because it is not part of the cluster at the moment",
      "Ignoring NodeHealth of member " + memberUuid2 + " because it is not part of the cluster at the moment");
}