public MetricsCollectorConfig(String topoName, Map<String, Object> topoConfig) {
    this.topoConfig = topoConfig;
    String labelStr = (String) topoConfig.get("benchmark.label");
    this.name = topoName;
    if (labelStr == null) {
        LOG.warn("'benchmark.label' not found in config. Defaulting to topology name");
        labelStr = this.name;
    }
    this.label = labelStr;
}
} // MetricsCollectorConfig
private void logFailureAndWait(String action, String cause, int attempt, Exception e) throws InterruptedException {
    String retryMessage;
    if (attempt < maxAttempts) {
        retryMessage = ", will wait " + waitBetweenRetries + " ms until next retry.";
    } else {
        retryMessage = ", no further attempts will be performed.";
    }
    String fullMessage = "Failed to " + action + " because " + cause + ". Attempt #" + attempt + "/" + maxAttempts + retryMessage;
    if (e == null) {
        logger.warn(fullMessage);
    } else {
        logger.error(fullMessage, e);
    }
    Thread.sleep(waitBetweenRetries);
}
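// Illustrative only (not from the original source): a minimal sketch of how a helper like
// logFailureAndWait() is typically driven by a retry loop. The runWithRetries name, the
// performAction() call, and the maxAttempts field are assumptions introduced for this example.
private void runWithRetries(String action) throws InterruptedException {
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
            performAction(); // hypothetical unit of work that may fail
            return;          // success: stop retrying
        } catch (Exception e) {
            // log the failure and wait before the next attempt
            logFailureAndWait(action, e.getMessage(), attempt, e);
        }
    }
}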
static void roll() {
    try {
        Socket socket = new Socket(host, port);
        DataOutputStream dos = new DataOutputStream(socket.getOutputStream());
        DataInputStream dis = new DataInputStream(socket.getInputStream());
        dos.writeUTF(ExternallyRolledFileAppender.ROLL_OVER);
        String rc = dis.readUTF();
        if (ExternallyRolledFileAppender.OK.equals(rc)) {
            cat.info("Roll over signal acknowledged by remote appender.");
        } else {
            cat.warn("Unexpected return code " + rc + " from remote entity.");
            System.exit(2);
        }
    } catch (IOException e) {
        cat.error("Could not send roll signal on host " + host + " port " + port + ".", e);
        System.exit(2);
    }
    System.exit(0);
}
}
logger.debug(String.format("Reading lexicon from '%s'", lexiconFilename)); String line; Map<String, List<String>> fLexicon = new HashMap<String, List<String>>(); phonStr = lineParts[1]; } catch (ArrayIndexOutOfBoundsException e) { logger.warn(String.format("Lexicon '%s': missing transcription for '%s'", lexiconFilename, graphStr)); continue; allophoneSet.splitIntoAllophones(phonStr); } catch (IllegalArgumentException e) { logger.warn(String.format("Lexicon '%s': invalid entry for '%s': %s", lexiconFilename, graphStr, e.getMessage())); continue; List<String> transcriptions = fLexicon.get(graphStr); if (null == transcriptions) { transcriptions = new ArrayList<String>(); fLexicon.put(graphStr, transcriptions); transcriptions.add(phonPosStr);
adminClients = new ArrayList<AdminClient>(urls.size());
Map<String, Cluster> clusterMap = new HashMap<String, Cluster>(urls.size());
Map<String, StoreDefinition> storeDefinitionMap = new HashMap<String, StoreDefinition>(urls.size());
// ... (for each bootstrap url: connect and collect cluster and store metadata)
logger.info("Connecting to bootstrap server: " + url);
adminClients.add(adminClient);
clusterMap.put(url, cluster);
StoreDefinition storeDefinition = StoreDefinitionUtils.getStoreDefinitionWithName(storeDefinitions.getValue(), storeName);
storeDefinitionMap.put(url, storeDefinition);
// ... (abort when the partition counts of the clusters do not match)
logger.error("Partition count of different clusters is not the same: " + partitionCount + " vs " + currentPartitionCount);
throw new VoldemortException("Will not connect because partition counts differ among clusters.");
// ... (per-url lookup of the previously collected metadata)
StoreDefinition storeDefinition = storeDefinitionMap.get(url);
Cluster cluster = clusterMap.get(url);
Map<Integer, Integer> partitionToNodeMap = cluster.getPartitionIdToNodeIdMap();
logger.info(line);
// ... (when retention settings differ between clusters, keep the shorter one)
retentionDays = storeRetentionDays;
logger.warn("Retention-days is not consistent between clusters by urls. Will use the shorter.");
Document doc = documentCache.get(docNum);
if (logger.isDebugEnabled())
    logger.debug("Found doc in cache");
// ... (for each uncached document, compute its Cassandra row key)
keyMap.put(docNum, CassandraUtils.hashKeyBytes(indexName.getBytes("UTF-8"), CassandraUtils.delimeterBytes,
        Integer.toHexString(docNum).getBytes("UTF-8")));
// ... (restrict the read to the requested fields when a SolandraFieldSelector is supplied)
fieldNames = ((SolandraFieldSelector) selector).getFieldNames();
continue;
// ...
if (fieldNames == null || fieldNames.size() == 0)
    // ... (no field selection: read the whole row; otherwise read only the named columns)
        .add(new SliceByNamesReadCommand(CassandraUtils.keySpace, key, columnParent, fieldNames));
for (Row row : rows)
    rowMap.put(row.key.key, row);
Row row = rowMap.get(key.getValue());
// ... (rows missing from the multiget_slice result are logged)
logger.warn("Missing document in multiget_slice for: "
        + ByteBufferUtil.string(key.getValue(), CassandraUtils.UTF_8) + " " + rowMap);
// ... (internal __META__ rows are skipped)
logger.warn("Filtering out __META__ key");
continue;
Map<String, Long> storeToMaxVersion = Maps.newHashMapWithExpectedSize(storeNames.size());
for (String storeName : storeNames) {
    storeToMaxVersion.put(storeName, 0L);
}
// ... (for each node, fetch its max read-only versions and merge them in)
Map<String, Long> currentNodeVersions = getROMaxVersion(node.getId(), storeNames);
for (String storeName : currentNodeVersions.keySet()) {
    Long maxVersion = storeToMaxVersion.get(storeName);
    if (maxVersion != null && maxVersion < currentNodeVersions.get(storeName)) {
        storeToMaxVersion.put(storeName, currentNodeVersions.get(storeName));
    }
}
// ... (on an exception while contacting the node)
nodeFailures++;
if (nodeFailures > maxNodeFailures) {
    logger.error("Got an exception while trying to reach node " + node.getId() + ". " + nodeFailures
            + " node failure(s) so far; maxNodeFailures exceeded, rethrowing.");
    throw e;
} else {
    logger.warn("Got an exception while trying to reach node " + node.getId() + ". " + nodeFailures
            + " node failure(s) so far; continuing.", e);
}
logger.warn("Get operation timed out after " + timeoutMs + " ms."); continue; throw (VoldemortException) getResult.exception; failures.add(getResult.exception); continue; retrieved.add(getResult); } catch(InterruptedException e) { throw new InsufficientOperationalNodesException("Get operation interrupted!", e); throw (Error) e.getCause(); else logger.error(e.getMessage(), e); while(successes < this.storeDef.getPreferredReads() && nodeIndex < nodes.size()) { Node node = nodes.get(nodeIndex); long startNs = System.nanoTime(); fetcher.execute(innerStores.get(node.getId()), key, transforms), null)); throw e; } catch(Exception e) { logger.warn("Error in GET on node " + node.getId() + "(" + node.getHost() + ")", e);
if (null == queries || queries.size() == 0) {
    log.warn("No queries specified to be executed");
    return Collections.emptyList();
}
// ... (for each query)
log.info("Executing query: " + query);
boolean result = statement.execute(query);
if (result) {
    ResultSet resultSet = statement.getResultSet();
    if (resultSet.next()) {
        rowCounts.add(resultSet.getLong(1));
    }
}
log.warn("Query output for: " + query + " : " + result);
// ... (cleanup)
try {
    closer.close();
} catch (Exception e) {
    log.warn("Could not close HiveJdbcConnector", e);
}
try {
    statement.close();
} catch (SQLException e) {
    log.warn("Could not close Hive statement", e);
}
logger.error("Cannot run slop pusher job since Voldemort server is rebalancing"); return; logger.info("Started streaming slop pusher job at " + startTime); attemptedByNode.put(node.getId(), 0L); succeededByNode.put(node.getId(), 0L); logger.info("Attempted pushing " + attemptedPushes + " slops"); logger.warn("Interrupted exception", e); terminatedEarly = true; } catch(Exception e) { logger.error(e, e); terminatedEarly = true; } finally { iterator.close(); } catch(Exception e) { logger.warn("Failed to close iterator cleanly as database might be closed", e); slopQueue.put(END); } catch(InterruptedException e) { logger.warn("Error putting poison pill", e); result.get(); } catch(Exception e) { logger.warn("Exception in consumer", e);
public Map<Integer, Long> byZone() {
    Map<Integer, Long> map = Maps.newHashMapWithExpectedSize(cluster.getNumberOfNodes());
    for (Map.Entry<Integer, AtomicLong> entry : values.entrySet()) {
        try {
            Node node = cluster.getNodeById(entry.getKey());
            int zoneId = node.getZoneId();
            Long count = map.get(zoneId);
            if (count == null)
                count = 0L;
            count += entry.getValue().get();
            map.put(zoneId, count);
        } catch (VoldemortException e) {
            logger.warn("Can't get zone information for node id " + entry.getKey(), e);
        }
    }
    return Collections.unmodifiableMap(map);
}
@Override
public void run() {
    long startNs = System.nanoTime();
    try {
        boolean deleted = innerStores.get(node.getId()).delete(key, version);
        successes.incrementAndGet();
        deletedSomething.compareAndSet(false, deleted);
        recordSuccess(node, startNs);
    } catch (UnreachableStoreException e) {
        failures.add(e);
        recordException(node, startNs, e);
    } catch (VoldemortApplicationException e) {
        throw e;
    } catch (Exception e) {
        failures.add(e);
        logger.warn("Error in DELETE on node " + node.getId() + "(" + node.getHost() + ")", e);
    } finally {
        // signal that the operation is complete
        semaphore.release();
    }
}
});
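// Illustrative only (not from the original source): a sketch of the fan-out/wait pattern the
// DELETE task above participates in -- submit one task per node, then block on the shared
// semaphore until enough tasks have called release(). The deleteOnAllNodes and buildDeleteTask
// names and the requiredWrites count are assumptions introduced for this example.
private void deleteOnAllNodes(ExecutorService executor, List<Node> nodes, int requiredWrites)
        throws InterruptedException {
    final Semaphore semaphore = new Semaphore(0);
    for (Node node : nodes) {
        executor.execute(buildDeleteTask(node, semaphore)); // hypothetical factory for a Runnable like the one above
    }
    // wait until the required number of per-node deletes have completed (successfully or not)
    semaphore.acquire(requiredWrites);
}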
if(logger.isDebugEnabled()) logger.debug("Trying to send hint to " + nodeId + " for key " + slop.getKey()); Store<ByteArray, Slop, byte[]> slopStore = slopStores.get(nodeId); Utils.notNull(slopStore); long startNs = System.nanoTime(); if(logger.isDebugEnabled()) logger.debug("Slop attempt to write " + slop.getKey() + " (keyRef: " + System.identityHashCode(slop.getKey()) + ") for " + failedNode + " to node " + node); failureDetector.recordException(node, (System.nanoTime() - startNs) / Time.NS_PER_MS, e); logger.warn("Error during hinted handoff. Will try another node", e); } catch(IllegalStateException e) { logger.warn("Error during hinted handoff. Will try another node", e); } catch(ObsoleteVersionException e) { logger.debug(e, e); } catch(Exception e) { logger.error("Unknown exception. Will try another node" + e); if(logger.isDebugEnabled()) logger.debug("Slop write of key " + slop.getKey() + " (keyRef: " + System.identityHashCode(slop.getKey()) + ") for " + failedNode logger.error("Slop write of key " + slop.getKey() + " (keyRef: " + System.identityHashCode(slop.getKey()) + ") for " + failedNode + " was not written.");
private void commitToVoldemort(List<String> storeNamesToCommit) {
    if (logger.isDebugEnabled()) {
        logger.debug("Trying to commit to Voldemort");
    }
    if (nodesToStream == null || nodesToStream.size() == 0) {
        if (logger.isDebugEnabled()) {
            logger.debug("No nodes to stream to. Returning.");
        }
        return;
    }
    // ... (flush each node; on failure mark the node as faulty)
    logger.error("Exception during commit", e);
    hasError = true;
    if (!faultyNodes.contains(node.getId()))
        faultyNodes.add(node.getId());
    // ... (skip the callback if the streaming session was never initialized)
    logger.warn("StreamingSession may not have been initialized since Variable streamingresults is null. Skipping callback ");
    return;
    // ... (run the recovery callback when a commit failed)
    logger.info("Invoking the Recovery Callback");
    Future future = streamingresults.submit(recoveryCallback);
    try {
        // ... (wait for the callback, then invoke the checkpoint callback)
        if (logger.isDebugEnabled()) {
            logger.debug("Commit successful");
            logger.debug("calling checkpoint callback");
        }
        // ...
    } catch (InterruptedException e1) {
        logger.warn("Checkpoint callback failed!", e1);
    } catch (ExecutionException e1) {
        logger.warn("Checkpoint callback failed during execution!", e1);
    }
@Override
public void report_task_error(String stormId, int taskId, Throwable error) {
    // added by us: also log the error locally
    LOG.warn(StormUtils.stringify_error(error));
    String path = Cluster.taskerror_path(stormId, taskId);
    cluster_state.mkdirs(path);
    // keep only the most recent task errors: drop the oldest once there are 10 or more
    List<Integer> children = new ArrayList<Integer>();
    for (String str : cluster_state.get_children(path, false)) {
        children.add(Integer.parseInt(str));
    }
    Collections.sort(children);
    while (children.size() >= 10) {
        cluster_state.delete_node(path + "/" + children.remove(0));
    }
    String timestampPath = path + "/" + TimeUtils.current_time_secs();
    byte[] errorData = StormUtils.stringify_error(error).getBytes();
    cluster_state.set_data(timestampPath, errorData);
}
VectorClock clock = new VectorClock();
if (metadataCache.containsKey(ROUTING_STRATEGY_KEY))
    clock = (VectorClock) metadataCache.get(ROUTING_STRATEGY_KEY).getVersion();
logger.info("Updating routing strategy for all stores");
HashMap<String, StoreDefinition> storeDefMap = makeStoreDefinitionMap(storeDefs);
HashMap<String, RoutingStrategy> routingStrategyMap = createRoutingStrategyMap(cluster, storeDefMap);
this.metadataCache.put(ROUTING_STRATEGY_KEY,
        new Versioned<Object>(routingStrategyMap, clock.incremented(getNodeId(), /* ... */)));
// ... (notify registered listeners of the new routing strategy)
if (updatedRoutingStrategy != null) {
    try {
        for (MetadataStoreListener listener : storeNameTolisteners.get(storeName)) {
            listener.updateRoutingStrategy(updatedRoutingStrategy);
            listener.updateStoreDefinition(storeDefMap.get(storeName));
        }
    } catch (Exception e) {
        logger.warn(e, e);
    }
}
// ... (for each key: locate its chunk and the value offset within it)
        fileSet.getIndexFileSize(chunk));
if (valueLocation >= 0)
    keysAndValueLocations.add(new KeyValueLocation(chunk, key, valueLocation));
} catch (IllegalStateException e) {
    logger.warn("ChunkedFileSet.getChunkForKey() did not execute successfully for store '" + getName()
            + "'. Skipping key in getAll: " + ByteUtils.toHexString(key.get()), e);
    continue;
}
// ... (read the value bytes at each located position)
        keyVal.getValueLocation());
if (value.length > 0)
    results.put(keyVal.getKey(), Collections.singletonList(Versioned.value(value)));
logger.error("Cannot stream more entries since Recovery Callback Failed!"); throw new VoldemortException("Cannot stream more entries since Recovery Callback Failed! You Need to restart the session"); if(blackListedNodes != null && blackListedNodes.size() > 0) { if(blackListedNodes.contains(node.getId())) continue; nodesWithException++; if(!faultyNodes.contains(node.getId())) faultyNodes.add(node.getId()); logger.warn("Invoking the Recovery Callback"); Future future = streamingresults.submit(recoveryCallback); try { logger.error("Recovery Callback failed", e1); throw new VoldemortException("Recovery Callback failed"); } catch(ExecutionException e1) { MARKED_BAD = true; logger.error("Recovery Callback failed during execution", e1); throw new VoldemortException("Recovery Callback failed during execution");
@Override
public List<String> getURLsFromPage(Document doc) {
    List<String> imageURLs = new ArrayList<>();
    for (Element thumb : doc.select("div#gallery > div > a")) {
        String imageURL = thumb.attr("href");
        try {
            Document imagedoc = new Http("http://imagearn.com/" + imageURL).get();
            String image = imagedoc.select("a.thickbox").first().attr("href");
            imageURLs.add(image);
        } catch (IOException e) {
            LOGGER.warn("Was unable to download page: " + imageURL);
        }
    }
    return imageURLs;
}