/** {@inheritDoc} */
@Nullable @Override public final R reduce(List<GridComputeJobResult> results) throws GridException {
    try {
        // Delegate actual reduction to subclass implementation.
        return reduce0(results);
    }
    finally {
        // When Visor debug is enabled, log task finish with the start timestamp captured in map().
        if (debugState(g))
            logFinish(g.log(), getClass(), start);
    }
}
}
/** * Parse all GGFS log files in specified log directory. * * @param logDir Folder were log files located. * @return List of line with aggregated information by files. */ private Collection<VisorGgfsProfilerEntry> parse(Path logDir, String ggfsName) throws IOException { Collection<VisorGgfsProfilerEntry> parsedFiles = new ArrayList<>(512); try (DirectoryStream<Path> dirStream = Files.newDirectoryStream(logDir)) { PathMatcher matcher = FileSystems.getDefault().getPathMatcher("glob:ggfs-log-" + ggfsName + "-*.csv"); for (Path p : dirStream) { if (matcher.matches(p.getFileName())) { try { parsedFiles.addAll(parseFile(p)); } catch (NoSuchFileException ignored) { // Files was deleted, skip it. } catch (Exception e) { g.log().warning("Failed to parse GGFS profiler log file: " + p, e); } } } } return parsedFiles; }
/** * Parse all GGFS log files in specified log directory. * * @param logDir Folder were log files located. * @return List of line with aggregated information by files. */ private Collection<VisorGgfsProfilerEntry> parse(Path logDir, String ggfsName) throws IOException { Collection<VisorGgfsProfilerEntry> parsedFiles = new ArrayList<>(512); try (DirectoryStream<Path> dirStream = Files.newDirectoryStream(logDir)) { PathMatcher matcher = FileSystems.getDefault().getPathMatcher("glob:ggfs-log-" + ggfsName + "-*.csv"); for (Path p : dirStream) { if (matcher.matches(p.getFileName())) { try { parsedFiles.addAll(parseFile(p)); } catch (NoSuchFileException ignored) { // Files was deleted, skip it. } catch (Exception e) { g.log().warning("Failed to parse GGFS profiler log file: " + p, e); } } } } return parsedFiles; }
/** {@inheritDoc} */
@Override public Cache getCache(String name) {
    assert grid != null;

    Cache springCache;

    try {
        springCache = new GridSpringCache(grid.log().getLogger(getClass()), name, grid.cache(name), null);
    }
    catch (IllegalArgumentException ignored) {
        // No cache with such name is configured on this grid.
        springCache = null;
    }

    return springCache;
}
/** {@inheritDoc} */
@Nullable @Override public Map<? extends GridComputeJob, GridNode> map(List<GridNode> subgrid,
    @Nullable GridBiTuple<Set<UUID>, A> arg) throws GridException {
    assert arg != null;
    assert arg.get1() != null;

    start = U.currentTimeMillis();

    boolean debugEnabled = debugState(g);

    if (debugEnabled)
        logStart(g.log(), getClass(), start);

    // IDs of nodes the task was requested to run on.
    Set<UUID> requestedNodes = arg.get1();

    Map<GridComputeJob, GridNode> jobs = U.newHashMap(requestedNodes.size());

    try {
        taskArg = arg.get2();

        for (GridNode node : subgrid) {
            if (requestedNodes.contains(node.id()))
                jobs.put(job(taskArg), node);
        }

        return jobs;
    }
    finally {
        if (debugEnabled)
            logMapped(g.log(), getClass(), jobs.values());
    }
}
/** {@inheritDoc} */ @Nullable @Override public Map<? extends GridComputeJob, GridNode> map(List<GridNode> subgrid, @Nullable GridBiTuple<Set<UUID>, VisorDataCollectorTaskArg> arg) throws GridException { assert arg != null; assert arg.get1() != null; start = U.currentTimeMillis(); boolean debug = debugState(g); if (debug) logStart(g.log(), getClass(), start); Collection<GridNode> nodes = g.nodes(); Map<GridComputeJob, GridNode> map = U.newHashMap(nodes.size()); try { taskArg = arg.get2(); // Collect data from ALL nodes. for (GridNode node : nodes) map.put(job(taskArg), node); return map; } finally { if (debug) logMapped(g.log(), getClass(), map.values()); } }
/** {@inheritDoc} */
@Nullable @Override public Map<? extends GridComputeJob, GridNode> map(List<GridNode> subgrid,
    @Nullable GridBiTuple<Set<UUID>, Map<UUID, Collection<String>>> arg) throws GridException {
    assert arg != null;
    assert arg.get2() != null;

    start = U.currentTimeMillis();

    boolean debugEnabled = debugState(g);

    if (debugEnabled)
        logStart(g.log(), getClass(), start);

    // Jobs are created only for nodes mentioned in the per-node argument map.
    Set<UUID> requestedNodes = arg.get2().keySet();

    Map<GridComputeJob, GridNode> jobs = U.newHashMap(requestedNodes.size());

    try {
        taskArg = arg.get2();

        for (GridNode node : g.nodes()) {
            if (requestedNodes.contains(node.id()))
                jobs.put(new VisorQueryCleanupJob(taskArg.get(node.id())), node);
        }

        return jobs;
    }
    finally {
        if (debugEnabled)
            logMapped(g.log(), getClass(), jobs.values());
    }
}
/** {@inheritDoc} */
@Override protected VisorDataCollectorJobResult run(VisorDataCollectorTaskArg arg) throws GridException {
    VisorDataCollectorJobResult res = new VisorDataCollectorJobResult();

    res.gridName = g.name();
    res.topologyVersion = g.topologyVersion();

    // Per-stage timestamp; each log() call below returns the time for the next stage measurement.
    long start0 = U.currentTimeMillis();

    events(res, arg);

    if (debug)
        start0 = log(g.log(), "Collected events", getClass(), start0);

    license(res);

    if (debug)
        start0 = log(g.log(), "Collected license", getClass(), start0);

    caches(res, arg);

    if (debug)
        start0 = log(g.log(), "Collected caches", getClass(), start0);

    ggfs(res);

    if (debug)
        start0 = log(g.log(), "Collected ggfs", getClass(), start0);

    streamers(res);

    if (debug)
        start0 = log(g.log(), "Collected streamers", getClass(), start0);

    dr(res);

    if (debug)
        log(g.log(), "Collected DR", getClass(), start0);

    // TODO: gg-mongo
    mongo(res);

    return res;
}
// Count files that failed to delete so the total can be reported back to Visor.
notDeleted++;

g.log().warning("Profiler log file was not deleted: " + p, io);
// Count files that failed to delete so the total can be reported back to Visor.
notDeleted++;

g.log().warning("Profiler log file was not deleted: " + p, io);
/** {@inheritDoc} */
@Nullable @Override public Map<? extends GridComputeJob, GridNode> map(List<GridNode> subgrid,
    @Nullable GridBiTuple<Set<UUID>, Map<UUID, Collection<String>>> arg) throws GridException {
    assert arg != null;
    assert arg.get2() != null;

    start = U.currentTimeMillis();

    boolean debugEnabled = debugState(g);

    if (debugEnabled)
        logStart(g.log(), getClass(), start);

    // Jobs are created only for nodes mentioned in the per-node argument map.
    Set<UUID> requestedNodes = arg.get2().keySet();

    Map<GridComputeJob, GridNode> jobs = U.newHashMap(requestedNodes.size());

    try {
        taskArg = arg.get2();

        for (GridNode node : g.nodes()) {
            if (requestedNodes.contains(node.id()))
                jobs.put(new VisorClearQueryResultJob(taskArg.get(node.id())), node);
        }

        return jobs;
    }
    finally {
        if (debugEnabled)
            logMapped(g.log(), getClass(), jobs.values());
    }
}
/** Collects information about all configured caches into job result. */
private void caches(VisorDataCollectorJobResult res, VisorDataCollectorTaskArg arg) {
    try {
        for (GridCache cache : g.cachesx()) {
            long cacheStart = U.currentTimeMillis();

            try {
                res.caches.add(VisorCache.from(g, cache, arg.sample));
            }
            finally {
                if (debug)
                    log(g.log(), "Collected cache: " + cache.name(), getClass(), cacheStart);
            }
        }
    }
    catch (Throwable cachesEx) {
        // Remember failure in result instead of failing whole data-collect job.
        res.cachesEx = cachesEx;
    }
}
/** Collects information about all configured streamers into job result. */
private void streamers(VisorDataCollectorJobResult res) {
    try {
        GridStreamerConfiguration[] cfgs = g.configuration().getStreamerConfiguration();

        if (cfgs == null)
            return;

        for (GridStreamerConfiguration cfg : cfgs) {
            long streamerStart = U.currentTimeMillis();

            try {
                res.streamers.add(VisorStreamer.from(g.streamer(cfg.getName())));
            }
            finally {
                if (debug)
                    log(g.log(), "Collected streamer: " + cfg.getName(), getClass(), streamerStart);
            }
        }
    }
    catch (Throwable streamersEx) {
        // Remember failure in result instead of failing whole data-collect job.
        res.streamersEx = streamersEx;
    }
}
/** Collects information about all configured GGFS instances and their endpoints into job result. */
private void ggfs(VisorDataCollectorJobResult res) {
    try {
        GridGgfsProcessorAdapter ggfsProc = ((GridKernal)g).context().ggfs();

        for (GridGgfs ggfs : ggfsProc.ggfss()) {
            long ggfsStart = U.currentTimeMillis();

            try {
                Collection<GridIpcServerEndpoint> endPoints = ggfsProc.endpoints(ggfs.name());

                if (endPoints != null) {
                    // Only management endpoints are of interest to Visor.
                    for (GridIpcServerEndpoint ep : endPoints) {
                        if (ep.isManagement())
                            res.ggfsEndpoints.add(
                                new VisorGgfsEndpoint(ggfs.name(), g.name(), ep.getHost(), ep.getPort()));
                    }
                }

                res.ggfss.add(VisorGgfs.from(ggfs));
            }
            finally {
                if (debug)
                    log(g.log(), "Collected GGFS: " + ggfs.name(), getClass(), ggfsStart);
            }
        }
    }
    catch (Throwable ggfssEx) {
        // Remember failure in result instead of failing whole data-collect job.
        res.ggfssEx = ggfssEx;
    }
}
/** {@inheritDoc} */
@Override public Cache getCache(final String name) {
    // Fast path: cache already known to the parent manager.
    Cache resolved = super.getCache(name);

    if (resolved != null)
        return resolved;

    try {
        MetaKey metaKey = new MetaKey(name);

        resolved = metaCache.get(metaKey);

        if (resolved != null)
            return resolved;

        // Lazily create Spring cache backed by projection of shared data cache.
        Cache created = new GridSpringCache(grid.log().getLogger(getClass()), name,
            dataCache.projection(new ProjectionFilter(name)),
            new GridClosure<Object, Object>() {
                @Override public Object apply(Object o) {
                    return new DataKey(name, o);
                }
            });

        // Another thread may have created the same cache concurrently — keep the winner.
        Cache concurrent = metaCache.putIfAbsent(metaKey, created);

        return concurrent != null ? concurrent : created;
    }
    catch (GridException e) {
        throw new GridRuntimeException(e);
    }
}