/**
 * Rethrows any persist failure recorded earlier as an unchecked error; no-op when none exists.
 */
private void throwPersistErrorIfExists()
{
  if (persistError == null) {
    return;
  }
  throw new RE(persistError, "Error while persisting");
}
static PidCgroupEntry parse(String entry) { // For example, entries with a port number will have an extra `:` in it somewhere, or ipv6 addresses. final String[] parts = entry.split(Pattern.quote(":"), 3); if (parts.length != 3) { throw new RE("Bad entry [%s]", entry); } final Set<String> controllers = new HashSet<>(Arrays.asList(parts[1].split(Pattern.quote(",")))); final Path path = Paths.get(parts[2]); return new PidCgroupEntry(controllers, path); }
/**
 * Finds the mount entry for the given cgroup controller in the proc mounts file.
 *
 * @param procMounts typically {@code /proc/mounts}
 * @param cgroup     controller name to look for in the mount options
 * @return the matching mount entry
 * @throws RE if no cgroup-typed mount lists the requested controller
 */
private ProcMountsEntry getMountEntry(final File procMounts, final String cgroup)
{
  final List<String> lines;
  try {
    lines = Files.readLines(procMounts, StandardCharsets.UTF_8);
  }
  catch (IOException e) {
    throw new RuntimeException(e);
  }
  return lines.stream()
              .map(ProcMountsEntry::parse)
              .filter(candidate -> CGROUP_TYPE.equals(candidate.type) && candidate.options.contains(cgroup))
              .findFirst()
              .orElseThrow(() -> new RE("Cgroup [%s] not found", cgroup));
}
/**
 * Finds the cgroup hierarchy entry for the given controller in the pid's cgroup file.
 *
 * @param procCgroup typically {@code /proc/<pid>/cgroup}
 * @param cgroup     controller name to look for
 * @return the matching entry
 * @throws RE if no hierarchy lists the requested controller
 */
private PidCgroupEntry getCgroupEntry(final File procCgroup, final String cgroup)
{
  final List<String> lines;
  try {
    lines = Files.readLines(procCgroup, StandardCharsets.UTF_8);
  }
  catch (IOException e) {
    throw new RuntimeException(e);
  }
  for (final String line : lines) {
    // Comment lines start with '#'; everything else is a hierarchy entry.
    if (!line.startsWith("#")) {
      final PidCgroupEntry candidate = PidCgroupEntry.parse(line);
      if (candidate.controllers.contains(cgroup)) {
        return candidate;
      }
    }
  }
  throw new RE("Hierarchy for [%s] not found", cgroup);
}
private static CpuAcctMetric parse(final List<String> lines) { // File has a header. We skip it // See src/test/resources/cpuacct.usage_all for an example final int ncpus = lines.size() - 1; final long[] usrTime = new long[ncpus]; final long[] sysTime = new long[ncpus]; for (int i = 1; i < lines.size(); i++) { final String[] splits = lines.get(i).split(CgroupUtil.SPACE_MATCH, 3); if (splits.length != 3) { throw new RE("Error parsing [%s]", lines.get(i)); } final int cpuNum = Integer.parseInt(splits[0]); usrTime[cpuNum] = Long.parseLong(splits[1]); sysTime[cpuNum] = Long.parseLong(splits[2]); } return new CpuAcctMetric(usrTime, sysTime); }
/**
 * Locates the on-disk directory for the given cgroup by joining the controller's mount
 * point (from proc mounts) with this pid's hierarchy path (from the proc cgroup file).
 *
 * @param cgroup controller name, e.g. "cpuacct"; must not be null
 * @return the cgroup directory path
 * @throws RE if the computed directory does not exist or is not a directory
 */
@Override
public Path discover(final String cgroup)
{
  Preconditions.checkNotNull(cgroup, "cgroup required");
  final PidCgroupEntry pidCgroupsEntry = getCgroupEntry(new File(procDir, "cgroup"), cgroup);
  final ProcMountsEntry procMountsEntry = getMountEntry(new File(procDir, "mounts"), cgroup);
  final File cgroupDir = new File(
      procMountsEntry.path.toString(),
      pidCgroupsEntry.path.toString()
  );
  if (!cgroupDir.exists() || !cgroupDir.isDirectory()) {
    throw new RE("Invalid cgroup directory [%s]", cgroupDir);
  }
  return cgroupDir.toPath();
}
/**
 * Derives a PBKDF2 hash of the given password.
 *
 * @param password   the password characters; this method does not modify the caller's array
 * @param salt       random salt bytes
 * @param iterations PBKDF2 iteration count
 * @return the derived key bytes, KEY_LENGTH bits long
 * @throws RE if the key spec is rejected or ALGORITHM is unavailable on this JVM
 */
public static byte[] hashPassword(final char[] password, final byte[] salt, final int iterations)
{
  try {
    SecretKeyFactory keyFactory = SecretKeyFactory.getInstance(ALGORITHM);
    PBEKeySpec keySpec = new PBEKeySpec(password, salt, iterations, KEY_LENGTH);
    try {
      SecretKey key = keyFactory.generateSecret(keySpec);
      return key.getEncoded();
    }
    finally {
      // Zero out the internal password copy held by the spec.
      keySpec.clearPassword();
    }
  }
  catch (InvalidKeySpecException ikse) {
    // Throw RE (not bare RuntimeException) for consistency with the sibling catch below.
    log.error(ikse, "Invalid key spec for algorithm [%s]", ALGORITHM);
    throw new RE(ikse, "Invalid key spec for algorithm [%s]", ALGORITHM);
  }
  catch (NoSuchAlgorithmException nsae) {
    log.error(nsae, "%s not supported on this system.", ALGORITHM);
    throw new RE(nsae, "%s not supported on this system.", ALGORITHM);
  }
}
/**
 * Returns whether the syncer has completed its first successful sync, polling with a
 * minimal (1 ms) wait so this is effectively a non-blocking check.
 *
 * @throws RE if the thread is interrupted while polling
 */
boolean isSyncedSuccessfullyAtleastOnce()
{
  try {
    return syncer.awaitInitialization(1, TimeUnit.MILLISECONDS);
  }
  catch (InterruptedException ex) {
    // Restore the interrupt flag so callers up the stack can still observe the interrupt.
    Thread.currentThread().interrupt();
    throw new RE(
        ex,
        "Interrupted while waiting for queryable server[%s] initial successful sync.",
        druidServer.getName()
    );
  }
}
/**
 * Returns the milliseconds remaining before the query deadline, recording a response-read
 * failure and throwing if the deadline has already passed.
 */
private long checkQueryTimeout()
{
  final long remaining = timeoutAt - System.currentTimeMillis();
  if (remaining > 0) {
    return remaining;
  }
  final String msg = StringUtils.format("Query[%s] url[%s] timed out.", query.getId(), url);
  setupResponseReadFailure(msg, null);
  throw new RE(msg);
}
// NOTE(review): fragment of a larger JAX-RS resource method — braces are unbalanced in this
// view, so the code is left untouched. Visible behavior: 404 with a sanitized "No lookups
// found" error when the whole lookup map is absent, and 404 with "Tier [...] not found"
// when the requested tier has no lookups.
if (map == null) { return Response.status(Response.Status.NOT_FOUND) .entity(ServletResourceUtils.sanitizeException(new RE("No lookups found"))) .build(); if (tierLookups == null) { return Response.status(Response.Status.NOT_FOUND) .entity(ServletResourceUtils.sanitizeException(new RE("Tier [%s] not found", tier))) .build();
// NOTE(review): fragment of a larger JAX-RS resource method (opening brace not closed in
// this view). Visible behavior: 404 with a sanitized "lookup [...] not found" error when
// the requested lookup name is absent.
if (map == null) { return Response.status(Response.Status.NOT_FOUND) .entity(ServletResourceUtils.sanitizeException(new RE("lookup [%s] not found", lookup))) .build();
// Adds the chunk size to the running scatter-gather total and fails the response read once
// the configured limit is exceeded. The Long.MAX_VALUE check skips the AtomicLong update
// entirely when the limit is effectively "unlimited". The trailing `};` closes an enclosing
// anonymous class whose declaration is outside this view — left byte-identical.
private void checkTotalBytesLimit(long bytes) { if (maxScatterGatherBytes < Long.MAX_VALUE && totalBytesGathered.addAndGet(bytes) > maxScatterGatherBytes) { String msg = StringUtils.format( "Query[%s] url[%s] max scatter-gather bytes limit reached.", query.getId(), url ); setupResponseReadFailure(msg, null); throw new RE(msg); } } };
// Blocks until the next response chunk is available or the query deadline passes.
// checkQueryTimeout() supplies the poll timeout (and throws if the deadline already
// passed); a null poll result means the wait itself timed out. After taking a chunk, the
// queued-byte count is decremented; when backpressure is enabled and the count drops below
// maxQueuedBytes, the TrafficCop is resumed for this chunk and the time the channel spent
// suspended is accumulated. Statement order matters here (decrement before the resume
// decision), so the code is left byte-identical.
private InputStream dequeue() throws InterruptedException { final InputStreamHolder holder = queue.poll(checkQueryTimeout(), TimeUnit.MILLISECONDS); if (holder == null) { throw new RE("Query[%s] url[%s] timed out.", query.getId(), url); } final long currentQueuedByteCount = queuedByteCount.addAndGet(-holder.getLength()); if (usingBackpressure && currentQueuedByteCount < maxQueuedBytes) { long backPressureTime = Preconditions.checkNotNull(trafficCopRef.get(), "No TrafficCop, how can this be?") .resume(holder.getChunkNum()); channelSuspendedTime.addAndGet(backPressureTime); } return holder.getStream(); }
/**
 * Blocks until the syncer completes its first successful sync with the worker, allowing
 * three times the server HTTP timeout before giving up.
 *
 * @throws InterruptedException if interrupted while waiting
 * @throws RE                   if the sync does not complete within the allowed time
 */
public void waitForInitialization() throws InterruptedException
{
  final long timeoutMillis = 3 * syncer.getServerHttpTimeout();
  final boolean synced = syncer.awaitInitialization(timeoutMillis, TimeUnit.MILLISECONDS);
  if (!synced) {
    throw new RE("Failed to sync with worker[%s].", worker.getHost());
  }
}
/**
 * Fetches the overlord scaling status through the coordinator proxy and returns only the
 * HTTP status of the response.
 *
 * @throws RE wrapping any failure to reach the endpoint
 */
public HttpResponseStatus getProxiedOverlordScalingResponseStatus()
{
  try {
    final String url = StringUtils.format("%s/druid/indexer/v1/scaling", coordinator);
    final StatusResponseHolder response = makeRequest(HttpMethod.GET, url);
    return response.getStatus();
  }
  catch (Exception e) {
    throw new RE(e, "Unable to get scaling status from [%s]", coordinator);
  }
}
// Waits for the configured unannouncement-propagation delay (if positive), then stops the
// Jetty server. An interrupt during the wait is handled correctly: the flag is restored via
// Thread.currentThread().interrupt() before rethrowing as RE. Any other failure to stop
// Jetty is deliberately only logged as a warning. The trailing `}` closes an enclosing
// class whose declaration is outside this view — left byte-identical.
@Override public void stop() { try { final long unannounceDelay = config.getUnannouncePropagationDelay().toStandardDuration().getMillis(); if (unannounceDelay > 0) { log.info("Waiting %s ms for unannouncement to propagate.", unannounceDelay); Thread.sleep(unannounceDelay); } else { log.debug("Skipping unannounce wait."); } log.info("Stopping Jetty Server..."); server.stop(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RE(e, "Interrupted waiting for jetty shutdown."); } catch (Exception e) { log.warn(e, "Unable to stop Jetty server."); } } }
// Test callback: counts down the latch when the expected server ("host:8080") is removed,
// and fails fast on any other server name so unexpected removals surface as errors.
// The trailing `}` closes an enclosing anonymous class outside this view — left byte-identical.
@Override public ServerView.CallbackAction serverRemoved(DruidServer server) { if (server.getName().equals("host:8080")) { serverRemovedCalled.countDown(); return ServerView.CallbackAction.CONTINUE; } else { throw new RE("Unknown server [%s]", server.getName()); } } }
// NOTE(review): fragment — presumably inside a catch (InterruptedException ex) block whose
// header is outside this view. Unlike similar handlers elsewhere in this file, the interrupt
// flag does not appear to be restored before rethrowing; consider
// Thread.currentThread().interrupt() here — TODO confirm against the full method.
throw new RE(ex, "Interrupted while waiting for queryable server initial successful sync.");
/**
 * Uploads a task log file to the configured GCS bucket, retrying transient failures.
 *
 * @param logFile local file to upload
 * @param taskKey destination object key within the bucket
 * @throws IOException if the upload ultimately fails with an I/O error
 * @throws RE          wrapping any other failure after retries are exhausted
 */
private void pushTaskFile(final File logFile, final String taskKey) throws IOException
{
  // try-with-resources: the original leaked this stream whenever insert() or the retry
  // loop failed (and on success, too — it was never closed).
  try (final FileInputStream fileStream = new FileInputStream(logFile)) {
    final InputStreamContent mediaContent = new InputStreamContent("text/plain", fileStream);
    mediaContent.setLength(logFile.length());
    try {
      RetryUtils.retry(
          (RetryUtils.Task<Void>) () -> {
            storage.insert(config.getBucket(), taskKey, mediaContent);
            return null;
          },
          GoogleUtils::isRetryable,
          1,
          5
      );
    }
    catch (IOException e) {
      // Propagate I/O errors unchanged so callers can distinguish them.
      throw e;
    }
    catch (Exception e) {
      throw new RE(e, "Failed to upload [%s] to [%s]", logFile, taskKey);
    }
  }
}
@UsedInGeneratedCode @Override public void exitFunctionExpr(ExprParser.FunctionExprContext ctx) { String fnName = ctx.getChild(0).getText(); final List<Expr> args = ctx.getChildCount() > 3 ? (List<Expr>) nodes.get(ctx.getChild(2)) : Collections.emptyList(); Expr expr = macroTable.get(fnName, args); if (expr == null) { // Built-in functions. final Function function = Parser.getFunction(fnName); if (function == null) { throw new RE("function '%s' is not defined.", fnName); } expr = new FunctionExpr(function, fnName, args); } nodes.put(ctx, expr); }