public void handleDeleteImpl(Operation delete) throws Exception {
    // Flip the stage first so no new work is routed to this service while it
    // tears down.
    setProcessingStage(ProcessingStage.STOPPED);

    // shutdown() lets already-queued query/indexing tasks drain; it does not
    // interrupt in-flight work. The two executors are independent, so the
    // shutdown order does not matter.
    this.privateQueryExecutor.shutdown();
    this.privateIndexingExecutor.shutdown();

    getHost().stopService(this);
    delete.complete();
}
private void queryServiceHost(String selfLink, EnumSet<QueryOption> options, Operation op) { if (options.contains(QueryOption.EXPAND_CONTENT)) { // the index writers had no results, ask the host a simple prefix query // for the services, and do a manual expand op.nestCompletion(o -> { expandLinks(o, op); }); } getHost().queryServiceUris(selfLink, op); }
@Override public void handleStart(final Operation post) { super.setMaintenanceIntervalMicros(getHost().getMaintenanceIntervalMicros() * 5); // index service getUri() will be invoked on every load and save call for every operation, // so its worth caching (plus we only have a very small number of index services this.uri = post.getUri(); ExecutorService es = new ThreadPoolExecutor(QUERY_THREAD_COUNT, QUERY_THREAD_COUNT, 1, TimeUnit.MINUTES, new ArrayBlockingQueue<>(QUERY_EXECUTOR_WORK_QUEUE_CAPACITY), new NamedThreadFactory(getUri() + "/queries")); this.privateQueryExecutor = TracingExecutor.create(es, getHost().getTracer()); es = new ThreadPoolExecutor(UPDATE_THREAD_COUNT, UPDATE_THREAD_COUNT, 1, TimeUnit.MINUTES, new ArrayBlockingQueue<>(UPDATE_EXECUTOR_WORK_QUEUE_CAPACITY), new NamedThreadFactory(getUri() + "/updates")); this.privateIndexingExecutor = TracingExecutor.create(es, getHost().getTracer()); post.complete(); }
private void handleQueryTaskPatch(Operation op, QueryTask task) throws Exception { if (task.querySpec.options.contains(QueryOption.CONTINUOUS)) { if (handleContinuousQueryTaskPatch(op, task, task.querySpec)) { return; } // intentional fall through for tasks just starting and need to execute a query } ServiceDocumentQueryResult rsp = dao.queryDocuments(op, task); if (rsp == null) { rsp = new ServiceDocumentQueryResult(); rsp.queryTimeMicros = 0L; rsp.documentOwner = getHost().getId(); rsp.documentCount = 0L; if (task.querySpec.options.contains(QueryOption.EXPAND_CONTENT)) { rsp.documents = Collections.emptyMap(); } } op.setBodyNoCloning(rsp).complete(); }
/**
 * Handles an exception raised on an index service thread.
 *
 * NOTE(review): the original comment claimed this method "will attempt to
 * re-open index writer", but the visible body only logs and bumps a stat
 * counter — confirm whether recovery logic was removed intentionally and
 * update this doc accordingly. The method assumes the caller has acquired
 * the writer semaphore.
 */
private void checkFailureAndRecover(Exception e) {
    if (getHost().isStopping()) {
        // Exceptions during host shutdown are expected; log quietly and bail.
        logInfo("Exception after host stop, on index service thread: %s", e);
        return;
    }
    logSevere("Exception on index service thread: %s", Utils.toString(e));
    // NOTE(review): this stat is incremented for every exception reaching here,
    // not only "writer already closed" ones — confirm the stat name is intended.
    adjustStat(STAT_NAME_WRITER_ALREADY_CLOSED_EXCEPTION_COUNT, 1);
}
.setReferer(getHost().getUri()) .setBodyNoCloning(request) .setCompletion((op, t) -> { } else if (!rsp.ownerNodeId.equals(getHost().getId())) { logWarning("SelectOwner response from %s does not indicate that " + "local node %s is the owner for factory %s. JsonResponse: %s", r.getKey(), getHost().getId(), getSelfLink(), r.getValue()); maintOp.complete(); return; getHost().getId(), getSelfLink()); expireServicesAsOwner(maintOp, deadline); }); getHost().broadcastRequest(ServiceUriPaths.DEFAULT_NODE_SELECTOR, getSelfLink(), true, broadcastSelectOp);
if (get.isRemote() && getHost().isAuthorizationEnabled()) { get.nestCompletion((op, ex) -> { if (ex != null) { if (!QueryFilterUtils.evaluate(queryFilter, doc, getHost())) { get.fail(Operation.STATUS_CODE_FORBIDDEN); return; ServiceDocumentQueryResult rsp = dao.queryBySelfLinkPrefix(get, selfLink, options); if (rsp != null) { rsp.documentOwner = getHost().getId(); get.setBodyNoCloning(rsp).complete(); return; rsp.documents = new HashMap<>(); rsp.documentOwner = getHost().getId(); rsp.documentCount = 0L; get.setBodyNoCloning(rsp).complete();
/**
 * Resolves the subject (user self link) on whose behalf the operation runs.
 */
private String getSubject(Operation op) {
    // The system user is recognized whether or not authorization is enabled.
    if (op.getAuthorizationContext() != null && op.getAuthorizationContext().isSystemUser()) {
        return SystemUserService.SELF_LINK;
    }

    if (!getHost().isAuthorizationEnabled()) {
        // With authorization off, every non-system request runs as guest.
        return GuestUserService.SELF_LINK;
    }

    // NOTE(review): assumes the authorization context is always non-null when
    // authorization is enabled — a null context here would throw NPE; confirm
    // the host guarantees this invariant.
    return op.getAuthorizationContext().getClaims().getSubject();
}
if (getHost().isStopping()) { op.fail(new CancellationException("Host is stopping")); return;
/**
 * Permanently removes all index entries for the given self link, completes the
 * client operation, then performs best-effort cleanup: notifying active
 * (continuous) queries and broadcasting a DELETE so the running service
 * instance is stopped on every node.
 */
private void deleteAllDocumentsForSelfLink(Connection conn, String tableName,
        Operation postOrDelete, String link, ServiceDocument state)
        throws Exception {
    dao.deleteDocument(conn, tableName, link);
    // Complete the client operation as soon as the index delete succeeds; the
    // work below is best-effort and must not delay or fail the caller.
    postOrDelete.complete();
    adjustTimeSeriesStat(STAT_NAME_SERVICE_DELETE_COUNT, AGGREGATION_TYPE_SUM, 1);
    logFine("%s expired", link);
    if (state == null) {
        // No last-known state: nothing to notify or broadcast.
        return;
    }
    // Notify any registered continuous queries that this document was removed.
    applyActiveQueries(postOrDelete, state, null);
    // remove service, if its running
    // Broadcasting delete to all nodes, to make sure owner node stop the service
    // TODO: Find better solution, all nodes query for expiration and stop if service owner?
    // TODO: Consider skipping delete for IMMUTABLE and non-periodic services, since they will
    // stop on idle
    // TODO: Why handleDelete is not called? Noticed same behavior with lucene
    Operation delete = Operation.createDelete(this, state.documentSelfLink)
            .setBodyNoCloning(state)
            .disableFailureLogging(true)
            // PRAGMA_DIRECTIVE_NO_INDEX_UPDATE: the index entry is already gone
            // above; this delete only needs to stop the in-memory service.
            .addPragmaDirective(Operation.PRAGMA_DIRECTIVE_NO_INDEX_UPDATE)
            .setReferer(getUri());
    getHost().broadcastRequest(ServiceUriPaths.DEFAULT_NODE_SELECTOR, false, delete);
}