/**
 * Attempts recovery after an exception on the index service thread.
 * The caller is expected to already hold the writer semaphore.
 *
 * @param e the failure observed on the index service thread
 */
private void checkFailureAndRecover(Exception e) {
    boolean hostShuttingDown = getHost().isStopping();
    if (hostShuttingDown) {
        // Failures during host shutdown are expected noise; do not count them.
        logInfo("Exception after host stop, on index service thread: %s", e);
        return;
    }

    logSevere("Exception on index service thread: %s", Utils.toString(e));
    adjustStat(STAT_NAME_WRITER_ALREADY_CLOSED_EXCEPTION_COUNT, 1);
}
switch (task.taskInfo.stage) { case CREATED: logWarning("Task %s is in invalid state: %s", task.taskInfo.stage); op.fail(new IllegalStateException("Stage not supported")); return true; clonedTask.querySpec = task.querySpec; clonedTask.querySpec.context.filter = QueryFilter.create(qs.query); clonedTask.querySpec.context.subjectLink = getSubject(op); this.activeQueries.put(task.documentSelfLink, clonedTask); adjustTimeSeriesStat(STAT_NAME_ACTIVE_QUERY_FILTERS, AGGREGATION_TYPE_SUM, 1); logInfo("Activated continuous query task: %s", task.documentSelfLink); break; case CANCELLED: case FINISHED: if (this.activeQueries.remove(task.documentSelfLink) != null) { adjustTimeSeriesStat(STAT_NAME_ACTIVE_QUERY_FILTERS, AGGREGATION_TYPE_SUM, -1);
/**
 * Runs document expiration on this node (the elected owner). Expiration is
 * best-effort: any failure is logged and the maintenance operation is still
 * completed so the maintenance cycle can proceed.
 *
 * @param maintOp  maintenance operation to complete when done
 * @param deadline absolute time budget (micros) for the expiration pass
 */
private void expireServicesAsOwner(Operation maintOp, long deadline) {
    try {
        applyDocumentExpirationPolicy(deadline);
    } catch (Throwable e) {
        // Never let an expiration failure stall maintenance.
        logWarning("Expiration failed: %s", e);
    }
    maintOp.complete();
}
/**
 * Applies {@code delta} to both the daily and the hourly time-series stat
 * with the given name. No-op when instrumentation is disabled.
 *
 * @param name  stat name
 * @param type  aggregation type(s) for the time series
 * @param delta amount to add to each series
 */
private void adjustTimeSeriesStat(String name, EnumSet<AggregationType> type, double delta) {
    if (!hasOption(ServiceOption.INSTRUMENTATION)) {
        return;
    }

    // Update the daily series first, then the hourly one, mirroring how
    // the stats are created.
    ServiceStat daily = ServiceStatUtils.getOrCreateDailyTimeSeriesStat(this, name, type);
    ServiceStat hourly = ServiceStatUtils.getOrCreateHourlyTimeSeriesStat(this, name, type);
    adjustStat(daily, delta);
    adjustStat(hourly, delta);
}
request.key = getSelfLink(); .setReferer(getHost().getUri()) .setBodyNoCloning(request) .setCompletion((op, t) -> { if (t != null) { logWarning("owner selection failed: %s", t); maintOp.fail(t); return; rsp = Utils.fromJson(r.getValue(), NodeSelectorService.SelectOwnerResponse.class); } catch (Exception e) { logWarning("Exception thrown in de-serializing json response: %s", e); logWarning("%s responded with '%s'", r.getKey(), r.getValue()); } else if (!rsp.ownerNodeId.equals(getHost().getId())) { logWarning("SelectOwner response from %s does not indicate that " + "local node %s is the owner for factory %s. JsonResponse: %s", r.getKey(), getHost().getId(), getSelfLink(), r.getValue()); maintOp.complete(); return; logFine("%s elected as owner for %s. Starting expire ...", getHost().getId(), getSelfLink()); expireServicesAsOwner(maintOp, deadline); }); getHost().broadcastRequest(ServiceUriPaths.DEFAULT_NODE_SELECTOR, getSelfLink(), true, broadcastSelectOp);
private void deleteAllDocumentsForSelfLink(Connection conn, String tableName, Operation postOrDelete, String link, ServiceDocument state) throws Exception { dao.deleteDocument(conn, tableName, link); postOrDelete.complete(); adjustTimeSeriesStat(STAT_NAME_SERVICE_DELETE_COUNT, AGGREGATION_TYPE_SUM, 1); logFine("%s expired", link); if (state == null) { return; } applyActiveQueries(postOrDelete, state, null); // remove service, if its running // Broadcasting delete to all nodes, to make sure owner node stop the service // TODO: Find better solution, all nodes query for expiration and stop if service owner? // TODO: Consider skipping delete for IMMUTABLE and non-periodic services, since they will // stop on idle // TODO: Why handleDelete is not called? Noticed same behavior with lucene Operation delete = Operation.createDelete(this, state.documentSelfLink) .setBodyNoCloning(state) .disableFailureLogging(true) .addPragmaDirective(Operation.PRAGMA_DIRECTIVE_NO_INDEX_UPDATE) .setReferer(getUri()); getHost().broadcastRequest(ServiceUriPaths.DEFAULT_NODE_SELECTOR, false, delete); }
private void handleUpdateRequest() { Operation op = pollUpdateOperation(); if (op == null) { return; switch (op.getAction()) { case DELETE: handleDeleteImpl(op); break; case POST: if (o != null) { if (o instanceof UpdateIndexRequest) { updateIndex(op); break; handleMaintenanceImpl(op); break; checkFailureAndRecover(e); op.fail(e); } finally {
getHost().getOperationTimeoutMicros())); selectOwnerOp.setCompletion((o, e) -> { OperationContext.restoreOperationContext(opContext); if (e != null) { logWarning("owner selection failed: %s", e); maintOp.fail(e); return; verifyFactoryOwnership(maintOp, deadline); return; expireServicesAsOwner(maintOp, deadline); }); getHost().selectOwner(ServiceUriPaths.DEFAULT_NODE_SELECTOR, getSelfLink(), selectOwnerOp);
private void handleQueryRequest() { Operation op = pollQueryOperation(); if (op == null) { return; Operation.failActionNotSupported(op); } else { handleGetImpl(op); if (sd.documentKind.equals(QueryTask.KIND)) { QueryTask task = (QueryTask) sd; handleQueryTaskPatch(op, task); break; handleDeleteRuntimeContext(op); break; checkFailureAndRecover(e); op.fail(e); } finally {
/**
 * Loads the latest document state for {@code selfLink} from the index,
 * recording query latency and a per-factory lookup count when
 * instrumentation is enabled.
 *
 * @param selfLink document self link to load
 * @return the loaded document, or whatever {@code dao.loadDocument} yields
 * @throws Exception on lookup failure
 */
private ServiceDocument getDocument(String selfLink) throws Exception {
    long begin = System.nanoTime();
    ServiceDocument document = dao.loadDocument(selfLink);

    if (!hasOption(ServiceOption.INSTRUMENTATION)) {
        return document;
    }

    long elapsedMicros = TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - begin);
    setTimeSeriesHistogramStat(STAT_NAME_QUERY_SINGLE_DURATION_MICROS,
            AGGREGATION_TYPE_AVG_MAX, elapsedMicros);

    // Track single-document lookups per factory, when a parent path exists.
    String factoryLink = UriUtils.getParentPath(selfLink);
    if (factoryLink != null) {
        adjustStat(String.format(
                STAT_NAME_SINGLE_QUERY_BY_FACTORY_COUNT_FORMAT, factoryLink), 1);
    }
    return document;
}
visitor, documentSelfLink); } catch (Exception e) { logWarning("Error getting state for %s: %s", documentSelfLink, e); deleteAllDocumentsForSelfLink(conn, tableName, dummyDelete, documentSelfLink, serviceDocument); adjustTimeSeriesStat(STAT_NAME_DOCUMENT_EXPIRATION_COUNT, AGGREGATION_TYPE_SUM, 1);
/**
 * Returns the document index service for this host: the Postgres-backed
 * implementation when enabled, otherwise the superclass default. Also flips
 * the host into remote-persistence mode when Postgres is used.
 */
@Override
protected Service createDefaultDocumentIndexService() {
    if (!this.enablePostgres) {
        return super.createDefaultDocumentIndexService();
    }

    setRemotePersistence(true);
    PostgresDocumentIndexService indexService =
            new PostgresDocumentIndexService(this, getDataSource());
    this.postgresDocumentIndexService = indexService;
    return indexService;
}
private void queryServiceHost(String selfLink, EnumSet<QueryOption> options, Operation op) { if (options.contains(QueryOption.EXPAND_CONTENT)) { // the index writers had no results, ask the host a simple prefix query // for the services, and do a manual expand op.nestCompletion(o -> { expandLinks(o, op); }); } getHost().queryServiceUris(selfLink, op); }
adjustStat(STAT_NAME_FORCED_UPDATE_DOCUMENT_DELETE_COUNT, 1); applyActiveQueries(updateOp, s, desc);
/**
 * Completes the runtime-context delete request and bumps the counter of
 * forced paginated-searcher deletions.
 *
 * @param op the delete operation to acknowledge
 */
private void handleDeleteRuntimeContext(Operation op) {
    // Acknowledge first; the stat update is purely observational.
    op.complete();
    adjustTimeSeriesStat(STAT_NAME_PAGINATED_SEARCHER_FORCE_DELETION_COUNT,
            AGGREGATION_TYPE_SUM, 1);
}
private void applyDocumentExpirationPolicy(long deadline) throws Exception { // TODO: need better solution to expire documents, this can be very slow to have // deletion in batches across tables int limit = expiredDocumentSearchThreshold; long now = Utils.getNowMicrosUtc(); for (TableDescription tableDescription : this.dao.getPostgresSchemaManager().getTableDescriptions()) { if (Utils.getSystemNowMicrosUtc() >= deadline || limit <= 0) { break; } int expired = applyDocumentExpirationPolicyForTable(tableDescription, now, deadline, limit); limit -= expired; } }
/**
 * Returns the document index service for this test host: the Postgres-backed
 * implementation when enabled (optionally wiping the database first),
 * otherwise the superclass default.
 */
@Override
protected Service createDefaultDocumentIndexService() {
    if (!enablePostgres) {
        return super.createDefaultDocumentIndexService();
    }

    DataSource dataSource = getDataSource();
    if (cleanDbOnStartup) {
        wipeDatabase(dataSource);
    }

    setRemotePersistence(true);
    this.postgresDocumentIndexService = new PostgresDocumentIndexService(this, dataSource);
    return this.postgresDocumentIndexService;
}

/** Drops all tables in the given database; failure aborts host startup. */
private void wipeDatabase(DataSource ds) {
    try {
        TestUtils.deleteAll(ds);
    } catch (SQLException e) {
        log(Level.SEVERE, "Failed to delete tables in database: %s", e);
        throw new IllegalStateException(e);
    }
}