public Callable<?> getInitializationTask() { // if we're just linking in the index on an already-built index post-restart or if the base // table is empty we've nothing to do. Otherwise, submit for building via SecondaryIndexBuilder return isBuilt() || baseCfs.isEmpty() ? null : getBuildIndexTask(); }
/**
 * Returns the index-initialization task to run at startup, or {@code null} when
 * there is nothing to do.
 */
public Callable<?> getInitializationTask()
{
    // if we're just linking in the index on an already-built index post-restart or if the base
    // table is empty we've nothing to do. Otherwise, submit for building via SecondaryIndexBuilder
    return isBuilt() || baseCfs.isEmpty() ? null : getBuildIndexTask();
}
public void migrate() { // nothing to migrate if (legacyHintsTable.isEmpty()) return; logger.info("Migrating legacy hints to new storage"); // major-compact all of the existing sstables to get rid of the tombstones + expired hints logger.info("Forcing a major compaction of {}.{} table", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_HINTS); compactLegacyHints(); // paginate over legacy hints and write them to the new storage logger.info("Writing legacy hints to the new storage"); migrateLegacyHints(); // truncate the legacy hints table logger.info("Truncating {}.{} table", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_HINTS); legacyHintsTable.truncateBlocking(); }
/**
 * Migrates hints from the legacy {@code system.hints} table into the new hint
 * storage: compacts away tombstones/expired hints, pages the remaining hints into
 * the new storage, then truncates the legacy table. A no-op when the legacy table
 * is empty.
 */
public void migrate()
{
    // nothing to migrate
    if (legacyHintsTable.isEmpty())
        return;

    logger.info("Migrating legacy hints to new storage");

    // major-compact all of the existing sstables to get rid of the tombstones + expired hints
    logger.info("Forcing a major compaction of {}.{} table", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_HINTS);
    compactLegacyHints();

    // paginate over legacy hints and write them to the new storage
    logger.info("Writing legacy hints to the new storage");
    migrateLegacyHints();

    // truncate the legacy hints table
    logger.info("Truncating {}.{} table", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_HINTS);
    legacyHintsTable.truncateBlocking();
}
public Callable<?> getInitializationTask() { // if we're just linking in the index on an already-built index post-restart or if the base // table is empty we've nothing to do. Otherwise, submit for building via SecondaryIndexBuilder return isBuilt() || baseCfs.isEmpty() ? null : getBuildIndexTask(); }
public void migrate() { // nothing to migrate if (legacyHintsTable.isEmpty()) return; logger.info("Migrating legacy hints to new storage"); // major-compact all of the existing sstables to get rid of the tombstones + expired hints logger.info("Forcing a major compaction of {}.{} table", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_HINTS); compactLegacyHints(); // paginate over legacy hints and write them to the new storage logger.info("Writing legacy hints to the new storage"); migrateLegacyHints(); // truncate the legacy hints table logger.info("Truncating {}.{} table", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_HINTS); legacyHintsTable.truncateBlocking(); }
/**
 * Delivers stored hints to the given endpoint after a series of pre-flight checks,
 * returning silently when delivery is unnecessary or cannot safely proceed:
 * empty hint store, paused handoff, schema-agreement timeout, or endpoint death.
 *
 * @param endpoint the node to deliver hints to
 */
private void deliverHintsToEndpoint(InetAddress endpoint)
{
    if (hintStore.isEmpty())
        return; // nothing to do, don't confuse users by logging a no-op handoff

    // check if hints delivery has been paused
    if (hintedHandOffPaused)
    {
        logger.debug("Hints delivery process is paused, aborting");
        return;
    }

    // Make sure the target agrees on schema before replaying mutations at it.
    logger.debug("Checking remote({}) schema before delivering hints", endpoint);
    try
    {
        waitForSchemaAgreement(endpoint);
    }
    catch (TimeoutException e)
    {
        // Schema never converged within the wait window; give up this round.
        return;
    }

    // Re-check liveness right before delivery — the endpoint may have died while we waited.
    if (!FailureDetector.instance.isAlive(endpoint))
    {
        logger.debug("Endpoint {} died before hint delivery, aborting", endpoint);
        return;
    }

    doDeliverHintsToEndpoint(endpoint);
}
@SuppressWarnings("deprecation") public static void migrate() { ColumnFamilyStore store = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.LEGACY_BATCHLOG); // nothing to migrate if (store.isEmpty()) return; logger.info("Migrating legacy batchlog to new storage"); int convertedBatches = 0; String query = String.format("SELECT id, data, written_at, version FROM %s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_BATCHLOG); int pageSize = BatchlogManager.calculatePageSize(store); UntypedResultSet rows = QueryProcessor.executeInternalWithPaging(query, pageSize); for (UntypedResultSet.Row row : rows) { if (apply(row, convertedBatches)) convertedBatches++; } if (convertedBatches > 0) Keyspace.openAndGetStore(SystemKeyspace.LegacyBatchlog).truncateBlocking(); }
@SuppressWarnings("deprecation") public static void migrate() { ColumnFamilyStore store = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.LEGACY_BATCHLOG); // nothing to migrate if (store.isEmpty()) return; logger.info("Migrating legacy batchlog to new storage"); int convertedBatches = 0; String query = String.format("SELECT id, data, written_at, version FROM %s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_BATCHLOG); int pageSize = BatchlogManager.calculatePageSize(store); UntypedResultSet rows = QueryProcessor.executeInternalWithPaging(query, pageSize); for (UntypedResultSet.Row row : rows) { if (apply(row, convertedBatches)) convertedBatches++; } if (convertedBatches > 0) Keyspace.openAndGetStore(SystemKeyspace.LegacyBatchlog).truncateBlocking(); }
@SuppressWarnings("deprecation") public static void migrate() { ColumnFamilyStore store = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.LEGACY_BATCHLOG); // nothing to migrate if (store.isEmpty()) return; logger.info("Migrating legacy batchlog to new storage"); int convertedBatches = 0; String query = String.format("SELECT id, data, written_at, version FROM %s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_BATCHLOG); int pageSize = BatchlogManager.calculatePageSize(store); UntypedResultSet rows = QueryProcessor.executeInternalWithPaging(query, pageSize); for (UntypedResultSet.Row row : rows) { if (apply(row, convertedBatches)) convertedBatches++; } if (convertedBatches > 0) Keyspace.openAndGetStore(SystemKeyspace.LegacyBatchlog).truncateBlocking(); }
/**
 * Returns the one-shot initialization task for this Elassandra secondary index.
 * The task wires the index to the Elasticsearch node's ClusterService, registers
 * a cluster-state listener, initializes the mapping, and triggers a blocking
 * index build when the base table has data and the index is not yet built.
 * The task always returns {@code null}.
 */
public Callable<?> getInitializationTask()
{
    return () -> {
        // Guard against double initialization of the same index instance.
        initCounter++;
        assert initCounter == 1 : "index initialized more than once";

        if (ElassandraDaemon.instance.node() != null)
        {
            logger.debug("Initializing elastic secondary index=[{}] hashCode={} initCounter={}", index_name, hashCode(), initCounter);

            // 2i index can be recycled by cassandra, while ES node restarted during tests, so update clusterService reference.
            clusterService = ElassandraDaemon.instance.node().injector().getInstance(ClusterService.class);
            clusterService.addListener(this);
            initMapping();

            // Avoid inter-bocking with Keyspace.open()->rebuild()->flush()->open().
            if (Keyspace.isInitialized() && !baseCfs.isEmpty() && !isBuilt())
                baseCfs.indexManager.buildIndexBlocking(this);
        }
        else
        {
            // No ES node available (e.g. Cassandra-only startup): nothing to wire up.
            clusterService = null;
        }
        return null;
    };
}