private Resolved resolveIndexPatterns(final String... requestedPatterns) { if(log.isTraceEnabled()) { log.trace("resolve requestedPatterns: "+Arrays.toString(requestedPatterns)); final SortedMap<String, AliasOrIndex> lookup = state.metaData().getAliasAndIndexLookup(); final Set<String> aliases = lookup.entrySet().stream().filter(e->e.getValue().isAlias()).map(e->e.getKey()).collect(Collectors.toSet()); try { _indices = new ArrayList<>(Arrays.asList(resolver.concreteIndexNames(state, IndicesOptions.fromOptions(false, true, true, false), requestedPatterns))); if (log.isDebugEnabled()) { log.debug("Resolved pattern {} to {}", requestedPatterns, _indices); Set<String> doubleIndices = lookup.get(al).getIndices().stream().map(a->a.getIndex().getName()).collect(Collectors.toSet()); _indices.removeAll(doubleIndices);
/**
 * Copy-style constructor: delegates to the superclass, combining the node-level
 * {@code settings}, the per-index settings taken from {@code metaData}, and the
 * registered setting definitions carried by {@code other}.
 */
private IndexScopedSettings(Settings settings, IndexScopedSettings other, IndexMetaData metaData) { super(settings, metaData.getSettings(), other); }
.actionGet() .getState() .getMetaData() .getIndices() .get(mapping.get_index()) .getMappings(); } catch (NullPointerException e) { throw new IllegalArgumentException("Not found the mapping info of index: " + mapping.get_index()); MappingMetaData mappingMetaData = mappings.get(mapping.get_type()); if (mappingMetaData == null) { throw new IllegalArgumentException("Not found the mapping info of index: " + mapping.get_index());
/**
 * Checks whether every index present in {@code other} also exists in this metadata
 * with an identical alias set.
 *
 * @param other the cluster metadata to compare against
 * @return {@code true} if each of {@code other}'s indices exists here with equal aliases,
 *         {@code false} as soon as an index is missing or its aliases differ
 */
public boolean equalsAliases(MetaData other) {
    for (ObjectCursor<IndexMetaData> cursor : other.indices().values()) {
        final IndexMetaData remoteIndex = cursor.value;
        final IndexMetaData localIndex = index(remoteIndex.getIndex());
        // Missing locally, or alias sets differ -> not equal.
        if (localIndex == null || remoteIndex.getAliases().equals(localIndex.getAliases()) == false) {
            return false;
        }
    }
    return true;
}
/**
 * Elasticsearch v6.0 no longer supports indices created pre v5.0. All indices
 * that were created before Elasticsearch v5.0 should be re-indexed in Elasticsearch 5.x
 * before they can be opened by this version of elasticsearch.
 *
 * @param indexMetaData                    the index to validate
 * @param minimumIndexCompatibilityVersion oldest creation version this node can open
 * @throws IllegalStateException if the index is open but was created before the
 *                               minimum compatible version
 */
private void checkSupportedVersion(IndexMetaData indexMetaData, Version minimumIndexCompatibilityVersion) {
    // Closed indices are not validated here; only open ones must be loadable.
    final boolean isOpen = indexMetaData.getState() == IndexMetaData.State.OPEN;
    if (isOpen && isSupportedVersion(indexMetaData, minimumIndexCompatibilityVersion) == false) {
        final String message = "The index [" + indexMetaData.getIndex() + "] was created with version ["
            + indexMetaData.getCreationVersion() + "] but the minimum compatible version is ["
            + minimumIndexCompatibilityVersion + "]. It should be re-indexed in Elasticsearch "
            + minimumIndexCompatibilityVersion.major + ".x before upgrading to " + Version.CURRENT + ".";
        throw new IllegalStateException(message);
    }
}
private void validateExistingIndex(IndexMetaData currentIndexMetaData, IndexMetaData snapshotIndexMetaData, String renamedIndex, boolean partial) { // Index exist - checking that it's closed if (currentIndexMetaData.getState() != IndexMetaData.State.CLOSE) { // TODO: Enable restore for open indices throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] because an open index with same name already exists in the cluster. " + "Either close or delete the existing index or restore the index under a different name by providing a rename pattern and replacement name"); } // Index exist - checking if it's partial restore if (partial) { throw new SnapshotRestoreException(snapshot, "cannot restore partial index [" + renamedIndex + "] because such index already exists"); } // Make sure that the number of shards is the same. That's the only thing that we cannot change if (currentIndexMetaData.getNumberOfShards() != snapshotIndexMetaData.getNumberOfShards()) { throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() + "] shards from a snapshot of index [" + snapshotIndexMetaData.getIndex().getName() + "] with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards"); } }
final IndexMetaData indexMetaData = clusterService.state().metaData().getIndices().get(requestAliasOrIndex); log.debug("{} does not exist in cluster metadata", requestAliasOrIndex); continue; final ImmutableOpenMap<String, AliasMetaData> aliases = indexMetaData.getAliases(); if(aliases != null && aliases.size() > 0) { if(log.isDebugEnabled()) { log.debug("Aliases for {}: {}", requestAliasOrIndex, aliases); final Iterator<String> it = aliases.keysIt(); while(it.hasNext()) { final String alias = it.next();
assert newIndexMetaData.getIndex().equals(index()) : "index mismatch: expected " + index() + " but was " + newIndexMetaData.getIndex(); logger.warn(() -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e); throw e; CompressedXContent incomingMappingSource = newIndexMetaData.mapping(mappingType).source(); if (logger.isDebugEnabled() && incomingMappingSource.compressed().length < 512) { logger.debug("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, incomingMappingSource.string()); } else if (logger.isTraceEnabled()) { logger.trace("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, incomingMappingSource.string());
final IndexMetaData indexMetaData = currentState.getMetaData().getIndexSafe(index); for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) { String parentType = newMapper.parentFieldMapper().type(); if (parentType.equals(mapping.value.type()) && throw new InvalidTypeNameException("Document mapping type name can't start with '_', found: [" + mappingType + "]"); MetaData.Builder builder = MetaData.builder(metaData); boolean updated = false; for (IndexMetaData indexMetaData : updateList) { final Index index = indexMetaData.getIndex(); final MapperService mapperService = indexMapperServices.get(index); CompressedXContent existingSource = null; updatedMapping = true; if (logger.isDebugEnabled()) { logger.debug("{} update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource); } else if (logger.isInfoEnabled()) { logger.info("{} update_mapping [{}]", index, mergedMapper.type()); IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData); return ClusterState.builder(currentState).metaData(builder).build(); } else { return currentState;
StringBuilder sb = new StringBuilder(); for (IndexMetaData indexMetaData : request.indices) { if (indexMetaData.getCreationVersion().before(minIndexCompatibilityVersion)) { logger.warn("ignoring dangled index [{}] on node [{}]" + " since it's created version [{}] is not supported by at least one node in the cluster minVersion [{}]", indexMetaData.getIndex(), request.fromNode, indexMetaData.getCreationVersion(), minIndexCompatibilityVersion); continue; if (currentState.metaData().hasIndex(indexMetaData.getIndex().getName())) { continue; if (currentState.metaData().hasAlias(indexMetaData.getIndex().getName())) { logger.warn("ignoring dangled index [{}] on node [{}] due to an existing alias with the same name", indexMetaData.getIndex(), request.fromNode); continue; logger.warn(() -> new ParameterizedMessage("found dangled index [{}] on node [{}]. This index cannot be " + "upgraded to the latest version, adding as closed", indexMetaData.getIndex(), request.fromNode), ex); upgradedIndexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE) .version(indexMetaData.getVersion() + 1).build(); if (upgradedIndexMetaData.getState() == IndexMetaData.State.OPEN) { routingTableBuilder.addAsFromDangling(upgradedIndexMetaData); sb.append("[").append(upgradedIndexMetaData.getIndex()).append("/").append(upgradedIndexMetaData.getState()) .append("]");
if (!event.state().blocks().isSame(event.previousState().blocks(), mappingInfo.indices == null ? Collections.EMPTY_LIST : Arrays.stream(mappingInfo.indices).map(i -> i.name).collect(Collectors.toList()))) { updateMapping = true; } else { for (ObjectCursor<IndexMetaData> cursor : event.state().metaData().indices().values()) { IndexMetaData indexMetaData = cursor.value; if (indexMetaData.keyspace().equals(this.baseCfs.metadata.ksName) && indexMetaData.mapping(ClusterService.cfNameToType(this.baseCfs.name)) != null && !indexMetaData.equals(event.previousState().metaData().index(indexMetaData.getIndex().getName()))) { updateMapping = true; break; try { mappingInfo = new ImmutableMappingInfo(event.state()); logger.debug("secondary index=[{}] metadata.version={} mappingInfo.indices={}", this.index_name, event.state().metaData().version(), mappingInfo.indices == null ? "" : Arrays.stream(mappingInfo.indices).map(i -> i.name).collect(Collectors.joining())); } catch(Exception e) { logger.error("Failed to update mapping index=["+index_name+"]", e); } finally { mappingInfoLock.writeLock().unlock();
final Set<String> excludeIndexPathIds = new HashSet<>(metaData.indices().size() + danglingIndices.size()); for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) { excludeIndexPathIds.add(cursor.value.getIndex().getUUID()); final List<IndexMetaData> indexMetaDataList = metaStateService.loadIndicesStates(excludeIndexPathIds::contains); Map<Index, IndexMetaData> newIndices = new HashMap<>(indexMetaDataList.size()); final IndexGraveyard graveyard = metaData.indexGraveyard(); for (IndexMetaData indexMetaData : indexMetaDataList) { if (metaData.hasIndex(indexMetaData.getIndex().getName())) { logger.warn("[{}] can not be imported as a dangling index, as index with same name already exists in cluster metadata", indexMetaData.getIndex()); } else if (graveyard.containsIndex(indexMetaData.getIndex())) { logger.warn("[{}] can not be imported as a dangling index, as an index with the same name and UUID exist in the " + "index tombstones. This situation is likely caused by copying over the data directory for an index " + "that was previously deleted.", indexMetaData.getIndex()); } else { logger.info("[{}] dangling index exists on local file system, but not in cluster metadata, " + "auto import to cluster state", indexMetaData.getIndex()); newIndices.put(indexMetaData.getIndex(), indexMetaData);
/**
 * Cleans dangling indices if they are already allocated on the provided meta data.
 *
 * An entry is dropped from the dangling list when the cluster metadata now contains
 * an index with the same name; a UUID mismatch is logged as a warning (the local
 * copy is ignored but not deleted), a match is logged at debug level.
 *
 * @param metaData current cluster metadata to reconcile the dangling list against
 */
void cleanupAllocatedDangledIndices(MetaData metaData) {
    // Remove through the iterator rather than via danglingIndices.remove(index):
    // mutating the map while iterating its keySet() throws ConcurrentModificationException
    // unless the map is a concurrent implementation (not visible from this block).
    for (final java.util.Iterator<Index> it = danglingIndices.keySet().iterator(); it.hasNext(); ) {
        final Index index = it.next();
        final IndexMetaData indexMetaData = metaData.index(index);
        if (indexMetaData != null && indexMetaData.getIndex().getName().equals(index.getName())) {
            if (indexMetaData.getIndex().getUUID().equals(index.getUUID()) == false) {
                logger.warn("[{}] can not be imported as a dangling index, as there is already another index " +
                    "with the same name but a different uuid. local index will be ignored (but not deleted)", index);
            } else {
                logger.debug("[{}] no longer dangling (created), removing from dangling list", index);
            }
            it.remove();
        }
    }
}
public ClusterState closeIndices(ClusterState currentState, final Index[] indices, String indicesAsString) { Set<IndexMetaData> indicesToClose = new HashSet<>(); for (Index index : indices) { final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index); if (indexMetaData.getState() != IndexMetaData.State.CLOSE) { indicesToClose.add(indexMetaData); logger.info("closing indices [{}]", indicesAsString); MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder() .blocks(currentState.blocks()); for (IndexMetaData openIndexMetadata : indicesToClose) { final String indexName = openIndexMetadata.getIndex().getName(); mdBuilder.put(IndexMetaData.builder(openIndexMetadata).state(IndexMetaData.State.CLOSE)); blocksBuilder.addIndexBlock(indexName, INDEX_CLOSED_BLOCK); RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable()); for (IndexMetaData index : indicesToClose) { rtBuilder.remove(index.getIndex().getName());
threadContext.putHeader(ConfigConstants.SG_CONF_REQUEST_HEADER, "true"); boolean searchGuardIndexExists = clusterService.state().metaData().hasConcreteIndex(this.searchguardIndex); if(clusterService.state().metaData().index(this.searchguardIndex).mapping("config") != null) { LOGGER.debug("sg index exists and was created before ES 6 (legacy layout)"); retVal.putAll(validate(legacycl.loadLegacy(configTypes.toArray(new String[0]), 5, TimeUnit.SECONDS), configTypes.size())); } else { LOGGER.debug("sg index exists and was created with ES 6 (new layout)"); retVal.putAll(validate(cl.load(configTypes.toArray(new String[0]), 5, TimeUnit.SECONDS), configTypes.size())); LOGGER.debug("sg index not exists (yet)"); retVal.putAll(validate(cl.load(configTypes.toArray(new String[0]), 30, TimeUnit.SECONDS), configTypes.size()));
/**
 * Writes the index state.
 *
 * This method is public for testing purposes.
 *
 * @param reason        why the state is being written (logged only)
 * @param indexMetaData the index metadata to persist
 * @throws IOException if persisting the state fails; the underlying cause is attached
 */
public void writeIndex(String reason, IndexMetaData indexMetaData) throws IOException {
    final Index index = indexMetaData.getIndex();
    logger.trace("[{}] writing state, reason [{}]", index, reason);
    try {
        // Persist to every data path that hosts this index.
        IndexMetaData.FORMAT.write(indexMetaData, nodeEnv.indexPaths(index));
        logger.trace("[{}] state written", index);
    } catch (Exception ex) {
        logger.warn(() -> new ParameterizedMessage("[{}]: failed to write index state", index), ex);
        throw new IOException("failed to write state for [" + index + "]", ex);
    }
}
final String localNodeId = state.nodes().getLocalNodeId(); assert localNodeId != null; RoutingNode localRoutingNode = state.getRoutingNodes().node(localNodeId); if (localRoutingNode != null) { // null e.g. if we are not a data node for (ShardRouting shardRouting : localRoutingNode) { indicesWithShards.add(shardRouting.index()); final IndexMetaData indexMetaData = state.metaData().index(index); assert indexMetaData != null || event.isNewCluster() : "index " + index + " does not exist in the cluster state, it should either " + "have been deleted or the cluster must be new"; final AllocatedIndices.IndexRemovalReason reason = indexMetaData != null && indexMetaData.getState() == IndexMetaData.State.CLOSE ? CLOSED : NO_LONGER_ASSIGNED; logger.debug("{} removing index, [{}]", index, reason); indicesService.removeIndex(index, reason, "removing index (no shards allocated)");
/**
 * Deletes an index that is not assigned to this node. This method cleans up all disk folders relating to the index
 * but does not deal with in-memory structures. For those call {@link #removeIndex(Index, IndexRemovalReason, String)}
 *
 * @param reason       why the index is being deleted (logged only)
 * @param metaData     metadata of the index whose store should be removed
 * @param clusterState current cluster state, used to confirm the index is truly gone
 */
@Override
public void deleteUnassignedIndex(String reason, IndexMetaData metaData, ClusterState clusterState) {
    if (nodeEnv.hasNodeFile() == false) {
        return; // no on-disk data for this node, nothing to clean up
    }
    final String indexName = metaData.getIndex().getName();
    try {
        // Refuse to delete a store whose index is still referenced by the cluster state.
        if (clusterState.metaData().hasIndex(indexName)) {
            final IndexMetaData index = clusterState.metaData().index(indexName);
            throw new IllegalStateException("Can't delete unassigned index store for [" + indexName + "] - it's still part of " +
                "the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]");
        }
        deleteIndexStore(reason, metaData, clusterState);
    } catch (Exception e) {
        // Best-effort cleanup: a failure here is logged, not propagated.
        logger.warn(() -> new ParameterizedMessage("[{}] failed to delete unassigned index (reason [{}])",
            metaData.getIndex(), reason), e);
    }
}
/**
 * Loads all indices states available on disk
 *
 * @param excludeIndexPathIdsPredicate index folders matching this predicate are skipped
 * @return metadata for every index folder that contained readable state
 * @throws IOException           if the on-disk state cannot be read
 * @throws IllegalStateException if a folder's name does not match its index UUID
 */
List<IndexMetaData> loadIndicesStates(Predicate<String> excludeIndexPathIdsPredicate) throws IOException {
    final List<IndexMetaData> loaded = new ArrayList<>();
    for (String folderName : nodeEnv.availableIndexFolders(excludeIndexPathIdsPredicate)) {
        assert excludeIndexPathIdsPredicate.test(folderName) == false :
            "unexpected folder " + folderName + " which should have been excluded";
        final IndexMetaData indexMetaData =
            IndexMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.resolveIndexFolder(folderName));
        if (indexMetaData == null) {
            logger.debug("[{}] failed to find metadata for existing index location", folderName);
            continue;
        }
        // Folder names are the index UUID; a mismatch means the folder was mis-copied.
        final String indexPathId = indexMetaData.getIndex().getUUID();
        if (folderName.equals(indexPathId) == false) {
            throw new IllegalStateException("[" + folderName + "] invalid index folder name, rename to [" + indexPathId + "]");
        }
        loaded.add(indexMetaData);
    }
    return loaded;
}
for (ObjectObjectCursor<String, MappingMetaData> mapping : sourceMetaData.getMappings()) { mappingUpdateConsumer.accept(mapping.key, mapping.value); final boolean isSplit = sourceMetaData.getNumberOfShards() < indexShard.indexSettings().getNumberOfShards(); assert isSplit == false || sourceMetaData.getCreationVersion().onOrAfter(Version.V_6_0_0_alpha1) : "for split we require a " + "single type but the index is created before 6.0.0"; return executeRecovery(indexShard, () -> { logger.debug("starting recovery from local shards {}", shards); try { final Directory directory = indexShard.store().directory(); // don't close this directory!!