diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
index 10ebd2e66c304..edfb98799286b 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
@@ -102,6 +102,7 @@
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -739,58 +740,6 @@ private static void validate(final String repositoryName, final String snapshotName) {
         }
     }
 
-    private static ShardGenerations buildGenerations(SnapshotsInProgress.Entry snapshot, Metadata metadata) {
-        ShardGenerations.Builder builder = ShardGenerations.builder();
-        if (snapshot.isClone()) {
-            snapshot.shardSnapshotStatusByRepoShardId().forEach((key, value) -> builder.put(key.index(), key.shardId(), value));
-        } else {
-            snapshot.shardSnapshotStatusByRepoShardId().forEach((key, value) -> {
-                final Index index = snapshot.indexByName(key.indexName());
-                if (metadata.findIndex(index).isEmpty()) {
-                    assert snapshot.partial() : "Index [" + index + "] was deleted during a snapshot but snapshot was not partial.";
-                    return;
-                }
-                builder.put(key.index(), key.shardId(), value);
-            });
-        }
-        return builder.build();
-    }
-
-    private static Metadata metadataForSnapshot(SnapshotsInProgress.Entry snapshot, Metadata metadata) {
-        final Metadata.Builder builder;
-        if (snapshot.includeGlobalState() == false) {
-            // Remove global state from the cluster state
-            builder = Metadata.builder();
-            for (IndexId index : snapshot.indices().values()) {
-                final IndexMetadata indexMetadata = metadata.getProject().index(index.getName());
-                if (indexMetadata == null) {
-                    assert snapshot.partial() : "Index [" + index + "] was deleted during a snapshot but snapshot was not partial.";
-                } else {
-                    builder.put(indexMetadata, false);
-                }
-            }
-        } else {
-            builder = Metadata.builder(metadata);
-        }
-        // Only keep those data streams in the metadata that were actually requested by the initial snapshot create operation and that have
-        // all their indices contained in the snapshot
-        final Map<String, DataStream> dataStreams = new HashMap<>();
-        final Set<String> indicesInSnapshot = snapshot.indices().keySet();
-        for (String dataStreamName : snapshot.dataStreams()) {
-            DataStream dataStream = metadata.getProject().dataStreams().get(dataStreamName);
-            if (dataStream == null) {
-                assert snapshot.partial()
-                    : "Data stream [" + dataStreamName + "] was deleted during a snapshot but snapshot was not partial.";
-            } else {
-                final DataStream reconciled = dataStream.snapshot(indicesInSnapshot, builder);
-                if (reconciled != null) {
-                    dataStreams.put(dataStreamName, reconciled);
-                }
-            }
-        }
-        return builder.dataStreams(dataStreams, filterDataStreamAliases(dataStreams, metadata.getProject().dataStreamAliases())).build();
-    }
-
     /**
      * Returns status of the currently running snapshots
      *
@@ -1428,13 +1377,115 @@ protected void doRun() {
 
             assert currentlyFinalizing.contains(snapshot.getRepository());
             assert repositoryOperations.assertNotQueued(snapshot);
-            SnapshotsInProgress.Entry entry = SnapshotsInProgress.get(clusterService.state()).snapshot(snapshot);
-            final String failure = entry.failure();
-            logger.trace("[{}] finalizing snapshot in repository, state: [{}], failure[{}]", snapshot, entry.state(), failure);
-            final ShardGenerations shardGenerations = buildGenerations(entry, metadata);
-            final List<String> finalIndices = shardGenerations.indices().stream().map(IndexId::getName).toList();
-            final Set<String> indexNames = new HashSet<>(finalIndices);
-            ArrayList<SnapshotShardFailure> shardFailures = new ArrayList<>();
+            final var entry = SnapshotsInProgress.get(clusterService.state()).snapshot(snapshot);
+            logger.trace("[{}] finalizing snapshot in repository, state: [{}], failure[{}]", snapshot, entry.state(), entry.failure());
+            final ShardGenerations shardGenerations = buildGenerations(entry);
+            final SubscribableListener<List<ActionListener<SnapshotInfo>>> snapshotListeners = new SubscribableListener<>();
+
+            ActionListener.run(
+
+                new ActionListener<RepositoryData>() {
+                    @Override
+                    public void onResponse(RepositoryData updatedRepositoryData) {
+                        // get a hold of the listeners for this snapshot here and store them in the future so they can be used
+                        // by the snapshot info callback below and won't be failed needlessly if #runNextQueuedOperation runs into
+                        // a fatal like e.g. this node stopped being the master node
+                        snapshotListeners.onResponse(endAndGetListenersToResolve(snapshot));
+                        runNextQueuedOperation(updatedRepositoryData, snapshot.getRepository(), true);
+                    }
+
+                    @Override
+                    public void onFailure(Exception e) {
+                        // we might have written the new root blob before failing here, so we must use the updated shardGenerations
+                        handleFinalizationFailure(e, shardGenerations);
+                    }
+                },
+
+                repositoryDataListener -> {
+                    final List<String> finalIndices = shardGenerations.indices().stream().map(IndexId::getName).toList();
+                    final Set<String> finalIndicesSet = Set.copyOf(finalIndices);
+                    final Repository repository = repositoriesService.repository(snapshot.getRepository());
+                    final Metadata metaForSnapshot = metadataForSnapshot(entry, prepareMetadata(entry, repository));
+                    final SnapshotInfo snapshotInfo = new SnapshotInfo(
+                        snapshot,
+                        finalIndices,
+                        entry.dataStreams().stream().filter(metaForSnapshot.getProject().dataStreams()::containsKey).toList(),
+                        entry.partial() ? onlySuccessfulFeatureStates(entry, finalIndicesSet) : entry.featureStates(),
+                        entry.failure(),
+                        threadPool.absoluteTimeInMillis(),
+                        entry.partial() ? shardGenerations.totalShards() : entry.shardSnapshotStatusByRepoShardId().size(),
+                        getSnapshotShardFailures(entry, finalIndicesSet),
+                        entry.includeGlobalState(),
+                        entry.userMetadata(),
+                        entry.startTime(),
+                        getIndexSnapshotDetailsMap(entry, finalIndices)
+                    );
+                    repository.finalizeSnapshot(
+                        new FinalizeSnapshotContext(
+                            shardGenerations,
+                            repositoryData.getGenId(),
+                            metaForSnapshot,
+                            snapshotInfo,
+                            entry.version(),
+                            repositoryDataListener,
+                            () -> snapshotListeners.addListener(new ActionListener<>() {
+                                @Override
+                                public void onResponse(List<ActionListener<SnapshotInfo>> actionListeners) {
+                                    completeListenersIgnoringException(actionListeners, snapshotInfo);
+                                    logger.info("snapshot [{}] completed with state [{}]", snapshot, snapshotInfo.state());
+                                }
+
+                                @Override
+                                public void onFailure(Exception e) {
+                                    // never fails
+                                    assert false : e;
+                                }
+                            })
+                        )
+                    );
+                }
+            );
+        }
+
+        @Override
+        public void onRejection(Exception e) {
+            if (e instanceof EsRejectedExecutionException esre && esre.isExecutorShutdown()) {
+                logger.debug("failing finalization of {} due to shutdown", snapshot);
+                handleFinalizationFailureBeforeUpdatingRootBlob(e);
+            } else {
+                onFailure(e);
+            }
+        }
+
+        @Override
+        public void onFailure(Exception e) {
+            logger.error(Strings.format("unexpected failure finalizing %s", snapshot), e);
+            assert false : new AssertionError("unexpected failure finalizing " + snapshot, e);
+            handleFinalizationFailureBeforeUpdatingRootBlob(e);
+        }
+
+        private ShardGenerations buildGenerations(SnapshotsInProgress.Entry snapshot) {
+            ShardGenerations.Builder builder = ShardGenerations.builder();
+            if (snapshot.isClone()) {
+                snapshot.shardSnapshotStatusByRepoShardId().forEach((key, value) -> builder.put(key.index(), key.shardId(), value));
+            } else {
+                snapshot.shardSnapshotStatusByRepoShardId().forEach((key, value) -> {
+                    final Index index = snapshot.indexByName(key.indexName());
+                    if (metadata.findIndex(index).isEmpty()) {
+                        assert snapshot.partial() : "Index [" + index + "] was deleted during a snapshot but snapshot was not partial.";
+                        return;
+                    }
+                    builder.put(key.index(), key.shardId(), value);
+                });
+            }
+            return builder.build();
+        }
+
+        /**
+         * @return the {@link SnapshotShardFailure}s from the {@link SnapshotsInProgress.Entry}.
+         */
+        private static List<SnapshotShardFailure> getSnapshotShardFailures(SnapshotsInProgress.Entry entry, Set<String> indexNames) {
+            final var shardFailures = new ArrayList<SnapshotShardFailure>();
             for (Map.Entry<RepositoryShardId, ShardSnapshotStatus> shardStatus : entry.shardSnapshotStatusByRepoShardId().entrySet()) {
                 RepositoryShardId shardId = shardStatus.getKey();
                 if (indexNames.contains(shardId.indexName()) == false) {
@@ -1451,183 +1502,180 @@ protected void doRun() {
                     assert state == ShardState.SUCCESS;
                 }
             }
-            final String repository = snapshot.getRepository();
-            final ListenableFuture<Metadata> metadataListener = new ListenableFuture<>();
-            final Repository repo = repositoriesService.repository(snapshot.getRepository());
+            return shardFailures;
+        }
+
+        /**
+         * @return a {@link Metadata} to include in the snapshot: either the one from the cluster state (if taking a regular snapshot) or a
+         * made-up one loaded from the repository (if cloning an existing snapshot).
+         */
+        private Metadata prepareMetadata(SnapshotsInProgress.Entry entry, Repository repository) throws IOException {
             if (entry.isClone()) {
-                // This listener is kinda unnecessary since we now always complete it synchronously. It's only here to catch exceptions.
-                // TODO simplify this.
-                ActionListener.completeWith(metadataListener, () -> {
-                    final Metadata existing = repo.getSnapshotGlobalMetadata(entry.source());
-                    final Metadata.Builder metaBuilder = Metadata.builder(existing);
-                    final Set<Index> existingIndices = new HashSet<>();
-                    for (IndexId index : entry.indices().values()) {
-                        final IndexMetadata indexMetadata = repo.getSnapshotIndexMetaData(repositoryData, entry.source(), index);
-                        existingIndices.add(indexMetadata.getIndex());
-                        metaBuilder.put(indexMetadata, false);
-                    }
-                    // remove those data streams from metadata for which we are missing indices
-                    Map<String, DataStream> dataStreamsToCopy = new HashMap<>();
-                    for (Map.Entry<String, DataStream> dataStreamEntry : existing.getProject().dataStreams().entrySet()) {
-                        if (existingIndices.containsAll(dataStreamEntry.getValue().getIndices())) {
-                            dataStreamsToCopy.put(dataStreamEntry.getKey(), dataStreamEntry.getValue());
-                        }
+                // Synthesize a Metadata for the clone by loading all the relevant metadata blobs from the repository.
+                // TODO This shouldn't be necessary: if we're using the old-fashioned metadata-per-snapshot layout then we should be able
+                // to copy these blobs one-by-one rather than loading them all on-heap at once, and if we're using the new deduplicated
+                // index metadata layout then we should just be able to refer to the same metadata UUIDs in the clone without loading
+                // anything at all.
+                final Metadata existing = repository.getSnapshotGlobalMetadata(entry.source());
+                final Metadata.Builder metaBuilder = Metadata.builder(existing);
+                final Set<Index> existingIndices = new HashSet<>();
+                for (IndexId index : entry.indices().values()) {
+                    final IndexMetadata indexMetadata = repository.getSnapshotIndexMetaData(repositoryData, entry.source(), index);
+                    existingIndices.add(indexMetadata.getIndex());
+                    metaBuilder.put(indexMetadata, false);
+                }
+                // remove those data streams from metadata for which we are missing indices
+                Map<String, DataStream> dataStreamsToCopy = new HashMap<>();
+                for (Map.Entry<String, DataStream> dataStreamEntry : existing.getProject().dataStreams().entrySet()) {
+                    if (existingIndices.containsAll(dataStreamEntry.getValue().getIndices())) {
+                        dataStreamsToCopy.put(dataStreamEntry.getKey(), dataStreamEntry.getValue());
                     }
-                    Map<String, DataStreamAlias> dataStreamAliasesToCopy = filterDataStreamAliases(
-                        dataStreamsToCopy,
-                        existing.getProject().dataStreamAliases()
-                    );
-                    metaBuilder.dataStreams(dataStreamsToCopy, dataStreamAliasesToCopy);
-                    return metaBuilder.build();
-                });
+                }
+                Map<String, DataStreamAlias> dataStreamAliasesToCopy = filterDataStreamAliases(
+                    dataStreamsToCopy,
+                    existing.getProject().dataStreamAliases()
+                );
+                metaBuilder.dataStreams(dataStreamsToCopy, dataStreamAliasesToCopy);
+                return metaBuilder.build();
             } else {
-                metadataListener.onResponse(metadata);
+                return metadata;
             }
-            metadataListener.addListener(ActionListener.wrap(meta -> {
-                assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SNAPSHOT);
-                final Metadata metaForSnapshot = metadataForSnapshot(entry, meta);
+        }
 
-                final Map<String, SnapshotInfo.IndexSnapshotDetails> indexSnapshotDetails = Maps.newMapWithExpectedSize(
-                    finalIndices.size()
-                );
-                for (Map.Entry<RepositoryShardId, ShardSnapshotStatus> shardEntry : entry.shardSnapshotStatusByRepoShardId().entrySet()) {
-                    indexSnapshotDetails.compute(shardEntry.getKey().indexName(), (indexName, current) -> {
-                        if (current == SnapshotInfo.IndexSnapshotDetails.SKIPPED) {
-                            // already found an unsuccessful shard in this index, skip this shard
-                            return current;
-                        }
+        /**
+         * @return the result of munging the original {@link Metadata} (as returned from {@link #prepareMetadata}) to remove unnecessary
+         * or invalid parts (global metadata and incomplete datastreams).
+         */
+        private static Metadata metadataForSnapshot(SnapshotsInProgress.Entry snapshot, Metadata metadata) {
+            final Metadata.Builder builder;
+            if (snapshot.includeGlobalState() == false) {
+                // Remove global state from the cluster state
+                builder = Metadata.builder();
+                for (IndexId index : snapshot.indices().values()) {
+                    final IndexMetadata indexMetadata = metadata.getProject().index(index.getName());
+                    if (indexMetadata == null) {
+                        assert snapshot.partial() : "Index [" + index + "] was deleted during a snapshot but snapshot was not partial.";
+                    } else {
+                        builder.put(indexMetadata, false);
+                    }
+                }
+            } else {
+                builder = Metadata.builder(metadata);
+            }
+            // Only keep those data streams in the metadata that were actually requested by the initial snapshot create operation and that
+            // have all their indices contained in the snapshot
+            final Map<String, DataStream> dataStreams = new HashMap<>();
+            final Set<String> indicesInSnapshot = snapshot.indices().keySet();
+            for (String dataStreamName : snapshot.dataStreams()) {
+                DataStream dataStream = metadata.getProject().dataStreams().get(dataStreamName);
+                if (dataStream == null) {
+                    assert snapshot.partial()
+                        : "Data stream [" + dataStreamName + "] was deleted during a snapshot but snapshot was not partial.";
+                } else {
+                    final DataStream reconciled = dataStream.snapshot(indicesInSnapshot, builder);
+                    if (reconciled != null) {
+                        dataStreams.put(dataStreamName, reconciled);
+                    }
+                }
+            }
+            return builder.dataStreams(dataStreams, filterDataStreamAliases(dataStreams, metadata.getProject().dataStreamAliases()))
+                .build();
+        }
 
-                        final ShardSnapshotStatus shardSnapshotStatus = shardEntry.getValue();
-                        if (shardSnapshotStatus.state() != ShardState.SUCCESS) {
-                            // first unsuccessful shard in this index found, record that this index should be skipped
-                            return SnapshotInfo.IndexSnapshotDetails.SKIPPED;
-                        }
+        /**
+         * Removes all feature states which have missing or failed shards, as they are no longer safely restorable.
+         * @param entry The "in progress" entry with a list of feature states and one or more failed shards.
+         * @param finalIndices The final list of indices in the snapshot, after any indices that were concurrently deleted are removed.
+         * @return The list of feature states which were completed successfully in the given entry.
+         */
+        private static List<SnapshotFeatureInfo> onlySuccessfulFeatureStates(SnapshotsInProgress.Entry entry, Set<String> finalIndices) {
+            assert entry.partial() : "should not try to filter feature states from a non-partial entry";
+
+            // Figure out which indices have unsuccessful shards
+            Set<String> indicesWithUnsuccessfulShards = new HashSet<>();
+            entry.shardSnapshotStatusByRepoShardId().forEach((key, value) -> {
+                final ShardState shardState = value.state();
+                if (shardState.failed() || shardState.completed() == false) {
+                    indicesWithUnsuccessfulShards.add(key.indexName());
+                }
+            });
 
-                        final ShardSnapshotResult result = shardSnapshotStatus.shardSnapshotResult();
-                        if (result == null) {
-                            // detailed result not recorded, skip this index
-                            return SnapshotInfo.IndexSnapshotDetails.SKIPPED;
-                        }
+            // Now remove any feature states which contain any of those indices, as the feature state is not intact and not safely
+            // restorable
+            return entry.featureStates()
+                .stream()
+                .filter(stateInfo -> finalIndices.containsAll(stateInfo.getIndices()))
+                .filter(stateInfo -> stateInfo.getIndices().stream().anyMatch(indicesWithUnsuccessfulShards::contains) == false)
+                .toList();
+        }
 
-                        if (current == null) {
-                            return new SnapshotInfo.IndexSnapshotDetails(1, result.getSize(), result.getSegmentCount());
-                        } else {
-                            return new SnapshotInfo.IndexSnapshotDetails(
-                                current.getShardCount() + 1,
-                                ByteSizeValue.ofBytes(current.getSize().getBytes() + result.getSize().getBytes()),
-                                Math.max(current.getMaxSegmentsPerShard(), result.getSegmentCount())
-                            );
-                        }
-                    });
-                }
-                indexSnapshotDetails.entrySet().removeIf(e -> e.getValue().getShardCount() == 0);
+        private static Map<String, SnapshotInfo.IndexSnapshotDetails> getIndexSnapshotDetailsMap(
+            SnapshotsInProgress.Entry entry,
+            List<String> finalIndices
+        ) {
+            final Map<String, SnapshotInfo.IndexSnapshotDetails> indexSnapshotDetails = Maps.newMapWithExpectedSize(finalIndices.size());
+            for (Map.Entry<RepositoryShardId, ShardSnapshotStatus> shardEntry : entry.shardSnapshotStatusByRepoShardId().entrySet()) {
+                indexSnapshotDetails.compute(shardEntry.getKey().indexName(), (indexName, current) -> {
+                    if (current == SnapshotInfo.IndexSnapshotDetails.SKIPPED) {
+                        // already found an unsuccessful shard in this index, skip this shard
+                        return current;
+                    }
 
-                final SnapshotInfo snapshotInfo = new SnapshotInfo(
-                    snapshot,
-                    finalIndices,
-                    entry.dataStreams().stream().filter(metaForSnapshot.getProject().dataStreams()::containsKey).toList(),
-                    entry.partial() ? onlySuccessfulFeatureStates(entry, finalIndices) : entry.featureStates(),
-                    failure,
-                    threadPool.absoluteTimeInMillis(),
-                    entry.partial() ? shardGenerations.totalShards() : entry.shardSnapshotStatusByRepoShardId().size(),
-                    shardFailures,
-                    entry.includeGlobalState(),
-                    entry.userMetadata(),
-                    entry.startTime(),
-                    indexSnapshotDetails
-                );
-                final ListenableFuture<List<ActionListener<SnapshotInfo>>> snapshotListeners = new ListenableFuture<>();
-                repo.finalizeSnapshot(
-                    new FinalizeSnapshotContext(
-                        shardGenerations,
-                        repositoryData.getGenId(),
-                        metaForSnapshot,
-                        snapshotInfo,
-                        entry.version(),
-                        ActionListener.wrap(updatedRepositoryData -> {
-                            // get a hold of the listeners for this snapshot here and store them in the future so they can be used
-                            // by the snapshot info callback below and won't be failed needlessly if #runNextQueuedOperation runs into
-                            // a fatal like e.g. this node stopped being the master node
-                            snapshotListeners.onResponse(endAndGetListenersToResolve(snapshot));
-                            runNextQueuedOperation(updatedRepositoryData, repository, true);
-                        },
-                            e -> handleFinalizationFailure(
-                                e,
-                                snapshot,
-                                repositoryData,
-                                // we might have written the new root blob before failing here, so we must use the updated shardGenerations
-                                shardGenerations
-                            )
-                        ),
-                        () -> snapshotListeners.addListener(new ActionListener<>() {
-                            @Override
-                            public void onResponse(List<ActionListener<SnapshotInfo>> actionListeners) {
-                                completeListenersIgnoringException(actionListeners, snapshotInfo);
-                                logger.info("snapshot [{}] completed with state [{}]", snapshot, snapshotInfo.state());
-                            }
+                    final ShardSnapshotStatus shardSnapshotStatus = shardEntry.getValue();
+                    if (shardSnapshotStatus.state() != ShardState.SUCCESS) {
+                        // first unsuccessful shard in this index found, record that this index should be skipped
+                        return SnapshotInfo.IndexSnapshotDetails.SKIPPED;
+                    }
 
-                            @Override
-                            public void onFailure(Exception e) {
-                                // never fails
-                                assert false : e;
-                            }
-                        })
-                    )
-                );
-            },
-                e -> handleFinalizationFailure(
-                    e,
-                    snapshot,
-                    repositoryData,
-                    // a failure here means the root blob was not updated, but the updated shard generation blobs are all in place so we can
-                    // use the updated shardGenerations for all pending shard snapshots
-                    shardGenerations
-                )
-            ));
-        }
+                    final ShardSnapshotResult result = shardSnapshotStatus.shardSnapshotResult();
+                    if (result == null) {
+                        // detailed result not recorded, skip this index
+                        return SnapshotInfo.IndexSnapshotDetails.SKIPPED;
+                    }
 
-        @Override
-        public void onRejection(Exception e) {
-            if (e instanceof EsRejectedExecutionException esre && esre.isExecutorShutdown()) {
-                logger.debug("failing finalization of {} due to shutdown", snapshot);
-                handleFinalizationFailure(e, snapshot, repositoryData, ShardGenerations.EMPTY);
-            } else {
-                onFailure(e);
+                    if (current == null) {
+                        return new SnapshotInfo.IndexSnapshotDetails(1, result.getSize(), result.getSegmentCount());
+                    } else {
+                        return new SnapshotInfo.IndexSnapshotDetails(
+                            current.getShardCount() + 1,
+                            ByteSizeValue.ofBytes(current.getSize().getBytes() + result.getSize().getBytes()),
+                            Math.max(current.getMaxSegmentsPerShard(), result.getSegmentCount())
+                        );
+                    }
+                });
             }
+            indexSnapshotDetails.entrySet().removeIf(e -> e.getValue().getShardCount() == 0);
+            return indexSnapshotDetails;
         }
 
-        @Override
-        public void onFailure(Exception e) {
-            logger.error(Strings.format("unexpected failure finalizing %s", snapshot), e);
-            assert false : new AssertionError("unexpected failure finalizing " + snapshot, e);
-            handleFinalizationFailure(e, snapshot, repositoryData, ShardGenerations.EMPTY);
+        private void handleFinalizationFailureBeforeUpdatingRootBlob(Exception e) {
+            // No need to update shard generations in cluster state if we didn't update the root blob, so use ShardGenerations.EMPTY
+            handleFinalizationFailure(e, ShardGenerations.EMPTY);
         }
-    }
 
-    /**
-     * Removes all feature states which have missing or failed shards, as they are no longer safely restorable.
-     * @param entry The "in progress" entry with a list of feature states and one or more failed shards.
-     * @param finalIndices The final list of indices in the snapshot, after any indices that were concurrently deleted are removed.
-     * @return The list of feature states which were completed successfully in the given entry.
-     */
-    private static List<SnapshotFeatureInfo> onlySuccessfulFeatureStates(SnapshotsInProgress.Entry entry, List<String> finalIndices) {
-        assert entry.partial() : "should not try to filter feature states from a non-partial entry";
-
-        // Figure out which indices have unsuccessful shards
-        Set<String> indicesWithUnsuccessfulShards = new HashSet<>();
-        entry.shardSnapshotStatusByRepoShardId().forEach((key, value) -> {
-            final ShardState shardState = value.state();
-            if (shardState.failed() || shardState.completed() == false) {
-                indicesWithUnsuccessfulShards.add(key.indexName());
+        /**
+         * Handles failure to finalize a snapshot. If the exception indicates that this node was unable to publish a cluster state and
+         * stopped being the master node, then fail all snapshot create and delete listeners executing on this node by delegating to
+         * {@link #failAllListenersOnMasterFailOver}. Otherwise, i.e. as a result of failing to write to the snapshot repository for some
+         * reason, remove the snapshot's {@link SnapshotsInProgress.Entry} from the cluster state and move on with other queued snapshot
+         * operations if there are any.
+         */
+        private void handleFinalizationFailure(Exception e, ShardGenerations shardGenerations) {
+            if (ExceptionsHelper.unwrap(e, NotMasterException.class, FailedToCommitClusterStateException.class) != null) {
+                // Failure due to not being master any more, don't try to remove snapshot from cluster state the next master
+                // will try ending this snapshot again
+                logger.debug(() -> "[" + snapshot + "] failed to update cluster state during snapshot finalization", e);
+                failSnapshotCompletionListeners(
+                    snapshot,
+                    new SnapshotException(snapshot, "Failed to update cluster state during snapshot finalization", e),
+                    Runnable::run
+                );
+                failAllListenersOnMasterFailOver(e);
+            } else {
+                logger.warn(() -> "[" + snapshot + "] failed to finalize snapshot", e);
+                removeFailedSnapshotFromClusterState(snapshot, e, repositoryData, shardGenerations);
             }
-        });
-
-        // Now remove any feature states which contain any of those indices, as the feature state is not intact and not safely restorable
-        return entry.featureStates()
-            .stream()
-            .filter(stateInfo -> finalIndices.containsAll(stateInfo.getIndices()))
-            .filter(stateInfo -> stateInfo.getIndices().stream().anyMatch(indicesWithUnsuccessfulShards::contains) == false)
-            .toList();
+        }
     }
 
     /**
@@ -1641,39 +1689,6 @@ private List<ActionListener<SnapshotInfo>> endAndGetListenersToResolve(Snapshot snapshot) {
         return listenersToComplete;
     }
 
-    /**
-     * Handles failure to finalize a snapshot. If the exception indicates that this node was unable to publish a cluster state and stopped
-     * being the master node, then fail all snapshot create and delete listeners executing on this node by delegating to
-     * {@link #failAllListenersOnMasterFailOver}. Otherwise, i.e. as a result of failing to write to the snapshot repository for some
-     * reason, remove the snapshot's {@link SnapshotsInProgress.Entry} from the cluster state and move on with other queued snapshot
-     * operations if there are any.
-     *
-     * @param e              exception encountered
-     * @param snapshot       snapshot that failed to finalize
-     * @param repositoryData current repository data for the snapshot's repository
-     */
-    private void handleFinalizationFailure(
-        Exception e,
-        Snapshot snapshot,
-        RepositoryData repositoryData,
-        ShardGenerations shardGenerations
-    ) {
-        if (ExceptionsHelper.unwrap(e, NotMasterException.class, FailedToCommitClusterStateException.class) != null) {
-            // Failure due to not being master any more, don't try to remove snapshot from cluster state the next master
-            // will try ending this snapshot again
-            logger.debug(() -> "[" + snapshot + "] failed to update cluster state during snapshot finalization", e);
-            failSnapshotCompletionListeners(
-                snapshot,
-                new SnapshotException(snapshot, "Failed to update cluster state during snapshot finalization", e),
-                Runnable::run
-            );
-            failAllListenersOnMasterFailOver(e);
-        } else {
-            logger.warn(() -> "[" + snapshot + "] failed to finalize snapshot", e);
-            removeFailedSnapshotFromClusterState(snapshot, e, repositoryData, shardGenerations);
-        }
-    }
-
     /**
      * Run the next queued up repository operation for the given repository name.
      *