diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 6e15d64154960..9efb9c8b498aa 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -56,7 +56,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["8.16.2", "8.17.1", "8.18.0", "9.0.0"] + BWC_VERSION: ["8.16.3", "8.17.1", "8.18.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index abd11068e7a65..b1e5a7bf933c9 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -287,8 +287,8 @@ steps: env: BWC_VERSION: 8.15.5 - - label: "{{matrix.image}} / 8.16.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.2 + - label: "{{matrix.image}} / 8.16.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.3 timeout_in_minutes: 300 matrix: setup: @@ -301,7 +301,7 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.16.2 + BWC_VERSION: 8.16.3 - label: "{{matrix.image}} / 8.17.1 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.17.1 diff --git a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml index c5846a763f5e8..ea0d7b13b55b4 100644 --- a/.buildkite/pipelines/periodic-platform-support.yml +++ b/.buildkite/pipelines/periodic-platform-support.yml @@ -63,6 +63,7 @@ steps: image: - almalinux-8-aarch64 - ubuntu-2004-aarch64 + - ubuntu-2404-aarch64 GRADLE_TASK: - checkPart1 - checkPart2 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index f2d169cd2b30d..4c593bae62d7a 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -306,8 +306,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 8.16.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.16.2#bwcTest + - label: 8.16.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.16.3#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -316,7 +316,7 @@ steps: buildDirectory: /dev/shm/bk preemptible: true env: - BWC_VERSION: 8.16.2 + BWC_VERSION: 8.16.3 retry: automatic: - exit_status: "-1" @@ -448,7 +448,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk21 - BWC_VERSION: ["8.16.2", "8.17.1", "8.18.0", "9.0.0"] + BWC_VERSION: ["8.16.3", "8.17.1", "8.18.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -490,7 +490,7 @@ steps: ES_RUNTIME_JAVA: - openjdk21 - openjdk23 - BWC_VERSION: ["8.16.2", "8.17.1", "8.18.0", "9.0.0"] + BWC_VERSION: ["8.16.3", "8.17.1", "8.18.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/scripts/get-latest-test-mutes.sh b/.buildkite/scripts/get-latest-test-mutes.sh index 5721e29f1b773..1dafcebec24b1 100755 --- a/.buildkite/scripts/get-latest-test-mutes.sh +++ b/.buildkite/scripts/get-latest-test-mutes.sh @@ -1,6 +1,6 @@ #!/bin/bash -if [[ ! 
"${BUILDKITE_PULL_REQUEST:-}" || "${BUILDKITE_AGENT_META_DATA_PROVIDER:-}" == "k8s" ]]; then +if [[ "${BUILDKITE_PULL_REQUEST:-false}" == "false" || "${BUILDKITE_AGENT_META_DATA_PROVIDER:-}" == "k8s" ]]; then exit 0 fi diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 3cb983373138f..cf12ee8c15419 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -15,7 +15,7 @@ BWC_VERSION: - "8.13.4" - "8.14.3" - "8.15.5" - - "8.16.2" + - "8.16.3" - "8.17.1" - "8.18.0" - "9.0.0" diff --git a/.ci/init.gradle b/.ci/init.gradle index 3e1f23804cf98..15d63f8ca7d20 100644 --- a/.ci/init.gradle +++ b/.ci/init.gradle @@ -1,95 +1,3 @@ -import com.bettercloud.vault.VaultConfig -import com.bettercloud.vault.Vault - -initscript { - repositories { - mavenCentral() - } - dependencies { - classpath 'com.bettercloud:vault-java-driver:4.1.0' - } -} - -boolean USE_ARTIFACTORY = false - -if (System.getenv('VAULT_ADDR') == null) { - // When trying to reproduce errors outside of CI, it can be useful to allow this to just return rather than blowing up - if (System.getenv('CI') == null) { - return - } - - throw new GradleException("You must set the VAULT_ADDR environment variable to use this init script.") -} - -if (System.getenv('VAULT_ROLE_ID') == null && System.getenv('VAULT_SECRET_ID') == null && System.getenv('VAULT_TOKEN') == null) { - // When trying to reproduce errors outside of CI, it can be useful to allow this to just return rather than blowing up - if (System.getenv('CI') == null) { - return - } - - throw new GradleException("You must set either the VAULT_ROLE_ID and VAULT_SECRET_ID environment variables, " + - "or the VAULT_TOKEN environment variable to use this init script.") -} - -final String vaultPathPrefix = System.getenv('VAULT_ADDR') ==~ /.+vault-ci.+\.dev.*/ ? "secret/ci/elastic-elasticsearch/migrated" : "secret/elasticsearch-ci" - -final String vaultToken = System.getenv('VAULT_TOKEN') ?: new Vault( - new VaultConfig() - .address(System.env.VAULT_ADDR) - .engineVersion(1) - .build() -) - .withRetries(5, 1000) - .auth() - .loginByAppRole("approle", System.env.VAULT_ROLE_ID, System.env.VAULT_SECRET_ID) - .getAuthClientToken() - -final Vault vault = new Vault( - new VaultConfig() - .address(System.env.VAULT_ADDR) - .engineVersion(1) - .token(vaultToken) - .build() -) - .withRetries(5, 1000) - - -if (USE_ARTIFACTORY) { - final Map artifactoryCredentials = vault.logical() - .read("${vaultPathPrefix}/artifactory.elstc.co") - .getData() - logger.info("Using elastic artifactory repos") - Closure configCache = { - return { - name "artifactory-gradle-release" - url "https://artifactory.elstc.co/artifactory/gradle-release" - credentials { - username artifactoryCredentials.get("username") - password artifactoryCredentials.get("token") - } - } - } - settingsEvaluated { settings -> - settings.pluginManagement { - repositories { - maven configCache() - } - } - } - projectsLoaded { - allprojects { - buildscript { - repositories { - maven configCache() - } - } - repositories { - maven configCache() - } - } - } -} - gradle.settingsEvaluated { settings -> settings.pluginManager.withPlugin("com.gradle.develocity") { settings.develocity { @@ -98,14 +6,10 @@ gradle.settingsEvaluated { settings -> } } - final String buildCacheUrl = System.getProperty('org.elasticsearch.build.cache.url') final boolean buildCachePush = Boolean.valueOf(System.getProperty('org.elasticsearch.build.cache.push', 'false')) if (buildCacheUrl) { - final Map buildCacheCredentials = System.getenv("GRADLE_BUILD_CACHE_USERNAME") ? 
[:] : vault.logical() - .read("${vaultPathPrefix}/gradle-build-cache") - .getData() gradle.settingsEvaluated { settings -> settings.buildCache { local { @@ -116,11 +20,10 @@ if (buildCacheUrl) { url = buildCacheUrl push = buildCachePush credentials { - username = System.getenv("GRADLE_BUILD_CACHE_USERNAME") ?: buildCacheCredentials.get("username") - password = System.getenv("GRADLE_BUILD_CACHE_PASSWORD") ?: buildCacheCredentials.get("password") + username = System.getenv("GRADLE_BUILD_CACHE_USERNAME") + password = System.getenv("GRADLE_BUILD_CACHE_PASSWORD") } } } } } - diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index e05c0774c9819..68c6ad5601546 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,5 @@ BWC_VERSION: - - "8.16.2" + - "8.16.3" - "8.17.1" - "8.18.0" - "9.0.0" diff --git a/.gitignore b/.gitignore index d1af97cbaea3b..8b2da4dc0832a 100644 --- a/.gitignore +++ b/.gitignore @@ -69,3 +69,6 @@ testfixtures_shared/ # Generated checkstyle_ide.xml x-pack/plugin/esql/src/main/generated-src/generated/ + +# JEnv +.java-version diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java index 9aab4a3e3210f..d3259b9604717 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java @@ -27,6 +27,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; +import org.elasticsearch.xpack.esql.core.expression.FoldContext; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLikePattern; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -71,12 +72,11 @@ public class EvalBenchmark { BigArrays.NON_RECYCLING_INSTANCE ); + private static final FoldContext FOLD_CONTEXT = FoldContext.small(); + private static final int BLOCK_LENGTH = 8 * 1024; - static final DriverContext driverContext = new DriverContext( - BigArrays.NON_RECYCLING_INSTANCE, - BlockFactory.getInstance(new NoopCircuitBreaker("noop"), BigArrays.NON_RECYCLING_INSTANCE) - ); + static final DriverContext driverContext = new DriverContext(BigArrays.NON_RECYCLING_INSTANCE, blockFactory); static { // Smoke test all the expected values and force loading subclasses more like prod @@ -114,11 +114,12 @@ private static EvalOperator.ExpressionEvaluator evaluator(String operation) { return switch (operation) { case "abs" -> { FieldAttribute longField = longField(); - yield EvalMapper.toEvaluator(new Abs(Source.EMPTY, longField), layout(longField)).get(driverContext); + yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Abs(Source.EMPTY, longField), layout(longField)).get(driverContext); } case "add" -> { FieldAttribute longField = longField(); yield EvalMapper.toEvaluator( + FOLD_CONTEXT, new Add(Source.EMPTY, longField, new Literal(Source.EMPTY, 1L, DataType.LONG)), layout(longField) ).get(driverContext); @@ -126,6 +127,7 @@ private static EvalOperator.ExpressionEvaluator evaluator(String operation) { case "add_double" -> { FieldAttribute doubleField = doubleField(); yield EvalMapper.toEvaluator( + FOLD_CONTEXT, new Add(Source.EMPTY, doubleField, new Literal(Source.EMPTY, 1D, DataType.DOUBLE)), layout(doubleField) ).get(driverContext); @@ -140,7 
+142,8 @@ private static EvalOperator.ExpressionEvaluator evaluator(String operation) { lhs = new Add(Source.EMPTY, lhs, new Literal(Source.EMPTY, 1L, DataType.LONG)); rhs = new Add(Source.EMPTY, rhs, new Literal(Source.EMPTY, 1L, DataType.LONG)); } - yield EvalMapper.toEvaluator(new Case(Source.EMPTY, condition, List.of(lhs, rhs)), layout(f1, f2)).get(driverContext); + yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Case(Source.EMPTY, condition, List.of(lhs, rhs)), layout(f1, f2)) + .get(driverContext); } case "date_trunc" -> { FieldAttribute timestamp = new FieldAttribute( @@ -149,6 +152,7 @@ private static EvalOperator.ExpressionEvaluator evaluator(String operation) { new EsField("timestamp", DataType.DATETIME, Map.of(), true) ); yield EvalMapper.toEvaluator( + FOLD_CONTEXT, new DateTrunc(Source.EMPTY, new Literal(Source.EMPTY, Duration.ofHours(24), DataType.TIME_DURATION), timestamp), layout(timestamp) ).get(driverContext); @@ -156,6 +160,7 @@ private static EvalOperator.ExpressionEvaluator evaluator(String operation) { case "equal_to_const" -> { FieldAttribute longField = longField(); yield EvalMapper.toEvaluator( + FOLD_CONTEXT, new Equals(Source.EMPTY, longField, new Literal(Source.EMPTY, 100_000L, DataType.LONG)), layout(longField) ).get(driverContext); @@ -163,21 +168,21 @@ private static EvalOperator.ExpressionEvaluator evaluator(String operation) { case "long_equal_to_long" -> { FieldAttribute lhs = longField(); FieldAttribute rhs = longField(); - yield EvalMapper.toEvaluator(new Equals(Source.EMPTY, lhs, rhs), layout(lhs, rhs)).get(driverContext); + yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Equals(Source.EMPTY, lhs, rhs), layout(lhs, rhs)).get(driverContext); } case "long_equal_to_int" -> { FieldAttribute lhs = longField(); FieldAttribute rhs = intField(); - yield EvalMapper.toEvaluator(new Equals(Source.EMPTY, lhs, rhs), layout(lhs, rhs)).get(driverContext); + yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Equals(Source.EMPTY, lhs, rhs), layout(lhs, rhs)).get(driverContext); } case "mv_min", "mv_min_ascending" -> { FieldAttribute longField = longField(); - yield EvalMapper.toEvaluator(new MvMin(Source.EMPTY, longField), layout(longField)).get(driverContext); + yield EvalMapper.toEvaluator(FOLD_CONTEXT, new MvMin(Source.EMPTY, longField), layout(longField)).get(driverContext); } case "rlike" -> { FieldAttribute keywordField = keywordField(); RLike rlike = new RLike(Source.EMPTY, keywordField, new RLikePattern(".ar")); - yield EvalMapper.toEvaluator(rlike, layout(keywordField)).get(driverContext); + yield EvalMapper.toEvaluator(FOLD_CONTEXT, rlike, layout(keywordField)).get(driverContext); } default -> throw new UnsupportedOperationException(); }; diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/vector/VectorScorerBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/vector/VectorScorerBenchmark.java index b294fe97c7e7c..fdb09594a1cda 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/vector/VectorScorerBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/vector/VectorScorerBenchmark.java @@ -83,6 +83,8 @@ public class VectorScorerBenchmark { RandomVectorScorer luceneDotScorerQuery; RandomVectorScorer nativeDotScorerQuery; + RandomVectorScorer luceneSqrScorerQuery; + RandomVectorScorer nativeSqrScorerQuery; @Setup public void setup() throws IOException { @@ -130,6 +132,8 @@ public void setup() throws IOException { } luceneDotScorerQuery = luceneScorer(values, VectorSimilarityFunction.DOT_PRODUCT, queryVec); 
nativeDotScorerQuery = factory.getInt7SQVectorScorer(VectorSimilarityFunction.DOT_PRODUCT, values, queryVec).get();
+        luceneSqrScorerQuery = luceneScorer(values, VectorSimilarityFunction.EUCLIDEAN, queryVec);
+        nativeSqrScorerQuery = factory.getInt7SQVectorScorer(VectorSimilarityFunction.EUCLIDEAN, values, queryVec).get();
 
         // sanity
         var f1 = dotProductLucene();
@@ -157,6 +161,12 @@ public void setup() throws IOException {
         if (q1 != q2) {
             throw new AssertionError("query: lucene[" + q1 + "] != " + "native[" + q2 + "]");
         }
+
+        var sqr1 = squareDistanceLuceneQuery();
+        var sqr2 = squareDistanceNativeQuery();
+        if (sqr1 != sqr2) {
+            throw new AssertionError("query: lucene[" + sqr1 + "] != " + "native[" + sqr2 + "]");
+        }
     }
 
     @TearDown
@@ -217,6 +227,16 @@ public float squareDistanceScalar() {
         return 1 / (1f + adjustedDistance);
     }
 
+    @Benchmark
+    public float squareDistanceLuceneQuery() throws IOException {
+        return luceneSqrScorerQuery.score(1);
+    }
+
+    @Benchmark
+    public float squareDistanceNativeQuery() throws IOException {
+        return nativeSqrScorerQuery.score(1);
+    }
+
     QuantizedByteVectorValues vectorValues(int dims, int size, IndexInput in, VectorSimilarityFunction sim) throws IOException {
         var sq = new ScalarQuantizer(0.1f, 0.9f, (byte) 7);
         var slice = in.slice("values", 0, in.length());
diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/GitInfoPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/GitInfoPlugin.java
index 28b90714508bd..6f64edfc8c0dc 100644
--- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/GitInfoPlugin.java
+++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/GitInfoPlugin.java
@@ -10,47 +10,45 @@
 package org.elasticsearch.gradle.internal.conventions;
 
 import org.elasticsearch.gradle.internal.conventions.info.GitInfo;
+import org.elasticsearch.gradle.internal.conventions.info.GitInfoValueSource;
 import org.elasticsearch.gradle.internal.conventions.util.Util;
 import org.gradle.api.Plugin;
 import org.gradle.api.Project;
-import org.gradle.api.model.ObjectFactory;
 import org.gradle.api.provider.Property;
 import org.gradle.api.provider.Provider;
 import org.gradle.api.provider.ProviderFactory;
 
-import javax.inject.Inject;
 import java.io.File;
 
-class GitInfoPlugin implements Plugin<Project> {
+import javax.inject.Inject;
 
-    private ProviderFactory factory;
-    private ObjectFactory objectFactory;
+public abstract class GitInfoPlugin implements Plugin<Project> {
 
+    private ProviderFactory factory;
     private Provider<String> revision;
-    private Property<GitInfo> gitInfo;
 
     @Inject
-    GitInfoPlugin(ProviderFactory factory, ObjectFactory objectFactory) {
+    public GitInfoPlugin(ProviderFactory factory) {
         this.factory = factory;
-        this.objectFactory = objectFactory;
     }
 
     @Override
     public void apply(Project project) {
-        File rootDir = Util.locateElasticsearchWorkspace(project.getGradle());
-        gitInfo = objectFactory.property(GitInfo.class).value(factory.provider(() ->
-            GitInfo.gitInfo(rootDir)
-        ));
-        gitInfo.disallowChanges();
-        gitInfo.finalizeValueOnRead();
-
-        revision = gitInfo.map(info -> info.getRevision() == null ? info.getRevision() : "main");
+        File rootDir = getGitRootDir(project);
+        getGitInfo().convention(factory.of(GitInfoValueSource.class, spec -> { spec.getParameters().getPath().set(rootDir); }));
+        revision = getGitInfo().map(info -> info.getRevision() == null ? info.getRevision() : "main");
     }
 
-    public Property<GitInfo> getGitInfo() {
-        return gitInfo;
+    private static File getGitRootDir(Project project) {
+        File rootDir = project.getRootDir();
+        if (new File(rootDir, ".git").exists()) {
+            return rootDir;
+        }
+        return Util.locateElasticsearchWorkspace(project.getGradle());
     }
 
+    public abstract Property<GitInfo> getGitInfo();
+
     public Provider<String> getRevision() {
         return revision;
     }
diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/LicensingPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/LicensingPlugin.java
index ba170d083c886..63514ae671bf3 100644
--- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/LicensingPlugin.java
+++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/LicensingPlugin.java
@@ -15,9 +15,10 @@
 import org.gradle.api.provider.Provider;
 import org.gradle.api.provider.ProviderFactory;
 
-import javax.inject.Inject;
 import java.util.Map;
 
+import javax.inject.Inject;
+
 public class LicensingPlugin implements Plugin<Project> {
     static final String ELASTIC_LICENSE_URL_PREFIX = "https://raw.githubusercontent.com/elastic/elasticsearch/";
     static final String ELASTIC_LICENSE_URL_POSTFIX = "/licenses/ELASTIC-LICENSE-2.0.txt";
@@ -33,24 +34,33 @@ public LicensingPlugin(ProviderFactory providerFactory) {
     @Override
     public void apply(Project project) {
         Provider<String> revision = project.getRootProject().getPlugins().apply(GitInfoPlugin.class).getRevision();
-        Provider<String> licenseCommitProvider = providerFactory.provider(() ->
-            isSnapshotVersion(project) ? revision.get() : "v" + project.getVersion()
+        Provider<String> licenseCommitProvider = providerFactory.provider(
+            () -> isSnapshotVersion(project) ? revision.get() : "v" + project.getVersion()
         );
-        Provider<String> elasticLicenseURL = licenseCommitProvider.map(licenseCommit -> ELASTIC_LICENSE_URL_PREFIX +
-            licenseCommit + ELASTIC_LICENSE_URL_POSTFIX);
-        Provider<String> agplLicenseURL = licenseCommitProvider.map(licenseCommit -> ELASTIC_LICENSE_URL_PREFIX +
-            licenseCommit + AGPL_ELASTIC_LICENSE_URL_POSTFIX);
+        Provider<String> elasticLicenseURL = licenseCommitProvider.map(
+            licenseCommit -> ELASTIC_LICENSE_URL_PREFIX + licenseCommit + ELASTIC_LICENSE_URL_POSTFIX
+        );
+        Provider<String> agplLicenseURL = licenseCommitProvider.map(
+            licenseCommit -> ELASTIC_LICENSE_URL_PREFIX + licenseCommit + AGPL_ELASTIC_LICENSE_URL_POSTFIX
+        );
         // But stick the Elastic license url in project.ext so we can get it if we need to switch to it
         project.getExtensions().getExtraProperties().set("elasticLicenseUrl", elasticLicenseURL);
 
-        MapProperty<String, String> licensesProperty = project.getObjects().mapProperty(String.class, String.class).convention(
-            providerFactory.provider(() -> Map.of(
-                "Server Side Public License, v 1", "https://www.mongodb.com/licensing/server-side-public-license",
-                "Elastic License 2.0", elasticLicenseURL.get(),
-                "GNU Affero General Public License Version 3", agplLicenseURL.get())
+        MapProperty<String, Provider<String>> licensesProperty = project.getObjects()
+            .mapProperty(String.class, (Class<Provider<String>>) (Class<?>) Provider.class)
+            .convention(
+                providerFactory.provider(
+                    () -> Map.of(
+                        "Server Side Public License, v 1",
+                        providerFactory.provider(() -> "https://www.mongodb.com/licensing/server-side-public-license"),
+                        "Elastic License 2.0",
+                        elasticLicenseURL,
+                        "GNU Affero General Public License Version 3",
+                        agplLicenseURL
+                    )
                 )
-        );
+            );
 
         // Default to the SSPL+Elastic dual license
         project.getExtensions().getExtraProperties().set("projectLicenses", licensesProperty);
diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/PublishPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/PublishPlugin.java
index c3124812e5089..22b0ab1918024 100644
--- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/PublishPlugin.java
+++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/PublishPlugin.java
@@ -28,6 +28,7 @@
 import org.gradle.api.plugins.JavaLibraryPlugin;
 import org.gradle.api.plugins.JavaPlugin;
 import org.gradle.api.provider.MapProperty;
+import org.gradle.api.provider.Provider;
 import org.gradle.api.provider.ProviderFactory;
 import org.gradle.api.publish.PublishingExtension;
 import org.gradle.api.publish.maven.MavenPublication;
@@ -42,6 +43,7 @@
 import java.io.File;
 import java.util.Map;
 import java.util.concurrent.Callable;
+
 import javax.inject.Inject;
 
 public class PublishPlugin implements Plugin<Project> {
@@ -81,7 +83,7 @@ private void configurePublications(Project project) {
             }
         });
         @SuppressWarnings("unchecked")
-        var projectLicenses = (MapProperty<String, String>) project.getExtensions().getExtraProperties().get("projectLicenses");
+        var projectLicenses = (MapProperty<String, Provider<String>>) project.getExtensions().getExtraProperties().get("projectLicenses");
         publication.getPom().withXml(xml -> {
             var node = xml.asNode();
             node.appendNode("inceptionYear", "2009");
@@ -89,7 +91,7 @@ private void configurePublications(Project project) {
             projectLicenses.get().entrySet().stream().sorted(Map.Entry.comparingByKey()).forEach(entry -> {
                 Node license = licensesNode.appendNode("license");
                 license.appendNode("name", entry.getKey());
-                license.appendNode("url", entry.getValue());
+                license.appendNode("url", entry.getValue().get());
                 license.appendNode("distribution", "repo");
             });
             var developer = node.appendNode("developers").appendNode("developer");
@@ -194,7 +196,6 @@ static void configureSourcesJar(Project project) {
         });
     }
 
-
     /**
      * Format the generated pom files to be in a sort of reproducible order.
      */
diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/info/GitInfo.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/info/GitInfo.java
index dbd3b3f9c48ad..e6a41093205cc 100644
--- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/info/GitInfo.java
+++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/info/GitInfo.java
@@ -22,6 +22,7 @@
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.Objects;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
@@ -190,4 +191,15 @@ public String urlFromOrigin() {
         }
     }
 
+    @Override
+    public boolean equals(Object o) {
+        if (o == null || getClass() != o.getClass()) return false;
+        GitInfo gitInfo = (GitInfo) o;
+        return Objects.equals(revision, gitInfo.revision) && Objects.equals(origin, gitInfo.origin);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(revision, origin);
+    }
 }
diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/info/GitInfoValueSource.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/info/GitInfoValueSource.java
new file mode 100644
index 0000000000000..c422c2eb74e39
--- /dev/null
+++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/info/GitInfoValueSource.java
@@ -0,0 +1,22 @@
+package org.elasticsearch.gradle.internal.conventions.info;
+
+import org.gradle.api.provider.Property;
+import org.gradle.api.provider.ValueSource;
+import org.gradle.api.provider.ValueSourceParameters;
+import org.jetbrains.annotations.Nullable;
+
+import java.io.File;
+
+public abstract class GitInfoValueSource implements ValueSource<GitInfo, GitInfoValueSource.Parameters> {
+
+    @Nullable
+    @Override
+    public GitInfo obtain() {
+        File path = getParameters().getPath().get();
+        return GitInfo.gitInfo(path);
+    }
+
+    public interface Parameters extends ValueSourceParameters {
+        Property<File> getPath();
+    }
+}
diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/util/Util.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/util/Util.java
index a21606156b093..16e8af5832c56 100644
--- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/util/Util.java
+++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/util/Util.java
@@ -14,18 +14,19 @@
 import org.gradle.api.Project;
 import org.gradle.api.file.FileTree;
 import org.gradle.api.initialization.IncludedBuild;
+import org.gradle.api.internal.GradleInternal;
 import org.gradle.api.invocation.Gradle;
 import org.gradle.api.plugins.JavaPluginExtension;
 import org.gradle.api.tasks.SourceSet;
 import org.gradle.api.tasks.SourceSetContainer;
 import org.gradle.api.tasks.util.PatternFilterable;
 
-import javax.annotation.Nullable;
 import java.io.File;
-import java.util.Collection;
 import java.util.Optional;
 import java.util.function.Supplier;
 
+import javax.annotation.Nullable;
+
 public class Util {
 
     public static boolean getBooleanProperty(String property, boolean defaultValue) {
@@ -120,6 +121,14 @@ public static SourceSetContainer getJavaSourceSets(Project project) {
         return project.getExtensions().getByType(JavaPluginExtension.class).getSourceSets();
     }
 
+    public static File getRootFolder(Gradle gradle) {
+        Gradle parent = gradle.getParent();
+        if (parent == null) {
+            return gradle.getRootProject().getRootDir();
+        }
+
return getRootFolder(parent); + } + public static File locateElasticsearchWorkspace(Gradle gradle) { if (gradle.getRootProject().getName().startsWith("build-tools")) { File buildToolsParent = gradle.getRootProject().getRootDir().getParentFile(); diff --git a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties index 22286c90de3d1..e712035eabc7b 100644 --- a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties +++ b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=89d4e70e4e84e2d2dfbb63e4daa53e21b25017cc70c37e4eea31ee51fb15098a -distributionUrl=https\://services.gradle.org/distributions/gradle-8.11.1-all.zip +distributionSha256Sum=7ebdac923867a3cec0098302416d1e3c6c0c729fc4e2e05c10637a8af33a76c5 +distributionUrl=https\://services.gradle.org/distributions/gradle-8.12-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractGitAwareGradleFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractGitAwareGradleFuncTest.groovy index 01effd52dafef..f7c05894d3e95 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractGitAwareGradleFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractGitAwareGradleFuncTest.groovy @@ -24,7 +24,7 @@ abstract class AbstractGitAwareGradleFuncTest extends AbstractGradleFuncTest { def setup() { remoteGitRepo = new File(setupGitRemote(), '.git') - "git clone ${remoteGitRepo.absolutePath} cloned".execute(Collections.emptyList(), testProjectDir.root).waitFor() + execute("git clone ${remoteGitRepo.absolutePath} cloned", testProjectDir.root) buildFile = new File(testProjectDir.root, 'cloned/build.gradle') settingsFile = new File(testProjectDir.root, 'cloned/settings.gradle') } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy index 65f124e5f88e8..cc551057cd600 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/PublishPluginFuncTest.groovy @@ -45,7 +45,8 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { file("build/distributions/hello-world-1.0-javadoc.jar").exists() file("build/distributions/hello-world-1.0-sources.jar").exists() file("build/distributions/hello-world-1.0.pom").exists() - assertXmlEquals(file("build/distributions/hello-world-1.0.pom").text, """ + assertXmlEquals( + file("build/distributions/hello-world-1.0.pom").text, """ @@ -130,7 +131,8 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { file("build/distributions/hello-world-1.0-javadoc.jar").exists() file("build/distributions/hello-world-1.0-sources.jar").exists() file("build/distributions/hello-world-1.0.pom").exists() - assertXmlEquals(file("build/distributions/hello-world-1.0.pom").text, """ + assertXmlEquals( + file("build/distributions/hello-world-1.0.pom").text, """ 4.0.0 org.acme @@ -219,7 +221,8 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { 
file("build/distributions/hello-world-1.0-javadoc.jar").exists() file("build/distributions/hello-world-1.0-sources.jar").exists() file("build/distributions/hello-world-1.0.pom").exists() - assertXmlEquals(file("build/distributions/hello-world-1.0.pom").text, """ + assertXmlEquals( + file("build/distributions/hello-world-1.0.pom").text, """ 4.0.0 org.acme @@ -282,7 +285,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { esplugin { name = 'hello-world-plugin' - classname 'org.acme.HelloWorldPlugin' + classname = 'org.acme.HelloWorldPlugin' description = "shadowed es plugin" } @@ -312,7 +315,8 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { file("build/distributions/hello-world-plugin-1.0-javadoc.jar").exists() file("build/distributions/hello-world-plugin-1.0-sources.jar").exists() file("build/distributions/hello-world-plugin-1.0.pom").exists() - assertXmlEquals(file("build/distributions/hello-world-plugin-1.0.pom").text, """ + assertXmlEquals( + file("build/distributions/hello-world-plugin-1.0.pom").text, """ @@ -371,7 +375,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { esplugin { name = 'hello-world-plugin' - classname 'org.acme.HelloWorldPlugin' + classname = 'org.acme.HelloWorldPlugin' description = "custom project description" } @@ -389,7 +393,8 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { then: result.task(":generatePom").outcome == TaskOutcome.SUCCESS file("build/distributions/hello-world-plugin-2.0.pom").exists() - assertXmlEquals(file("build/distributions/hello-world-plugin-2.0.pom").text, """ + assertXmlEquals( + file("build/distributions/hello-world-plugin-2.0.pom").text, """ @@ -439,7 +444,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { // scm info only added for internal builds internalBuild() buildFile << """ - buildParams.setGitOrigin("https://some-repo.com/repo.git") + buildParams.setGitOrigin(project.providers.provider(() -> "https://some-repo.com/repo.git")) apply plugin:'elasticsearch.java' apply plugin:'elasticsearch.publish' @@ -447,7 +452,7 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { group = 'org.acme' description = "just a test project" - ext.projectLicenses.set(['The Apache Software License, Version 2.0': 'http://www.apache.org/licenses/LICENSE-2.0']) + ext.projectLicenses.set(['The Apache Software License, Version 2.0': project.providers.provider(() -> 'http://www.apache.org/licenses/LICENSE-2.0')]) """ when: @@ -456,7 +461,8 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { then: result.task(":generatePom").outcome == TaskOutcome.SUCCESS file("build/distributions/hello-world-1.0.pom").exists() - assertXmlEquals(file("build/distributions/hello-world-1.0.pom").text, """ + assertXmlEquals( + file("build/distributions/hello-world-1.0.pom").text, """ @@ -493,15 +499,15 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest { private boolean assertXmlEquals(String toTest, String expected) { def diff = DiffBuilder.compare(Input.fromString(expected)) - .ignoreWhitespace() - .ignoreComments() - .normalizeWhitespace() - .withTest(Input.fromString(toTest)) - .build() + .ignoreWhitespace() + .ignoreComments() + .normalizeWhitespace() + .withTest(Input.fromString(toTest)) + .build() diff.differences.each { difference -> println difference } - if(diff.differences.size() > 0) { + if (diff.differences.size() > 0) { println """ given: $toTest """ diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle 
b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle index 847eda7a355c0..797dc8bd0641b 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle @@ -98,8 +98,8 @@ develocity { link 'Source', "${prBaseUrl}/tree/${System.getenv('BUILDKITE_COMMIT')}" link 'Pull Request', "https://github.com/${repository}/pull/${prId}" } else { - value 'Git Commit ID', gitRevision - link 'Source', "https://github.com/${repository}/tree/${gitRevision}" + value 'Git Commit ID', gitRevision.get() + link 'Source', "https://github.com/${repository}/tree/${gitRevision.get()}" } buildFinished { result -> diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index 895cca2af7967..90a4f74b5e9f4 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -10,6 +10,8 @@ import org.elasticsearch.gradle.util.Pair import org.elasticsearch.gradle.util.GradleUtils import org.elasticsearch.gradle.internal.test.TestUtil +import org.elasticsearch.gradle.internal.idea.EnablePreviewFeaturesTask +import org.elasticsearch.gradle.internal.idea.IdeaXmlUtil import org.jetbrains.gradle.ext.JUnit import java.nio.file.Files @@ -24,12 +26,17 @@ allprojects { } } +interface Injected { + @Inject FileSystemOperations getFs() +} + // Applying this stuff, particularly the idea-ext plugin, has a cost so avoid it unless we're running in the IDE if (providers.systemProperty('idea.active').getOrNull() == 'true') { project.apply(plugin: org.jetbrains.gradle.ext.IdeaExtPlugin) def elasticsearchProject = locateElasticsearchWorkspace(gradle) + def rootFolder = project.rootDir tasks.register('configureIdeCheckstyle') { group = 'ide' description = 'Generated a suitable checkstyle config for IDEs' @@ -39,10 +46,10 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { String checkstyleConfig = "${resources}/checkstyle.xml" String checkstyleSuppressions = "${resources}/checkstyle_suppressions.xml" String checkstyleIdeFragment = "${resources}/checkstyle_ide_fragment.xml" - String checkstyleIdeConfig = "${rootDir}/checkstyle_ide.xml" + String checkstyleIdeConfig = "${rootFolder}/checkstyle_ide.xml" String checkstylePluginConfigTemplate = "${resources}/checkstyle-idea.xml" - String checkstylePluginConfig = "${rootDir}/.idea/checkstyle-idea.xml" + String checkstylePluginConfig = "${rootFolder}/.idea/checkstyle-idea.xml" inputs.files( file(checkstyleConfig), @@ -53,31 +60,33 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { file(checkstyleIdeConfig), file(checkstylePluginConfig) ) + def injected = project.objects.newInstance(Injected) + def projectFolder = project.layout.projectDirectory.asFile doLast { // Configure the IntelliJ Checkstyle plugin by copying a standard file. We don't simply commit // the result to version control, because the plugin has a habit of modifying the file and // replacing the `$PROJECT_DIR$` placeholders, which developers must then revert. 
- project.copy { + injected.fs.copy { from(checkstylePluginConfigTemplate) - into("${rootDir}/.idea") + into("${rootFolder}/.idea") expand(jarLocation: buildConventionsJar, configLocation: checkstyleIdeConfig) } // Create an IDE-specific checkstyle config by first copying the standard config Files.copy( - Paths.get(file(checkstyleConfig).getPath()), - Paths.get(file(checkstyleIdeConfig).getPath()), + Paths.get(new File(checkstyleConfig).getPath()), + Paths.get(new File(checkstyleIdeConfig).getPath()), StandardCopyOption.REPLACE_EXISTING ) // There are some rules that we only want to enable in an IDE. These // are extracted to a separate file, and merged into the IDE-specific // Checkstyle config. - Node xmlFragment = parseXml(checkstyleIdeFragment) + Node xmlFragment = IdeaXmlUtil.parseXml(checkstyleIdeFragment) // Edit the copy so that IntelliJ can copy with it - modifyXml(checkstyleIdeConfig, { xml -> + IdeaXmlUtil.modifyXml(checkstyleIdeConfig, { xml -> // Add all the nodes from the fragment file Node treeWalker = xml.module.find { it.'@name' == 'TreeWalker' } xmlFragment.module.each { treeWalker.append(it) } @@ -103,7 +112,7 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { description = 'Configures the appropriate JVM for Gradle' doLast { - modifyXml('.idea/gradle.xml') { xml -> + IdeaXmlUtil.modifyXml('.idea/gradle.xml') { xml -> def gradleSettings = xml.component.find { it.'@name' == 'GradleSettings' }.option[0].GradleProjectSettings // Remove configured JVM option to force IntelliJ to use the project JDK for Gradle gradleSettings.option.findAll { it.'@name' == 'gradleJvm' }.each { it.parent().remove(it) } @@ -127,7 +136,7 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { description = 'Enable per-module *.iml files' doLast { - modifyXml('.idea/misc.xml') {xml -> + IdeaXmlUtil.modifyXml('.idea/misc.xml') {xml -> def externalStorageConfig = xml.component.find { it.'@name' == 'ExternalStorageConfigurationManager' } if (externalStorageConfig) { xml.remove(externalStorageConfig) @@ -137,19 +146,10 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { } // modifies the idea module config to enable preview features on ':libs:native' module - tasks.register("enablePreviewFeatures") { + tasks.register("enablePreviewFeatures", EnablePreviewFeaturesTask) { group = 'ide' description = 'Enables preview features on native library module' dependsOn tasks.named("enableExternalConfiguration") - - ext { - enablePreview = { moduleFile, languageLevel -> - modifyXml(moduleFile) { xml -> - xml.component.find { it.'@name' == 'NewModuleRootManager' }?.'@LANGUAGE_LEVEL' = languageLevel - } - } - } - doLast { enablePreview('.idea/modules/libs/native/elasticsearch.libs.native.main.iml', 'JDK_21_PREVIEW') enablePreview('.idea/modules/libs/native/elasticsearch.libs.native.test.iml', 'JDK_21_PREVIEW') @@ -159,7 +159,7 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { tasks.register('buildDependencyArtifacts') { group = 'ide' description = 'Builds artifacts needed as dependency for IDE modules' - dependsOn([':plugins:repository-hdfs:hadoop-client-api:shadowJar', + dependsOn([':plugins:repository-hdfs:hadoop-client-api:jar', ':x-pack:plugin:esql:compute:ann:jar', ':x-pack:plugin:esql:compute:gen:jar', ':server:generateModulesList', @@ -270,42 +270,6 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { } } -/** - * Parses a given XML file, applies a set of changes, and writes those changes back to the original 
file. - * - * @param path Path to existing XML file - * @param action Action to perform on parsed XML document - * @param preface optional front matter to add after the XML declaration - * but before the XML document, e.g. a doctype or comment - */ -void modifyXml(Object path, Action action, String preface = null) { - if (project.file(path).exists()) { - Node xml = parseXml(path) - action.execute(xml) - - File xmlFile = project.file(path) - xmlFile.withPrintWriter { writer -> - def printer = new XmlNodePrinter(writer) - printer.namespaceAware = true - printer.preserveWhitespace = true - writer.write("\n") - - if (preface != null) { - writer.write(preface) - } - printer.print(xml) - } - } -} - -Node parseXml(Object path) { - File xmlFile = project.file(path) - XmlParser xmlParser = new XmlParser(false, true, true) - xmlParser.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false) - Node xml = xmlParser.parse(xmlFile) - return xml -} - Pair locateElasticsearchWorkspace(Gradle gradle) { if (gradle.parent == null) { // See if any of these included builds is the Elasticsearch gradle diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java index d54eb798ce783..6dfb337a22ac4 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java @@ -22,7 +22,7 @@ public enum DockerBase { // Chainguard based wolfi image with latest jdk // This is usually updated via renovatebot // spotless:off - WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:1b51ff6dba78c98d3e02b0cd64a8ce3238c7a40408d21e3af12a329d44db6f23", + WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:eef54b3a414aa53b98f0f8df2633aed83c3ba6230722769282925442968f0364", "-wolfi", "apk" ), diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java index e62c26c7fbc01..a4477d049460c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaPlugin.java @@ -27,15 +27,20 @@ import org.gradle.api.plugins.JavaLibraryPlugin; import org.gradle.api.plugins.JavaPlugin; import org.gradle.api.provider.Property; +import org.gradle.api.provider.Provider; import org.gradle.api.tasks.TaskProvider; import org.gradle.api.tasks.bundling.Jar; import org.gradle.api.tasks.javadoc.Javadoc; import org.gradle.external.javadoc.CoreJavadocOptions; +import org.gradle.jvm.toolchain.JavaLanguageVersion; +import org.gradle.jvm.toolchain.JavaToolchainService; import org.gradle.language.base.plugins.LifecycleBasePlugin; import java.io.File; import java.util.Map; +import javax.inject.Inject; + import static org.elasticsearch.gradle.internal.conventions.util.Util.toStringable; import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; @@ -44,6 +49,14 @@ * common configuration for production code. 
  */
 public class ElasticsearchJavaPlugin implements Plugin<Project> {
+
+    private final JavaToolchainService javaToolchains;
+
+    @Inject
+    ElasticsearchJavaPlugin(JavaToolchainService javaToolchains) {
+        this.javaToolchains = javaToolchains;
+    }
+
     @Override
     public void apply(Project project) {
         project.getRootProject().getPlugins().apply(GlobalBuildInfoPlugin.class);
@@ -55,7 +68,7 @@ public void apply(Project project) {
         // configureConfigurations(project);
         configureJars(project, buildParams.get());
         configureJarManifest(project, buildParams.get());
-        configureJavadoc(project);
+        configureJavadoc(project, buildParams.get());
 
         testCompileOnlyDeps(project);
     }
@@ -108,12 +121,12 @@ public void execute(Task task) {
     }
 
     private static void configureJarManifest(Project project, BuildParameterExtension buildParams) {
-        String gitOrigin = buildParams.getGitOrigin();
-        String gitRevision = buildParams.getGitRevision();
+        Provider<String> gitOrigin = buildParams.getGitOrigin();
+        Provider<String> gitRevision = buildParams.getGitRevision();
 
         project.getPlugins().withType(InfoBrokerPlugin.class).whenPluginAdded(manifestPlugin -> {
-            manifestPlugin.add("Module-Origin", toStringable(() -> gitOrigin));
-            manifestPlugin.add("Change", toStringable(() -> gitRevision));
+            manifestPlugin.add("Module-Origin", toStringable(() -> gitOrigin.get()));
+            manifestPlugin.add("Change", toStringable(() -> gitRevision.get()));
             manifestPlugin.add("X-Compile-Elasticsearch-Version", toStringable(VersionProperties::getElasticsearch));
             manifestPlugin.add("X-Compile-Lucene-Version", toStringable(VersionProperties::getLucene));
             manifestPlugin.add(
@@ -128,7 +141,7 @@ private static void configureJarManifest(Project project, BuildParameterExtensio
         project.getPluginManager().apply("nebula.info-jar");
     }
 
-    private static void configureJavadoc(Project project) {
+    private void configureJavadoc(Project project, BuildParameterExtension buildParams) {
         project.getTasks().withType(Javadoc.class).configureEach(javadoc -> {
             /*
              * Generate docs using html5 to suppress a warning from `javadoc`
@@ -136,6 +149,10 @@ private static void configureJavadoc(Project project) {
              */
             CoreJavadocOptions javadocOptions = (CoreJavadocOptions) javadoc.getOptions();
             javadocOptions.addBooleanOption("html5", true);
+
+            javadoc.getJavadocTool().set(javaToolchains.javadocToolFor(spec -> {
+                spec.getLanguageVersion().set(JavaLanguageVersion.of(buildParams.getMinimumRuntimeVersion().getMajorVersion()));
+            }));
         });
 
         TaskProvider<Javadoc> javadoc = project.getTasks().withType(Javadoc.class).named("javadoc");
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java
index f7e2f3d0d6c30..c2547b72e21fa 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java
@@ -134,14 +134,14 @@ private void registerEmptyDirectoryTasks(Project project) {
         });
 
         File pluginsDir = new File(project.getBuildDir(), "plugins-hack/plugins");
-        project.getExtensions().add("pluginsDir", pluginsDir);
+        project.getExtensions().getExtraProperties().set("pluginsDir", pluginsDir);
         project.getTasks().register("createPluginsDir", EmptyDirTask.class, t -> {
             t.setDir(pluginsDir);
             t.setDirMode(0755);
         });
 
         File jvmOptionsDir = new File(project.getBuildDir(),
"jvm-options-hack/jvm.options.d"); - project.getExtensions().add("jvmOptionsDir", jvmOptionsDir); + project.getExtensions().getExtraProperties().set("jvmOptionsDir", jvmOptionsDir); project.getTasks().register("createJvmOptionsDir", EmptyDirTask.class, t -> { t.setDir(jvmOptionsDir); t.setDirMode(0750); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index da26cb66122ad..c38ea5b4f0850 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -17,12 +17,12 @@ import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.Task; +import org.gradle.api.file.FileSystemOperations; import org.gradle.api.file.ProjectLayout; import org.gradle.api.model.ObjectFactory; import org.gradle.api.plugins.JvmToolchainsPlugin; import org.gradle.api.provider.Provider; import org.gradle.api.provider.ProviderFactory; -import org.gradle.api.tasks.Copy; import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.TaskProvider; import org.gradle.jvm.toolchain.JavaToolchainService; @@ -54,11 +54,17 @@ public class InternalDistributionBwcSetupPlugin implements Plugin { private final ObjectFactory objectFactory; private ProviderFactory providerFactory; private JavaToolchainService toolChainService; + private FileSystemOperations fileSystemOperations; @Inject - public InternalDistributionBwcSetupPlugin(ObjectFactory objectFactory, ProviderFactory providerFactory) { + public InternalDistributionBwcSetupPlugin( + ObjectFactory objectFactory, + ProviderFactory providerFactory, + FileSystemOperations fileSystemOperations + ) { this.objectFactory = objectFactory; this.providerFactory = providerFactory; + this.fileSystemOperations = fileSystemOperations; } @Override @@ -76,7 +82,8 @@ public void apply(Project project) { providerFactory, objectFactory, toolChainService, - isCi + isCi, + fileSystemOperations ); }); } @@ -88,7 +95,8 @@ private static void configureBwcProject( ProviderFactory providerFactory, ObjectFactory objectFactory, JavaToolchainService toolChainService, - Boolean isCi + Boolean isCi, + FileSystemOperations fileSystemOperations ) { ProjectLayout layout = project.getLayout(); Provider versionInfoProvider = providerFactory.provider(() -> versionInfo); @@ -120,11 +128,20 @@ private static void configureBwcProject( List distributionProjects = resolveArchiveProjects(checkoutDir.get(), bwcVersion.get()); // Setup gradle user home directory - project.getTasks().register("setupGradleUserHome", Copy.class, copy -> { - copy.into(project.getGradle().getGradleUserHomeDir().getAbsolutePath() + "-" + project.getName()); - copy.from(project.getGradle().getGradleUserHomeDir().getAbsolutePath(), copySpec -> { - copySpec.include("gradle.properties"); - copySpec.include("init.d/*"); + // We don't use a normal `Copy` task here as snapshotting the entire gradle user home is very expensive. 
This task is cheap, so + // up-to-date checking doesn't buy us much + project.getTasks().register("setupGradleUserHome", task -> { + File gradleUserHome = project.getGradle().getGradleUserHomeDir(); + String projectName = project.getName(); + task.doLast(t -> { + fileSystemOperations.copy(copy -> { + String absoluteGradleUserHomePath = gradleUserHome.getAbsolutePath(); + copy.into(absoluteGradleUserHomePath + "-" + projectName); + copy.from(absoluteGradleUserHomePath, copySpec -> { + copySpec.include("gradle.properties"); + copySpec.include("init.d/*"); + }); + }); }); }); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java index 7c488e6e73fee..5402e0a04fe8f 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/MrjarPlugin.java @@ -86,14 +86,14 @@ public void apply(Project project) { configurePreviewFeatures(project, javaExtension.getSourceSets().getByName(SourceSet.TEST_SOURCE_SET_NAME), 21); for (int javaVersion : mainVersions) { String mainSourceSetName = SourceSet.MAIN_SOURCE_SET_NAME + javaVersion; - SourceSet mainSourceSet = addSourceSet(project, javaExtension, mainSourceSetName, mainSourceSets, javaVersion); + SourceSet mainSourceSet = addSourceSet(project, javaExtension, mainSourceSetName, mainSourceSets, javaVersion, true); configureSourceSetInJar(project, mainSourceSet, javaVersion); addJar(project, mainSourceSet, javaVersion); mainSourceSets.add(mainSourceSetName); testSourceSets.add(mainSourceSetName); String testSourceSetName = SourceSet.TEST_SOURCE_SET_NAME + javaVersion; - SourceSet testSourceSet = addSourceSet(project, javaExtension, testSourceSetName, testSourceSets, javaVersion); + SourceSet testSourceSet = addSourceSet(project, javaExtension, testSourceSetName, testSourceSets, javaVersion, false); testSourceSets.add(testSourceSetName); createTestTask(project, buildParams, testSourceSet, javaVersion, mainSourceSets); } @@ -121,7 +121,8 @@ private SourceSet addSourceSet( JavaPluginExtension javaExtension, String sourceSetName, List parentSourceSets, - int javaVersion + int javaVersion, + boolean isMainSourceSet ) { SourceSet sourceSet = javaExtension.getSourceSets().maybeCreate(sourceSetName); for (String parentSourceSetName : parentSourceSets) { @@ -135,6 +136,13 @@ private SourceSet addSourceSet( CompileOptions compileOptions = compileTask.getOptions(); compileOptions.getRelease().set(javaVersion); }); + if (isMainSourceSet) { + project.getTasks().create(sourceSet.getJavadocTaskName(), Javadoc.class, javadocTask -> { + javadocTask.getJavadocTool().set(javaToolchains.javadocToolFor(spec -> { + spec.getLanguageVersion().set(JavaLanguageVersion.of(javaVersion)); + })); + }); + } configurePreviewFeatures(project, sourceSet, javaVersion); // Since we configure MRJAR sourcesets to allow preview apis, class signatures for those diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java index 28776f03d17e8..c44b1212e6489 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java +++ 
b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java @@ -14,14 +14,14 @@ import java.util.List; public class InternalElasticsearchDistributionTypes { - public static ElasticsearchDistributionType DEB = new DebElasticsearchDistributionType(); - public static ElasticsearchDistributionType RPM = new RpmElasticsearchDistributionType(); - public static ElasticsearchDistributionType DOCKER = new DockerElasticsearchDistributionType(); - public static ElasticsearchDistributionType DOCKER_IRONBANK = new DockerIronBankElasticsearchDistributionType(); - public static ElasticsearchDistributionType DOCKER_CLOUD_ESS = new DockerCloudEssElasticsearchDistributionType(); - public static ElasticsearchDistributionType DOCKER_WOLFI = new DockerWolfiElasticsearchDistributionType(); + public static final ElasticsearchDistributionType DEB = new DebElasticsearchDistributionType(); + public static final ElasticsearchDistributionType RPM = new RpmElasticsearchDistributionType(); + public static final ElasticsearchDistributionType DOCKER = new DockerElasticsearchDistributionType(); + public static final ElasticsearchDistributionType DOCKER_IRONBANK = new DockerIronBankElasticsearchDistributionType(); + public static final ElasticsearchDistributionType DOCKER_CLOUD_ESS = new DockerCloudEssElasticsearchDistributionType(); + public static final ElasticsearchDistributionType DOCKER_WOLFI = new DockerWolfiElasticsearchDistributionType(); - public static List ALL_INTERNAL = List.of( + public static final List ALL_INTERNAL = List.of( DEB, RPM, DOCKER, diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/EnablePreviewFeaturesTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/EnablePreviewFeaturesTask.java new file mode 100644 index 0000000000000..f8c8b5127827f --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/EnablePreviewFeaturesTask.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.gradle.internal.idea; + +import groovy.util.Node; +import groovy.util.NodeList; + +import org.gradle.api.DefaultTask; +import org.xml.sax.SAXException; + +import java.io.IOException; + +import javax.xml.parsers.ParserConfigurationException; + +public class EnablePreviewFeaturesTask extends DefaultTask { + + public void enablePreview(String moduleFile, String languageLevel) throws IOException, ParserConfigurationException, SAXException { + IdeaXmlUtil.modifyXml(moduleFile, xml -> { + // Find the 'component' node + NodeList nodes = (NodeList) xml.depthFirst(); + Node componentNode = null; + for (Object node : nodes) { + Node currentNode = (Node) node; + if ("component".equals(currentNode.name()) && "NewModuleRootManager".equals(currentNode.attribute("name"))) { + componentNode = currentNode; + break; + } + } + + // Add the attribute to the 'component' node + if (componentNode != null) { + componentNode.attributes().put("LANGUAGE_LEVEL", languageLevel); + } + }); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/IdeaXmlUtil.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/IdeaXmlUtil.java new file mode 100644 index 0000000000000..b7cc2862a0af1 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/IdeaXmlUtil.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.gradle.internal.idea; + +import groovy.util.Node; +import groovy.util.XmlParser; +import groovy.xml.XmlNodePrinter; + +import org.gradle.api.Action; +import org.xml.sax.SAXException; + +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; + +import javax.xml.parsers.ParserConfigurationException; + +public class IdeaXmlUtil { + + static Node parseXml(String xmlPath) throws IOException, SAXException, ParserConfigurationException { + File xmlFile = new File(xmlPath); + XmlParser xmlParser = new XmlParser(false, true, true); + xmlParser.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false); + Node xml = xmlParser.parse(xmlFile); + return xml; + } + + /** + * Parses a given XML file, applies a set of changes, and writes those changes back to the original file. + * + * @param path Path to existing XML file + * @param action Action to perform on parsed XML document + * but before the XML document, e.g. a doctype or comment + */ + static void modifyXml(String xmlPath, Action action) throws IOException, ParserConfigurationException, SAXException { + modifyXml(xmlPath, action, null); + } + + /** + * Parses a given XML file, applies a set of changes, and writes those changes back to the original file. + * + * @param path Path to existing XML file + * @param action Action to perform on parsed XML document + * @param preface optional front matter to add after the XML declaration + * but before the XML document, e.g. 
a doctype or comment + */ + static void modifyXml(String xmlPath, Action action, String preface) throws IOException, ParserConfigurationException, + SAXException { + File xmlFile = new File(xmlPath); + if (xmlFile.exists()) { + Node xml = parseXml(xmlPath); + action.execute(xml); + + try (PrintWriter writer = new PrintWriter(xmlFile)) { + var printer = new XmlNodePrinter(writer); + printer.setNamespaceAware(true); + printer.setPreserveWhitespace(true); + writer.write("\n"); + if (preface != null) { + writer.write(preface); + } + printer.print(xml); + } + } + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParameterExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParameterExtension.java index e80dc6ef1b44c..ef9055b3728d3 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParameterExtension.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParameterExtension.java @@ -46,9 +46,9 @@ public interface BuildParameterExtension { Provider getRuntimeJavaDetails(); - String getGitRevision(); + Provider getGitRevision(); - String getGitOrigin(); + Provider getGitOrigin(); ZonedDateTime getBuildDate(); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/DefaultBuildParameterExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/DefaultBuildParameterExtension.java index faac406d974c6..283c02428e4e6 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/DefaultBuildParameterExtension.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/DefaultBuildParameterExtension.java @@ -36,7 +36,7 @@ public abstract class DefaultBuildParameterExtension implements BuildParameterEx private final Provider runtimeJavaVersion; private final Provider> javaToolChainSpec; private final Provider runtimeJavaDetails; - private final String gitRevision; + private final Provider gitRevision; private transient AtomicReference buildDate = new AtomicReference<>(); private final String testSeed; @@ -46,7 +46,7 @@ public abstract class DefaultBuildParameterExtension implements BuildParameterEx // not final for testing private Provider bwcVersions; - private String gitOrigin; + private Provider gitOrigin; public DefaultBuildParameterExtension( ProviderFactory providers, @@ -59,8 +59,8 @@ public DefaultBuildParameterExtension( JavaVersion minimumCompilerVersion, JavaVersion minimumRuntimeVersion, JavaVersion gradleJavaVersion, - String gitRevision, - String gitOrigin, + Provider gitRevision, + Provider gitOrigin, String testSeed, boolean isCi, int defaultParallel, @@ -155,12 +155,12 @@ public Provider getRuntimeJavaDetails() { } @Override - public String getGitRevision() { + public Provider getGitRevision() { return gitRevision; } @Override - public String getGitOrigin() { + public Provider getGitOrigin() { return gitOrigin; } @@ -239,7 +239,7 @@ public void setBwcVersions(Provider bwcVersions) { } // for testing; not part of public api - public void setGitOrigin(String gitOrigin) { + public void setGitOrigin(Provider gitOrigin) { this.gitOrigin = gitOrigin; } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java index 86f59aa0ab41e..675f1198b2a7d 100644 --- 
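As a rough usage sketch of the helper above (the .iml path, attribute value, and preface comment are invented for illustration, not taken from this change), a caller hands modifyXml a path, a mutation of the parsed tree, and optional front matter; EnablePreviewFeaturesTask applies the same pattern to the NewModuleRootManager component node:

import groovy.util.Node;

public class IdeaXmlUtilExample {
    public static void main(String[] args) throws Exception {
        // Parse the module file, mutate the in-memory tree, and write it back,
        // prepending a marker comment after the XML declaration.
        IdeaXmlUtil.modifyXml(".idea/modules/example.iml", node -> {
            ((Node) node).attributes().put("LANGUAGE_LEVEL", "JDK_21_PREVIEW");
        }, "<!-- generated file, do not edit by hand -->");
    }
}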
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java @@ -14,6 +14,7 @@ import org.apache.commons.io.IOUtils; import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.internal.BwcVersions; +import org.elasticsearch.gradle.internal.conventions.GitInfoPlugin; import org.elasticsearch.gradle.internal.conventions.info.GitInfo; import org.elasticsearch.gradle.internal.conventions.info.ParallelDetector; import org.elasticsearch.gradle.internal.conventions.util.Util; @@ -96,6 +97,8 @@ public void apply(Project project) { } this.project = project; project.getPlugins().apply(JvmToolchainsPlugin.class); + Provider gitInfo = project.getPlugins().apply(GitInfoPlugin.class).getGitInfo(); + toolChainService = project.getExtensions().getByType(JavaToolchainService.class); GradleVersion minimumGradleVersion = GradleVersion.version(getResourceContents("/minimumGradleVersion")); if (GradleVersion.current().compareTo(minimumGradleVersion) < 0) { @@ -111,8 +114,6 @@ public void apply(Project project) { ? explicitRuntimeJavaHome : resolveJavaHomeFromToolChainService(VersionProperties.getBundledJdkMajorVersion()); - GitInfo gitInfo = GitInfo.gitInfo(project.getRootDir()); - Provider runtimeJdkMetaData = actualRuntimeJavaHome.map( runtimeJavaHome -> metadataDetector.getMetadata(getJavaInstallation(runtimeJavaHome)) ); @@ -143,8 +144,8 @@ public void apply(Project project) { minimumCompilerVersion, minimumRuntimeVersion, Jvm.current().getJavaVersion(), - gitInfo.getRevision(), - gitInfo.getOrigin(), + gitInfo.map(g -> g.getRevision()), + gitInfo.map(g -> g.getOrigin()), getTestSeed(), System.getenv("JENKINS_URL") != null || System.getenv("BUILDKITE_BUILD_URL") != null || System.getProperty("isCI") != null, ParallelDetector.findDefaultParallel(project), diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckstylePrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckstylePrecommitPlugin.java index dbbe35905d208..dc8ea4424ba89 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckstylePrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckstylePrecommitPlugin.java @@ -42,7 +42,8 @@ public TaskProvider createTask(Project project) { File checkstyleDir = new File(project.getBuildDir(), "checkstyle"); File checkstyleSuppressions = new File(checkstyleDir, "checkstyle_suppressions.xml"); File checkstyleConf = new File(checkstyleDir, "checkstyle.xml"); - TaskProvider copyCheckstyleConf = project.getTasks().register("copyCheckstyleConf"); + TaskProvider copyCheckstyleConf = project.getTasks() + .register("copyCheckstyleConf", CopyCheckStyleConfTask.class); // configure inputs and outputs so up to date works properly copyCheckstyleConf.configure(t -> t.getOutputs().files(checkstyleSuppressions, checkstyleConf)); if ("jar".equals(checkstyleConfUrl.getProtocol())) { diff --git a/server/src/main/java/org/elasticsearch/inference/EmptySettingsConfiguration.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CopyCheckStyleConfTask.java similarity index 61% rename from server/src/main/java/org/elasticsearch/inference/EmptySettingsConfiguration.java rename to 
build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CopyCheckStyleConfTask.java index 8a3f96750f2ea..9e0f9c24bcef8 100644 --- a/server/src/main/java/org/elasticsearch/inference/EmptySettingsConfiguration.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CopyCheckStyleConfTask.java @@ -7,13 +7,15 @@ * License v3.0 only", or the "Server Side Public License, v 1". */ -package org.elasticsearch.inference; +package org.elasticsearch.gradle.internal.precommit; -import java.util.Collections; -import java.util.Map; +import org.gradle.api.DefaultTask; +import org.gradle.api.file.FileSystemOperations; -public class EmptySettingsConfiguration { - public static Map get() { - return Collections.emptyMap(); - } +import javax.inject.Inject; + +public abstract class CopyCheckStyleConfTask extends DefaultTask { + + @Inject + public abstract FileSystemOperations getFs(); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java index e45a1d3dd25b1..7046a22204efa 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditPrecommitPlugin.java @@ -16,12 +16,14 @@ import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.component.ModuleComponentIdentifier; +import org.gradle.api.file.FileCollection; import org.gradle.api.tasks.TaskProvider; import java.io.File; import java.nio.file.Path; import static org.elasticsearch.gradle.internal.util.DependenciesUtils.createFileCollectionFromNonTransitiveArtifactsView; +import static org.elasticsearch.gradle.internal.util.DependenciesUtils.thirdPartyDependenciesView; import static org.elasticsearch.gradle.internal.util.ParamsUtils.loadBuildParams; public class ThirdPartyAuditPrecommitPlugin extends PrecommitPlugin { @@ -47,7 +49,6 @@ public TaskProvider createTask(Project project) { project.getDependencies().add(JDK_JAR_HELL_CONFIG_NAME, elasticsearchCoreProject); } } - TaskProvider resourcesTask = project.getTasks() .register("thirdPartyAuditResources", ExportElasticsearchBuildResourcesTask.class); Path resourcesDir = project.getBuildDir().toPath().resolve("third-party-audit-config"); @@ -59,9 +60,11 @@ public TaskProvider createTask(Project project) { // usually only one task is created. 
But this construct makes our integTests easier to set up project.getTasks().withType(ThirdPartyAuditTask.class).configureEach(t -> { Configuration runtimeConfiguration = project.getConfigurations().getByName("runtimeClasspath"); + FileCollection runtimeThirdParty = thirdPartyDependenciesView(runtimeConfiguration); Configuration compileOnly = project.getConfigurations() .getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME); - t.setClasspath(runtimeConfiguration.plus(compileOnly)); + FileCollection compileOnlyThirdParty = thirdPartyDependenciesView(compileOnly); + t.getThirdPartyClasspath().from(runtimeThirdParty, compileOnlyThirdParty); t.getJarsToScan() .from( createFileCollectionFromNonTransitiveArtifactsView( @@ -78,7 +81,7 @@ public TaskProvider createTask(Project project) { t.getJavaHome().set(buildParams.flatMap(params -> params.getRuntimeJavaHome()).map(File::getPath)); t.setSignatureFile(resourcesDir.resolve("forbidden/third-party-audit.txt").toFile()); t.getJdkJarHellClasspath().from(jdkJarHellConfig); - t.getForbiddenAPIsClasspath().from(project.getConfigurations().getByName("forbiddenApisCliJar").plus(compileOnly)); + t.getForbiddenAPIsClasspath().from(project.getConfigurations().getByName("forbiddenApisCliJar").plus(compileOnlyThirdParty)); }); return audit; }
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java index 442797775de2f..59ba9bae0a57d 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/ThirdPartyAuditTask.java @@ -17,7 +17,6 @@ import org.gradle.api.JavaVersion; import org.gradle.api.file.ArchiveOperations; import org.gradle.api.file.ConfigurableFileCollection; -import org.gradle.api.file.FileCollection; import org.gradle.api.file.FileSystemOperations; import org.gradle.api.file.FileTree; import org.gradle.api.file.ProjectLayout; @@ -96,8 +95,6 @@ public abstract class ThirdPartyAuditTask extends DefaultTask { private final ProjectLayout projectLayout; - private FileCollection classpath; - @Inject public ThirdPartyAuditTask( ArchiveOperations archiveOperations, @@ -198,9 +195,7 @@ public Set getMissingClassExcludes() { public abstract Property getRuntimeJavaVersion(); @Classpath - public FileCollection getClasspath() { - return classpath; - } + public abstract ConfigurableFileCollection getThirdPartyClasspath(); @TaskAction public void runThirdPartyAudit() throws IOException { @@ -345,7 +340,7 @@ private String runForbiddenAPIsCli() throws IOException { if (javaHome.isPresent()) { spec.setExecutable(javaHome.get() + "/bin/java"); } - spec.classpath(getForbiddenAPIsClasspath(), classpath); + spec.classpath(getForbiddenAPIsClasspath(), getThirdPartyClasspath()); // Enable explicitly for each release as appropriate. Just JDK 20/21/22/23 for now, and just the vector module.
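The classpath rework in this hunk, like CopyCheckStyleConfTask above, relies on Gradle's managed-task idiom: services arrive through abstract @Inject getters and inputs through abstract managed-property getters, so the task needs no fields, constructors, or setters. A minimal self-contained sketch under those assumptions (the task class and its action body are invented):

import javax.inject.Inject;

import org.gradle.api.DefaultTask;
import org.gradle.api.file.ConfigurableFileCollection;
import org.gradle.api.file.FileSystemOperations;
import org.gradle.api.tasks.Classpath;
import org.gradle.api.tasks.TaskAction;

public abstract class ManagedTaskSketch extends DefaultTask {

    // Gradle injects the service; no constructor wiring is required.
    @Inject
    public abstract FileSystemOperations getFs();

    // Gradle instantiates the collection; producers append lazily with from(...).
    @Classpath
    public abstract ConfigurableFileCollection getThirdPartyClasspath();

    @TaskAction
    public void run() {
        // The collection is only resolved here, at execution time.
        getThirdPartyClasspath().forEach(file -> getLogger().lifecycle(file.getName()));
    }
}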
if (isJavaVersion(VERSION_20) || isJavaVersion(VERSION_21) || isJavaVersion(VERSION_22) || isJavaVersion(VERSION_23)) { spec.jvmArgs("--add-modules", "jdk.incubator.vector"); @@ -383,7 +378,7 @@ private boolean isJavaVersion(JavaVersion version) { private Set runJdkJarHellCheck() throws IOException { ByteArrayOutputStream standardOut = new ByteArrayOutputStream(); ExecResult execResult = execOperations.javaexec(spec -> { - spec.classpath(getJdkJarHellClasspath(), classpath); + spec.classpath(getJdkJarHellClasspath(), getThirdPartyClasspath()); spec.getMainClass().set(JDK_JAR_HELL_MAIN_CLASS); spec.args(getJarExpandDir()); spec.setIgnoreExitValue(true); @@ -402,8 +397,4 @@ private Set runJdkJarHellCheck() throws IOException { return new TreeSet<>(Arrays.asList(jdkJarHellCheckList.split("\\r?\\n"))); } - public void setClasspath(FileCollection classpath) { - this.classpath = classpath; - } - } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java index 211718c151ba9..08e3c92307d72 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java @@ -31,6 +31,7 @@ import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.dsl.DependencyHandler; import org.gradle.api.artifacts.type.ArtifactTypeDefinition; +import org.gradle.api.file.FileCollection; import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.provider.Provider; import org.gradle.api.specs.Specs; @@ -88,8 +89,8 @@ public void apply(Project project) { Map> versionTasks = versionTasks(project, "destructiveDistroUpgradeTest", buildParams.getBwcVersions()); TaskProvider destructiveDistroTest = project.getTasks().register("destructiveDistroTest"); - Configuration examplePlugin = configureExamplePlugin(project); - + Configuration examplePluginConfiguration = configureExamplePlugin(project); + FileCollection examplePluginFileCollection = examplePluginConfiguration; List> windowsTestTasks = new ArrayList<>(); Map>> linuxTestTasks = new HashMap<>(); @@ -102,9 +103,9 @@ public void apply(Project project) { t2 -> distribution.isDocker() == false || dockerSupport.get().getDockerAvailability().isAvailable() ); addDistributionSysprop(t, DISTRIBUTION_SYSPROP, distribution::getFilepath); - addDistributionSysprop(t, EXAMPLE_PLUGIN_SYSPROP, () -> examplePlugin.getSingleFile().toString()); + addDistributionSysprop(t, EXAMPLE_PLUGIN_SYSPROP, () -> examplePluginFileCollection.getSingleFile().toString()); t.exclude("**/PackageUpgradeTests.class"); - }, distribution, examplePlugin.getDependencies()); + }, distribution, examplePluginConfiguration.getDependencies()); if (distribution.getPlatform() == Platform.WINDOWS) { windowsTestTasks.add(destructiveTask); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ErrorReportingTestListener.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ErrorReportingTestListener.java index f0b28f5381b98..80e64ad813230 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ErrorReportingTestListener.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ErrorReportingTestListener.java @@ -172,6 +172,11 @@ public Destination getDestination() { return Destination.StdErr; } + 
@Override + public long getLogTime() { + return result.getEndTime(); + } + @Override public String getMessage() { return message; diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rerun/TestRerunTaskExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rerun/TestRerunTaskExtension.java index f08d3dadc1bb0..8e4519b55e15b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rerun/TestRerunTaskExtension.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rerun/TestRerunTaskExtension.java @@ -30,7 +30,7 @@ public class TestRerunTaskExtension { /** * The name of the extension added to each test task. */ - public static String NAME = "rerun"; + public static final String NAME = "rerun"; private final Property maxReruns; diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java index 8fbe40aa82399..bb26bfd16721d 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolver.java @@ -54,16 +54,16 @@ public String url(String os, String arch, String extension) { } } - record EarlyAccessJdkBuild(JavaLanguageVersion languageVersion, String version, String buildNumber) implements JdkBuild { - + record EarlyAccessJdkBuild(JavaLanguageVersion languageVersion) implements JdkBuild { @Override public String url(String os, String arch, String extension) { + String buildNumber = resolveBuildNumber(languageVersion.asInt()); return "https://download.java.net/java/early_access/jdk" - + version + + languageVersion.asInt() + "/" - + version + + buildNumber + "/GPL/openjdk-" - + version + + languageVersion.asInt() + "-ea+" + buildNumber + "_" @@ -73,6 +73,29 @@ public String url(String os, String arch, String extension) { + "_bin." + extension; } + + private static String resolveBuildNumber(int version) { + String buildNumber = System.getProperty("runtime.java." 
+ version + ".build"); + if (buildNumber != null) { + System.out.println("buildNumber = " + buildNumber); + return buildNumber; + } + buildNumber = System.getProperty("runtime.java.build"); + if (buildNumber != null) { + System.out.println("buildNumber2 = " + buildNumber); + return buildNumber; + } + + switch (version) { + case 24: + // latest explicitly found build number for 24 + return "29"; + case 25: + return "3"; + default: + throw new IllegalArgumentException("Unsupported version " + version); + } + } } private static final Pattern VERSION_PATTERN = Pattern.compile( @@ -88,8 +111,8 @@ public String url(String os, String arch, String extension) { // package private so it can be replaced by tests List builds = List.of( getBundledJdkBuild(), - // 23 early access - new EarlyAccessJdkBuild(JavaLanguageVersion.of(23), "23", "24") + new EarlyAccessJdkBuild(JavaLanguageVersion.of(24)), + new EarlyAccessJdkBuild(JavaLanguageVersion.of(25)) ); private JdkBuild getBundledJdkBuild() { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/DependenciesUtils.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/DependenciesUtils.java index 9080f62f19937..5d7386e2c2150 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/DependenciesUtils.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/util/DependenciesUtils.java @@ -9,12 +9,16 @@ package org.elasticsearch.gradle.internal.util; +import com.github.jengelman.gradle.plugins.shadow.ShadowBasePlugin; + import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.ResolvableDependencies; import org.gradle.api.artifacts.component.ComponentIdentifier; +import org.gradle.api.artifacts.component.ProjectComponentIdentifier; import org.gradle.api.artifacts.result.ResolvedComponentResult; import org.gradle.api.artifacts.result.ResolvedDependencyResult; import org.gradle.api.file.FileCollection; +import org.gradle.api.provider.Provider; import org.gradle.api.specs.AndSpec; import org.gradle.api.specs.Spec; @@ -29,7 +33,7 @@ public static FileCollection createFileCollectionFromNonTransitiveArtifactsView( ) { ResolvableDependencies incoming = configuration.getIncoming(); return incoming.artifactView(viewConfiguration -> { - Set firstLevelDependencyComponents = incoming.getResolutionResult() + Provider> firstLevelDependencyComponents = incoming.getResolutionResult() .getRootComponent() .map( rootComponent -> rootComponent.getDependencies() @@ -39,12 +43,36 @@ public static FileCollection createFileCollectionFromNonTransitiveArtifactsView( .filter(dependency -> dependency.getSelected() instanceof ResolvedComponentResult) .map(dependency -> dependency.getSelected().getId()) .collect(Collectors.toSet()) - ) - .get(); + ); viewConfiguration.componentFilter( - new AndSpec<>(identifier -> firstLevelDependencyComponents.contains(identifier), componentFilter) + new AndSpec<>(identifier -> firstLevelDependencyComponents.get().contains(identifier), componentFilter) ); }).getFiles(); } + /** + * This method gives us an artifact view of a configuration that filters out all + * project dependencies that are not shadowed jars. + * Basically a thirdparty only view of the dependency tree. 
+ */ + public static FileCollection thirdPartyDependenciesView(Configuration configuration) { + ResolvableDependencies incoming = configuration.getIncoming(); + return incoming.artifactView(v -> { + // resolve componentIdentifier for all shadowed project dependencies + Provider> shadowedDependencies = incoming.getResolutionResult() + .getRootComponent() + .map( + root -> root.getDependencies() + .stream() + .filter(dep -> dep instanceof ResolvedDependencyResult) + .map(dep -> (ResolvedDependencyResult) dep) + .filter(dep -> ShadowBasePlugin.COMPONENT_NAME.equals(dep.getResolvedVariant().getDisplayName())) + .filter(dep -> dep.getSelected() instanceof ResolvedComponentResult) + .map(dep -> dep.getSelected().getId()) + .collect(Collectors.toSet()) + ); + // filter out project dependencies if they are not a shadowed dependency + v.componentFilter(i -> (i instanceof ProjectComponentIdentifier == false || shadowedDependencies.get().contains(i))); + }).getFiles(); + } }
diff --git a/build-tools-internal/src/main/resources/changelog-schema.json b/build-tools-internal/src/main/resources/changelog-schema.json index 451701d74d690..9692af7adc5e6 100644 --- a/build-tools-internal/src/main/resources/changelog-schema.json +++ b/build-tools-internal/src/main/resources/changelog-schema.json @@ -279,6 +279,7 @@ "compatibilityChangeArea": { "type": "string", "enum": [ + "Aggregations", "Analysis", "Authorization", "Cluster and node setting", @@ -295,6 +296,7 @@ "Painless", "REST API", "Rollup", + "Search", "System requirement", "Transform" ]
diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt index a9da7995c2b36..53480a4a27b0b 100644 --- a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt +++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt @@ -155,10 +155,8 @@ org.elasticsearch.cluster.ClusterState#compatibilityVersions() @defaultMessage ClusterFeatures#nodeFeatures is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. org.elasticsearch.cluster.ClusterFeatures#nodeFeatures() -@defaultMessage ClusterFeatures#allNodeFeatures is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster. -org.elasticsearch.cluster.ClusterFeatures#allNodeFeatures() @defaultMessage ClusterFeatures#clusterHasFeature is for internal use only. Use FeatureService#clusterHasFeature to determine if a feature is present on the cluster.
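Hypothetical wiring for the new view (the plugin and task names are invented; thirdPartyDependenciesView is the method added above): any task that should scan external jars but skip locally built project classes can consume it as a lazy FileCollection.

import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.file.FileCollection;

import static org.elasticsearch.gradle.internal.util.DependenciesUtils.thirdPartyDependenciesView;

public class ThirdPartyViewExamplePlugin implements Plugin<Project> {
    @Override
    public void apply(Project project) {
        // Shadowed project dependencies stay in the view; plain project dependencies drop out.
        FileCollection thirdParty = thirdPartyDependenciesView(project.getConfigurations().getByName("runtimeClasspath"));
        project.getTasks().register("listThirdPartyJars", task -> task.doLast(t -> {
            thirdParty.forEach(jar -> System.out.println(jar.getName()));
        }));
    }
}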
-org.elasticsearch.cluster.ClusterFeatures#clusterHasFeature(org.elasticsearch.features.NodeFeature) +org.elasticsearch.cluster.ClusterFeatures#clusterHasFeature(org.elasticsearch.cluster.node.DiscoveryNodes, org.elasticsearch.features.NodeFeature) @defaultMessage Do not construct these records outside the source files they are declared in org.elasticsearch.cluster.SnapshotsInProgress$ShardSnapshotStatus#(java.lang.String, org.elasticsearch.cluster.SnapshotsInProgress$ShardState, org.elasticsearch.repositories.ShardGeneration, java.lang.String, org.elasticsearch.repositories.ShardSnapshotResult)
diff --git a/build-tools-internal/src/main/resources/minimumGradleVersion b/build-tools-internal/src/main/resources/minimumGradleVersion index 876e3136ea819..9c57ca327c7b7 100644 --- a/build-tools-internal/src/main/resources/minimumGradleVersion +++ b/build-tools-internal/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -8.11.1 \ No newline at end of file +8.12 \ No newline at end of file
diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/info/BuildParameterExtensionSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/info/BuildParameterExtensionSpec.groovy index 343268b9b4d47..ce63069a873ab 100644 --- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/info/BuildParameterExtensionSpec.groovy +++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/info/BuildParameterExtensionSpec.groovy @@ -9,9 +9,9 @@ package org.elasticsearch.gradle.internal.info +import spock.lang.Ignore import spock.lang.Specification -import org.elasticsearch.gradle.internal.BwcVersions import org.gradle.api.JavaVersion import org.gradle.api.Project import org.gradle.api.provider.Provider @@ -31,6 +31,7 @@ class BuildParameterExtensionSpec extends Specification { ProjectBuilder projectBuilder = new ProjectBuilder() + @Ignore def "#getterName is cached and concurrently accessible"() { given: def project = projectBuilder.build() @@ -85,8 +86,8 @@ class BuildParameterExtensionSpec extends Specification { JavaVersion.VERSION_11, JavaVersion.VERSION_11, JavaVersion.VERSION_11, - "gitRevision", - "gitOrigin", + providerMock(), + providerMock(), "testSeed", false, 5,
diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AbstractToolchainResolverSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AbstractToolchainResolverSpec.groovy index ad0db8b1b7de7..cea96437129a6 100644 --- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AbstractToolchainResolverSpec.groovy +++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/AbstractToolchainResolverSpec.groovy @@ -9,6 +9,8 @@ package org.elasticsearch.gradle.internal.toolchain +import spock.lang.Unroll + import org.gradle.api.provider.Property import org.gradle.jvm.toolchain.JavaLanguageVersion import org.gradle.jvm.toolchain.JavaToolchainDownload @@ -26,6 +28,7 @@ import static org.gradle.platform.OperatingSystem.MAC_OS abstract class AbstractToolchainResolverSpec extends Specification { + @Unroll def "resolves #os #arch #vendor jdk #langVersion"() { given: def resolver = resolverImplementation() diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolverSpec.groovy
b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolverSpec.groovy index 9c55bbc4674e9..4993bf00f2af5 100644 --- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolverSpec.groovy +++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/OracleOpenJdkToolchainResolverSpec.groovy @@ -9,15 +9,20 @@ package org.elasticsearch.gradle.internal.toolchain +import spock.util.environment.RestoreSystemProperties import org.gradle.api.services.BuildServiceParameters import org.gradle.jvm.toolchain.JavaLanguageVersion +import org.gradle.jvm.toolchain.JavaToolchainDownload + import static org.gradle.jvm.toolchain.JvmVendorSpec.ORACLE -import static org.gradle.platform.Architecture.* +import static org.gradle.platform.Architecture.AARCH64 +import static org.gradle.platform.Architecture.X86_64 import static org.gradle.platform.OperatingSystem.* class OracleOpenJdkToolchainResolverSpec extends AbstractToolchainResolverSpec { + OracleOpenJdkToolchainResolver resolverImplementation() { var toolChain = new OracleOpenJdkToolchainResolver() { @Override @@ -25,10 +30,13 @@ class OracleOpenJdkToolchainResolverSpec extends AbstractToolchainResolverSpec { return null } } - toolChain.builds = [ - new OracleOpenJdkToolchainResolver.ReleasedJdkBuild(JavaLanguageVersion.of(20), "20", "36", "bdc68b4b9cbc4ebcb30745c85038d91d"), - new OracleOpenJdkToolchainResolver.EarlyAccessJdkBuild(JavaLanguageVersion.of(21), "21", "6") - ] + toolChain.builds = toolChain.builds.findAll { it instanceof OracleOpenJdkToolchainResolver.EarlyAccessJdkBuild } + [ + new OracleOpenJdkToolchainResolver.ReleasedJdkBuild( + JavaLanguageVersion.of(20), + "20", + "36", + "bdc68b4b9cbc4ebcb30745c85038d91d" + )] toolChain } @@ -44,23 +52,67 @@ class OracleOpenJdkToolchainResolverSpec extends AbstractToolchainResolverSpec { [20, anyVendor(), LINUX, AARCH64, "https://download.oracle.com/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_linux-aarch64_bin.tar.gz"], [20, anyVendor(), WINDOWS, X86_64, "https://download.oracle.com/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_windows-x64_bin.zip"], // https://download.java.net/java/early_access/jdk23/23/GPL/openjdk-23-ea+23_macos-aarch64_bin.tar.gz - [21, ORACLE, MAC_OS, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_macos-x64_bin.tar.gz"], - [21, ORACLE, MAC_OS, AARCH64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_macos-aarch64_bin.tar.gz"], - [21, ORACLE, LINUX, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_linux-x64_bin.tar.gz"], - [21, ORACLE, LINUX, AARCH64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_linux-aarch64_bin.tar.gz"], - [21, ORACLE, WINDOWS, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_windows-x64_bin.zip"], - [21, anyVendor(), MAC_OS, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_macos-x64_bin.tar.gz"], - [21, anyVendor(), MAC_OS, AARCH64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_macos-aarch64_bin.tar.gz"], - [21, anyVendor(), LINUX, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_linux-x64_bin.tar.gz"], - [21, anyVendor(), LINUX, AARCH64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_linux-aarch64_bin.tar.gz"], - [21, 
anyVendor(), WINDOWS, X86_64, "https://download.java.net/java/early_access/jdk21/21/GPL/openjdk-21-ea+6_windows-x64_bin.zip"] - ] + [24, ORACLE, MAC_OS, X86_64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_macos-x64_bin.tar.gz"], + [24, ORACLE, MAC_OS, AARCH64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_macos-aarch64_bin.tar.gz"], + [24, ORACLE, LINUX, X86_64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_linux-x64_bin.tar.gz"], + [24, ORACLE, LINUX, AARCH64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_linux-aarch64_bin.tar.gz"], + [24, ORACLE, WINDOWS, X86_64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_windows-x64_bin.zip"], + [24, anyVendor(), MAC_OS, X86_64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_macos-x64_bin.tar.gz"], + [24, anyVendor(), MAC_OS, AARCH64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_macos-aarch64_bin.tar.gz"], + [24, anyVendor(), LINUX, X86_64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_linux-x64_bin.tar.gz"], + [24, anyVendor(), LINUX, AARCH64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_linux-aarch64_bin.tar.gz"], + [24, anyVendor(), WINDOWS, X86_64, "https://download.java.net/java/early_access/jdk24/29/GPL/openjdk-24-ea+29_windows-x64_bin.zip"]] + } + + @RestoreSystemProperties + def "can provide build number for ea versions"() { + given: + System.setProperty('runtime.java.build', "42") + System.setProperty('runtime.java.25.build', "13") + def resolver = resolverImplementation() + + when: + Optional download = resolver.resolve( + request( + JavaLanguageVersion.of(version), + vendor, + platform(os, arch) + ) + ) + + then: + download.get().uri == URI.create(expectedUrl) + + where: + version | vendor | os | arch | expectedUrl + 24 | ORACLE | MAC_OS | X86_64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_macos-x64_bin.tar.gz" + 24 | ORACLE | MAC_OS | AARCH64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_macos-aarch64_bin.tar.gz" + 24 | ORACLE | LINUX | X86_64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_linux-x64_bin.tar.gz" + 24 | ORACLE | LINUX | AARCH64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_linux-aarch64_bin.tar.gz" + 24 | ORACLE | WINDOWS | X86_64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_windows-x64_bin.zip" + 24 | anyVendor() | MAC_OS | X86_64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_macos-x64_bin.tar.gz" + 24 | anyVendor() | MAC_OS | AARCH64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_macos-aarch64_bin.tar.gz" + 24 | anyVendor() | LINUX | X86_64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_linux-x64_bin.tar.gz" + 24 | anyVendor() | LINUX | AARCH64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_linux-aarch64_bin.tar.gz" + 24 | anyVendor() | WINDOWS | X86_64 | urlPrefix(24) + "42/GPL/openjdk-24-ea+42_windows-x64_bin.zip" + 25 | ORACLE | MAC_OS | X86_64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_macos-x64_bin.tar.gz" + 25 | ORACLE | MAC_OS | AARCH64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_macos-aarch64_bin.tar.gz" + 25 | ORACLE | LINUX | X86_64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_linux-x64_bin.tar.gz" + 25 | ORACLE | LINUX | AARCH64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_linux-aarch64_bin.tar.gz" + 25 | ORACLE | WINDOWS | X86_64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_windows-x64_bin.zip" + 25 | anyVendor() | MAC_OS | X86_64 | urlPrefix(25) + 
"13/GPL/openjdk-25-ea+13_macos-x64_bin.tar.gz" + 25 | anyVendor() | MAC_OS | AARCH64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_macos-aarch64_bin.tar.gz" + 25 | anyVendor() | LINUX | X86_64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_linux-x64_bin.tar.gz" + 25 | anyVendor() | LINUX | AARCH64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_linux-aarch64_bin.tar.gz" + 25 | anyVendor() | WINDOWS | X86_64 | urlPrefix(25) + "13/GPL/openjdk-25-ea+13_windows-x64_bin.zip" + } + + private static String urlPrefix(int i) { + return "https://download.java.net/java/early_access/jdk" + i + "/" } def unsupportedRequests() { - [ - [20, ORACLE, WINDOWS, AARCH64] - ] + [[20, ORACLE, WINDOWS, AARCH64]] } } diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index ede1b392b8a41..57882fa842b41 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -19,6 +19,7 @@ commons_lang3 = 3.9 google_oauth_client = 1.34.1 awsv1sdk = 1.12.270 awsv2sdk = 2.28.13 +reactive_streams = 1.0.4 antlr4 = 4.13.1 # bouncy castle version for non-fips. fips jars use a different version diff --git a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy index 2575f80ad9da0..bbc47bedeffc6 100644 --- a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy +++ b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy @@ -135,7 +135,7 @@ class TestClustersPluginFuncTest extends AbstractGradleFuncTest { esplugin { name = 'test-$pluginType' - classname 'org.acme.TestModule' + classname = 'org.acme.TestModule' description = "test $pluginType description" } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/distribution/ElasticsearchDistributionTypes.java b/build-tools/src/main/java/org/elasticsearch/gradle/distribution/ElasticsearchDistributionTypes.java index 8a53d56e3c4ce..68eef25df5be1 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/distribution/ElasticsearchDistributionTypes.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/distribution/ElasticsearchDistributionTypes.java @@ -12,6 +12,6 @@ import org.elasticsearch.gradle.ElasticsearchDistributionType; public class ElasticsearchDistributionTypes { - public static ElasticsearchDistributionType ARCHIVE = new ArchiveElasticsearchDistributionType(); - public static ElasticsearchDistributionType INTEG_TEST_ZIP = new IntegTestZipElasticsearchDistributionType(); + public static final ElasticsearchDistributionType ARCHIVE = new ArchiveElasticsearchDistributionType(); + public static final ElasticsearchDistributionType INTEG_TEST_ZIP = new IntegTestZipElasticsearchDistributionType(); } diff --git a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy index fe23204d5601c..72d8134869037 100644 --- a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy +++ b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy @@ -107,7 +107,7 @@ abstract class AbstractGradleFuncTest extends Specification { .forwardOutput() ), configurationCacheCompatible, buildApiRestrictionsDisabled) - ).withArguments(arguments.collect { it.toString() }) + 
).withArguments(arguments.collect { it.toString() } + "--full-stacktrace") } def assertOutputContains(String givenOutput, String expected) {
diff --git a/build.gradle b/build.gradle index b95e34640cb5f..e6fc1f4eba28c 100644 --- a/build.gradle +++ b/build.gradle @@ -221,25 +221,11 @@ tasks.register("verifyVersions") { } } -/* - * When adding backcompat behavior that spans major versions, temporarily - * disabling the backcompat tests is necessary. This flag controls - * the enabled state of every bwc task. It should be set back to true - * after the backport of the backcompat code is complete. - */ - +// TODO: This flag existed as a mechanism to disable bwc tests during a backport. It is no +// longer used for that purpose, but instead a way to run only functional tests. We should +// rework the functionalTests task to be more explicit about which tasks it wants to run +// so that this flag is no longer needed. boolean bwc_tests_enabled = true -// place a PR link here when committing bwc changes: -String bwc_tests_disabled_issue = "" -if (bwc_tests_enabled == false) { - if (bwc_tests_disabled_issue.isEmpty()) { - throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") - } - println "========================= WARNING =========================" - println " Backwards compatibility tests are disabled!" - println "See ${bwc_tests_disabled_issue}" - println "===========================================================" -} if (project.gradle.startParameter.taskNames.any { it.startsWith("checkPart") || it == 'functionalTests' }) { // Disable BWC tests for checkPart* tasks and platform support tests as it's expected that this will run on its own check bwc_tests_enabled = false @@ -378,24 +364,24 @@ tasks.register("verifyBwcTestsEnabled") { } tasks.register("branchConsistency") { - description 'Ensures this branch is internally consistent. For example, that versions constants match released versions.' - group 'Verification' + description = 'Ensures this branch is internally consistent. For example, that versions constants match released versions.' + group = 'Verification' dependsOn ":verifyVersions", ":verifyBwcTestsEnabled" } tasks.named("wrapper").configure { distributionType = 'ALL' + def minimumGradleVersionFile = project.file('build-tools-internal/src/main/resources/minimumGradleVersion') doLast { // copy wrapper properties file to build-tools-internal to allow seamless idea integration def file = new File("build-tools-internal/gradle/wrapper/gradle-wrapper.properties") - Files.copy(wrapper.getPropertiesFile().toPath(), file.toPath(), REPLACE_EXISTING) + Files.copy(getPropertiesFile().toPath(), file.toPath(), REPLACE_EXISTING) // copy wrapper properties file to plugins/examples to allow seamless idea integration def examplePluginsWrapperProperties = new File("plugins/examples/gradle/wrapper/gradle-wrapper.properties") - Files.copy(wrapper.getPropertiesFile().toPath(), examplePluginsWrapperProperties.toPath(), REPLACE_EXISTING) - + Files.copy(getPropertiesFile().toPath(), examplePluginsWrapperProperties.toPath(), REPLACE_EXISTING) // Update build-tools to reflect the Gradle upgrade // TODO: we can remove this once we have tests to make sure older versions work.
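Stripped of Gradle plumbing, the doLast block being adjusted below amounts to three file operations. A plain-Java sketch (the paths are the ones named in the build script; 8.12 is the minimum version this change pins):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;

public class SyncWrapperMetadata {
    public static void main(String[] args) throws IOException {
        Path wrapperProps = Path.of("gradle/wrapper/gradle-wrapper.properties");
        // Mirror the wrapper properties so IDEA picks them up in both sub-builds.
        Files.copy(wrapperProps, Path.of("build-tools-internal/gradle/wrapper/gradle-wrapper.properties"), REPLACE_EXISTING);
        Files.copy(wrapperProps, Path.of("plugins/examples/gradle/wrapper/gradle-wrapper.properties"), REPLACE_EXISTING);
        // Record the upgraded Gradle version as the new minimum.
        Files.writeString(Path.of("build-tools-internal/src/main/resources/minimumGradleVersion"), "8.12");
    }
}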
- project.file('build-tools-internal/src/main/resources/minimumGradleVersion').text = gradleVersion + minimumGradleVersionFile.text = gradleVersion println "Updated minimum Gradle Version" } }
diff --git a/client/client-benchmark-noop-api-plugin/build.gradle b/client/client-benchmark-noop-api-plugin/build.gradle index 7f03450c406dc..9a93cb38b2881 100644 --- a/client/client-benchmark-noop-api-plugin/build.gradle +++ b/client/client-benchmark-noop-api-plugin/build.gradle @@ -12,9 +12,9 @@ group = 'org.elasticsearch.plugin' apply plugin: 'elasticsearch.internal-es-plugin' esplugin { - name 'client-benchmark-noop-api' - description 'Stubbed out Elasticsearch actions that can be used for client-side benchmarking' - classname 'org.elasticsearch.plugin.noop.NoopPlugin' + name = 'client-benchmark-noop-api' + description = 'Stubbed out Elasticsearch actions that can be used for client-side benchmarking' + classname = 'org.elasticsearch.plugin.noop.NoopPlugin' } // Not published so no need to assemble
diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 003c251186510..3fb2aa6595869 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -36,7 +36,7 @@ base { } // LLRC is licensed under Apache 2.0 -projectLicenses.set(['The Apache Software License, Version 2.0': 'http://www.apache.org/licenses/LICENSE-2.0']) +projectLicenses.set(['The Apache Software License, Version 2.0': providers.provider(() -> 'http://www.apache.org/licenses/LICENSE-2.0')]) licenseFile.set(rootProject.file('licenses/APACHE-LICENSE-2.0.txt')) dependencies {
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java index 265bd52eabe83..916823fd91b61 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java @@ -89,7 +89,6 @@ public static void stopHttpServers() throws IOException { } public void testBuilderUsesDefaultSSLContext() throws Exception { - assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm()); final SSLContext defaultSSLContext = SSLContext.getDefault(); try { try (RestClient client = buildRestClient()) { @@ -97,10 +96,15 @@ public void testBuilderUsesDefaultSSLContext() throws Exception { client.performRequest(new Request("GET", "/")); fail("connection should have been rejected due to SSL handshake"); } catch (Exception e) { - assertThat(e, instanceOf(SSLHandshakeException.class)); + if (inFipsJvm()) { + // Bouncy Castle throws a different exception + assertThat(e, instanceOf(IOException.class)); + assertThat(e.getCause(), instanceOf(javax.net.ssl.SSLException.class)); + } else { + assertThat(e, instanceOf(SSLHandshakeException.class)); + } } } - SSLContext.setDefault(getSslContext()); try (RestClient client = buildRestClient()) { Response response = client.performRequest(new Request("GET", "/")); @@ -112,7 +116,6 @@ public void testBuilderUsesDefaultSSLContext() throws Exception { } public void testBuilderSetsThreadName() throws Exception { - assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm()); final SSLContext defaultSSLContext = SSLContext.getDefault(); try { SSLContext.setDefault(getSslContext());
diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index f6f26c8f7c0d5..9b1cb1140311b 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@
-32,7 +32,7 @@ base { } // rest client sniffer is licensed under Apache 2.0 -projectLicenses.set(['The Apache Software License, Version 2.0': 'http://www.apache.org/licenses/LICENSE-2.0']) +projectLicenses.set(['The Apache Software License, Version 2.0': providers.provider(() -> 'http://www.apache.org/licenses/LICENSE-2.0')]) licenseFile.set(rootProject.file('licenses/APACHE-LICENSE-2.0.txt')) dependencies {
diff --git a/client/test/build.gradle b/client/test/build.gradle index 8de6b3dbf92be..e39b7587b69d5 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -18,7 +18,7 @@ java { group = "${group}.client.test" // rest client test is licensed under Apache 2.0 -projectLicenses.set(['The Apache Software License, Version 2.0': 'http://www.apache.org/licenses/LICENSE-2.0']) +projectLicenses.set(['The Apache Software License, Version 2.0': providers.provider(() -> 'http://www.apache.org/licenses/LICENSE-2.0')]) licenseFile.set(rootProject.file('licenses/APACHE-LICENSE-2.0.txt')) dependencies {
diff --git a/distribution/build.gradle b/distribution/build.gradle index e65d07dcfc2b4..e0302a081ce68 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -647,8 +647,8 @@ subprojects { // in the final log4j2.properties configuration, as it appears in the // archive distribution. artifacts.add('log4jConfig', file("${defaultOutputs}/log4j2.properties")) { - type 'file' - name 'log4j2.properties' + type = 'file' + name = 'log4j2.properties' builtBy 'buildDefaultLog4jConfig' }
diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index f5b94fb9dfd94..204cfc18950a8 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -28,7 +28,7 @@ repositories { // other Docker variants, the need for the main image to be rebuildable by Docker Hub // means that the Dockerfile itself has to fetch the binary. ivy { - url 'https://github.com/' + url = 'https://github.com/' patternLayout { artifact '/[organisation]/[module]/releases/download/v[revision]/[module]-[classifier]' } @@ -45,12 +45,12 @@ if (useDra == false) { ivy { name = 'beats' if (useLocalArtifacts) { - url getLayout().getBuildDirectory().dir("artifacts").get().asFile + url = getLayout().getBuildDirectory().dir("artifacts").get().asFile patternLayout { artifact '/[organisation]/[module]-[revision]-[classifier].[ext]' } } else { - url "https://artifacts-snapshot.elastic.co/" + url = "https://artifacts-snapshot.elastic.co/" patternLayout { if (VersionProperties.isElasticsearchSnapshot()) { artifact '/[organization]/[revision]/downloads/[organization]/[module]/[module]-[revision]-[classifier].[ext]' @@ -127,7 +127,7 @@ ext.expansions = { Architecture architecture, DockerBase base -> 'bin_dir' : base == DockerBase.IRON_BANK ? 'scripts' : 'bin', 'build_date' : buildDate, 'config_dir' : base == DockerBase.IRON_BANK ? 'scripts' : 'config', - 'git_revision' : buildParams.gitRevision, + 'git_revision' : buildParams.gitRevision.get(), 'license' : base == DockerBase.IRON_BANK ?
'Elastic License 2.0' : 'Elastic-License-2.0', 'package_manager' : base.packageManager, 'docker_base' : base.name().toLowerCase(), @@ -551,6 +551,7 @@ subprojects { Project subProject -> inputs.file("${parent.projectDir}/build/markers/${buildTaskName}.marker") executable = 'docker' outputs.file(tarFile) + outputs.doNotCacheIf("Build cache is disabled for export tasks") { true } args "save", "-o", tarFile, @@ -583,8 +584,8 @@ subprojects { Project subProject -> } artifacts.add('default', file(tarFile)) { - type 'tar' - name artifactName + type = 'tar' + name = artifactName builtBy exportTaskName } } diff --git a/distribution/docker/src/docker/Dockerfile b/distribution/docker/src/docker/Dockerfile index 6cb030565d9d2..48881660b30fe 100644 --- a/distribution/docker/src/docker/Dockerfile +++ b/distribution/docker/src/docker/Dockerfile @@ -22,7 +22,7 @@ <% if (docker_base == 'iron_bank') { %> ARG BASE_REGISTRY=registry1.dso.mil ARG BASE_IMAGE=ironbank/redhat/ubi/ubi9 -ARG BASE_TAG=9.4 +ARG BASE_TAG=9.5 <% } %> ################################################################################ diff --git a/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml b/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml index f4364c5008c09..19b4a13dc9f22 100644 --- a/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml +++ b/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml @@ -1,21 +1,17 @@ --- apiVersion: v1 - # The repository name in registry1, excluding /ironbank/ name: "elastic/elasticsearch/elasticsearch" - # List of tags to push for the repository in registry1 # The most specific version should be the first tag and will be shown # on ironbank.dsop.io tags: - "${version}" - "latest" - # Build args passed to Dockerfile ARGs args: BASE_IMAGE: "redhat/ubi/ubi9" - BASE_TAG: "9.4" - + BASE_TAG: "9.5" # Docker image labels labels: org.opencontainers.image.title: "elasticsearch" @@ -34,7 +30,6 @@ labels: mil.dso.ironbank.image.type: "commercial" # Product the image belongs to for grouping multiple images mil.dso.ironbank.product.name: "elasticsearch" - # List of resources to make available to the offline build context resources: - filename: "elasticsearch-${version}-linux-x86_64.tar.gz" @@ -47,12 +42,14 @@ resources: validation: type: "sha256" value: "93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c" - # List of project maintainers maintainers: - - name: "Rory Hunter" - email: "rory.hunter@elastic.co" - username: "rory" + - name: "Mark Vieira" + email: "mark.vieira@elastic.co" + username: "mark-vieira" + - name: "Rene Gröschke" + email: "rene.groschke@elastic.co" + username: "breskeby" - email: "klepal_alexander@bah.com" name: "Alexander Klepal" username: "alexander.klepal" diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 5f45b4b72974f..b7ba4e32edae3 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -43,7 +43,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.10.0" + id "com.netflix.nebula.ospackage-base" version "11.10.1" } ['deb', 'rpm'].each { type -> @@ -87,21 +87,21 @@ def commonPackageConfig(String type, String architecture) { OS.current().equals(OS.WINDOWS) == false } dependsOn "process${type.capitalize()}Files" - packageName "elasticsearch" + packageName = "elasticsearch" if (type == 'deb') { if (architecture == 'x64') { - arch('amd64') + arch = 'amd64' } else { assert architecture == 'aarch64': 
architecture - arch('arm64') + arch = 'arm64' } } else { assert type == 'rpm': type if (architecture == 'x64') { - arch('X86_64') + arch = 'X86_64' } else { assert architecture == 'aarch64': architecture - arch('aarch64') + arch = 'aarch64' } } // Follow elasticsearch's file naming convention @@ -200,7 +200,7 @@ def commonPackageConfig(String type, String architecture) { into('/etc') permissionGroup 'elasticsearch' setgid true - includeEmptyDirs true + includeEmptyDirs = true createDirectoryEntry true include("elasticsearch") // empty dir, just to add directory entry include("elasticsearch/jvm.options.d") // empty dir, just to add directory entry @@ -215,7 +215,7 @@ def commonPackageConfig(String type, String architecture) { unix(0660) } permissionGroup 'elasticsearch' - includeEmptyDirs true + includeEmptyDirs = true createDirectoryEntry true fileType CONFIG | NOREPLACE } @@ -265,7 +265,7 @@ def commonPackageConfig(String type, String architecture) { into(file.parent) { from "${packagingFiles}/${file.parent}" include file.name - includeEmptyDirs true + includeEmptyDirs = true createDirectoryEntry true user u permissionGroup g @@ -289,15 +289,15 @@ def commonPackageConfig(String type, String architecture) { // this is package independent configuration ospackage { - maintainer 'Elasticsearch Team ' - summary 'Distributed RESTful search engine built for the cloud' - packageDescription ''' + maintainer = 'Elasticsearch Team ' + summary = 'Distributed RESTful search engine built for the cloud' + packageDescription = ''' Reference documentation can be found at https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html and the 'Elasticsearch: The Definitive Guide' book can be found at https://www.elastic.co/guide/en/elasticsearch/guide/current/index.html '''.stripIndent().trim() - url 'https://www.elastic.co/' + url = 'https://www.elastic.co/' // signing setup if (project.hasProperty('signing.password') && buildParams.isSnapshotBuild() == false) { @@ -311,10 +311,10 @@ ospackage { // version found on oldest supported distro, centos-6 requires('coreutils', '8.4', GREATER | EQUAL) - fileMode 0644 - dirMode 0755 - user 'root' - permissionGroup 'root' + fileMode = 0644 + dirMode = 0755 + user = 'root' + permissionGroup = 'root' into '/usr/share/elasticsearch' } @@ -330,7 +330,7 @@ Closure commonDebConfig(String architecture) { customFields['License'] = 'Elastic-License' archiveVersion = project.version.replace('-', '~') - packageGroup 'web' + packageGroup = 'web' // versions found on oldest supported distro, centos-6 requires('bash', '4.1', GREATER | EQUAL) @@ -358,24 +358,24 @@ Closure commonRpmConfig(String architecture) { return { configure(commonPackageConfig('rpm', architecture)) - license 'Elastic License' + license = 'Elastic License' - packageGroup 'Application/Internet' + packageGroup = 'Application/Internet' requires '/bin/bash' obsoletes packageName, '7.0.0', Flags.LESS prefix '/usr' - packager 'Elasticsearch' + packager = 'Elasticsearch' archiveVersion = project.version.replace('-', '_') release = '1' - os 'LINUX' - distribution 'Elasticsearch' - vendor 'Elasticsearch' + os = 'LINUX' + distribution = 'Elasticsearch' + vendor = 'Elasticsearch' // TODO ospackage doesn't support icon but we used to have one // without this the rpm will have parent dirs of any files we copy in, eg /etc/elasticsearch - addParentDirs false + addParentDirs = false } } diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index f55d90933ed61..94fc6f2cb9025 100644 
--- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -9,7 +9,7 @@ ## should create one or more files in the jvm.options.d ## directory containing your adjustments. ## -## See https://www.elastic.co/guide/en/elasticsearch/reference/@project.minor.version@/jvm-options.html +## See https://www.elastic.co/guide/en/elasticsearch/reference/@project.minor.version@/advanced-configuration.html#set-jvm-options ## for more information. ## ################################################################ diff --git a/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/cli/keystore/KeyStoreWrapperTests.java b/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/cli/keystore/KeyStoreWrapperTests.java index 38bb7d592f7c0..5ab27bac3998a 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/cli/keystore/KeyStoreWrapperTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/cli/keystore/KeyStoreWrapperTests.java @@ -58,6 +58,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; public class KeyStoreWrapperTests extends ESTestCase { @@ -436,17 +437,8 @@ public void testStringAndFileDistinction() throws Exception { public void testLegacyV3() throws GeneralSecurityException, IOException { assumeFalse("Cannot open unprotected keystore on FIPS JVM", inFipsJvm()); final Path configDir = createTempDir(); - final Path keystore = configDir.resolve("elasticsearch.keystore"); - try ( - InputStream is = KeyStoreWrapperTests.class.getResourceAsStream("/format-v3-elasticsearch.keystore"); - OutputStream os = Files.newOutputStream(keystore) - ) { - final byte[] buffer = new byte[4096]; - int readBytes; - while ((readBytes = is.read(buffer)) > 0) { - os.write(buffer, 0, readBytes); - } - } + copyKeyStoreFromResourceToConfigDir(configDir, "/format-v3-elasticsearch.keystore"); + final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir); assertNotNull(wrapper); wrapper.decrypt(new char[0]); @@ -460,9 +452,31 @@ public void testLegacyV3() throws GeneralSecurityException, IOException { public void testLegacyV5() throws GeneralSecurityException, IOException { final Path configDir = createTempDir(); + copyKeyStoreFromResourceToConfigDir(configDir, "/format-v5-with-password-elasticsearch.keystore"); + + final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir); + assertNotNull(wrapper); + wrapper.decrypt("keystorepassword".toCharArray()); + assertThat(wrapper.getFormatVersion(), equalTo(5)); + assertThat(wrapper.getSettingNames(), equalTo(Set.of("keystore.seed"))); + } + + public void testLegacyV6() throws GeneralSecurityException, IOException { + final Path configDir = createTempDir(); + copyKeyStoreFromResourceToConfigDir(configDir, "/format-v6-elasticsearch.keystore"); + + final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir); + assertNotNull(wrapper); + wrapper.decrypt("keystorepassword".toCharArray()); + assertThat(wrapper.getFormatVersion(), equalTo(6)); + assertThat(wrapper.getSettingNames(), equalTo(Set.of("keystore.seed", "string"))); + assertThat(wrapper.getString("string"), equalTo("value")); + } + + private void copyKeyStoreFromResourceToConfigDir(Path configDir, String name) throws IOException { final Path keystore = configDir.resolve("elasticsearch.keystore"); try ( - InputStream is = 
KeyStoreWrapperTests.class.getResourceAsStream("/format-v5-with-password-elasticsearch.keystore"); +            InputStream is = KeyStoreWrapperTests.class.getResourceAsStream(name); OutputStream os = Files.newOutputStream(keystore) ) { final byte[] buffer = new byte[4096]; @@ -471,11 +485,6 @@ public void testLegacyV5() throws GeneralSecurityException, IOException { os.write(buffer, 0, readBytes); } } -        final KeyStoreWrapper wrapper = KeyStoreWrapper.load(configDir); -        assertNotNull(wrapper); -        wrapper.decrypt("keystorepassword".toCharArray()); -        assertThat(wrapper.getFormatVersion(), equalTo(5)); -        assertThat(wrapper.getSettingNames(), equalTo(Set.of("keystore.seed"))); } public void testSerializationNewlyCreated() throws Exception { @@ -487,6 +496,7 @@ public void testSerializationNewlyCreated() throws Exception { wrapper.writeTo(out); final KeyStoreWrapper fromStream = new KeyStoreWrapper(out.bytes().streamInput()); +        assertThat(fromStream.getFormatVersion(), is(KeyStoreWrapper.CURRENT_VERSION)); assertThat(fromStream.getSettingNames(), hasSize(2)); assertThat(fromStream.getSettingNames(), containsInAnyOrder("string_setting", "keystore.seed")); diff --git a/distribution/tools/keystore-cli/src/test/resources/format-v6-elasticsearch.keystore b/distribution/tools/keystore-cli/src/test/resources/format-v6-elasticsearch.keystore new file mode 100644 index 0000000000000..0f680cc013563 Binary files /dev/null and b/distribution/tools/keystore-cli/src/test/resources/format-v6-elasticsearch.keystore differ
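The new `format-v6-elasticsearch.keystore` fixture is binary, so its contents are not visible in the diff. As a hedged sketch (the class name is hypothetical, and it assumes the fixture was generated while v6 was the current keystore format), something like the following would produce a keystore with exactly the password and settings that `testLegacyV6` asserts on:

[source,java]
----
import java.nio.file.Path;

import org.elasticsearch.common.settings.KeyStoreWrapper;

// Hypothetical one-off fixture generator, not part of this change.
public class KeystoreFixtureWriter {
    public static void main(String[] args) throws Exception {
        Path configDir = Path.of(args[0]);
        try (KeyStoreWrapper wrapper = KeyStoreWrapper.create()) { // create() also seeds "keystore.seed"
            wrapper.setString("string", "value".toCharArray());    // the extra setting the test asserts on
            wrapper.save(configDir, "keystorepassword".toCharArray());
        }
    }
}
----

Regenerating such fixtures against the then-current format is what lets the `testLegacyV*` family verify that newer code can still decrypt older keystores.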
diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java index c3b9768946767..1e57d9fab7cfd 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java @@ -187,20 +187,12 @@ static String agentCommandLineOption(Path agentJar, Path tmpPropertiesFile) { static void extractSecureSettings(SecureSettings secrets, Map<String, String> propertiesMap) { final Set<String> settingNames = secrets.getSettingNames(); for (String key : List.of("api_key", "secret_token")) { -            for (String prefix : List.of("telemetry.", "tracing.apm.")) { -                if (settingNames.contains(prefix + key)) { -                    if (propertiesMap.containsKey(key)) { -                        throw new IllegalStateException( -                            Strings.format("Duplicate telemetry setting: [telemetry.%s] and [tracing.apm.%s]", key, key) -                        ); -                    } - -                    try (SecureString token = secrets.getString(prefix + key)) { -                        propertiesMap.put(key, token.toString()); -                    } +            String prefix = "telemetry."; +            if (settingNames.contains(prefix + key)) { +                try (SecureString token = secrets.getString(prefix + key)) { +                    propertiesMap.put(key, token.toString()); } } -        } } @@ -227,44 +219,12 @@ private static Map<String, String> extractDynamicSettings(Map<String, String> pr static Map<String, String> extractApmSettings(Settings settings) throws UserException { final Map<String, String> propertiesMap = new HashMap<>(); - // tracing.apm.agent. is deprecated by telemetry.agent. final String telemetryAgentPrefix = "telemetry.agent."; -        final String deprecatedTelemetryAgentPrefix = "tracing.apm.agent."; final Settings telemetryAgentSettings = settings.getByPrefix(telemetryAgentPrefix); telemetryAgentSettings.keySet().forEach(key -> propertiesMap.put(key, String.valueOf(telemetryAgentSettings.get(key)))); -        final Settings apmAgentSettings = settings.getByPrefix(deprecatedTelemetryAgentPrefix); -        for (String key : apmAgentSettings.keySet()) { -            if (propertiesMap.containsKey(key)) { -                throw new IllegalStateException( -                    Strings.format( -                        "Duplicate telemetry setting: [%s%s] and [%s%s]", -                        telemetryAgentPrefix, -                        key, -                        deprecatedTelemetryAgentPrefix, -                        key -                    ) -                ); -            } -            propertiesMap.put(key, String.valueOf(apmAgentSettings.get(key))); -        } - StringJoiner globalLabels = extractGlobalLabels(telemetryAgentPrefix, propertiesMap, settings); -        if (globalLabels.length() == 0) { -            globalLabels = extractGlobalLabels(deprecatedTelemetryAgentPrefix, propertiesMap, settings); -        } else { -            StringJoiner tracingGlobalLabels = extractGlobalLabels(deprecatedTelemetryAgentPrefix, propertiesMap, settings); -            if (tracingGlobalLabels.length() != 0) { -                throw new IllegalArgumentException( -                    "Cannot have global labels with tracing.agent prefix [" -                        + globalLabels -                        + "] and telemetry.apm.agent prefix [" -                        + tracingGlobalLabels -                        + "]" -                ); -            } -        } if (globalLabels.length() > 0) { propertiesMap.put("global_labels", globalLabels.toString()); } @@ -274,7 +234,7 @@ static Map<String, String> extractApmSettings(Settings settings) throws UserExce if (propertiesMap.containsKey(key)) { throw new UserException( ExitCodes.CONFIG, -                    "Do not set a value for [tracing.apm.agent." + key + "], as this is configured automatically by Elasticsearch" +                    "Do not set a value for [telemetry.agent." + key + "], as this is configured automatically by Elasticsearch" ); } }
diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java index fe0f82560894c..d4d40e697470e 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Booleans; import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.jdk.RuntimeVersionFeature; @@ -26,7 +27,9 @@ final class SystemJvmOptions { static List<String> systemJvmOptions(Settings nodeSettings, final Map<String, String> sysprops) { String distroType = sysprops.get("es.distribution.type"); boolean isHotspot = sysprops.getOrDefault("sun.management.compiler", "").contains("HotSpot"); -        boolean useEntitlements = Boolean.parseBoolean(sysprops.getOrDefault("es.entitlements.enabled", "false")); +        boolean entitlementsExplicitlyEnabled = Booleans.parseBoolean(sysprops.getOrDefault("es.entitlements.enabled", "false")); +        // java 24+ only supports entitlements, but it may be enabled on earlier versions explicitly +        boolean useEntitlements = RuntimeVersionFeature.isSecurityManagerAvailable() == false || entitlementsExplicitlyEnabled; return Stream.of( Stream.of( /* @@ -71,7 +74,7 @@ static List<String> systemJvmOptions(Settings nodeSettings, final Map<String, S maybeSetReplayFile(distroType, isHotspot), maybeWorkaroundG1Bug(), -            maybeAllowSecurityManager(), +            maybeAllowSecurityManager(useEntitlements), maybeAttachEntitlementAgent(useEntitlements) ).flatMap(s -> s).toList(); } @@ -140,7 +143,7 @@ private static Stream<String> maybeWorkaroundG1Bug() { } @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) -    private static Stream<String> maybeAllowSecurityManager() { +    private static Stream<String> maybeAllowSecurityManager(boolean useEntitlements) { if (RuntimeVersionFeature.isSecurityManagerAvailable()) { // Will become conditional on useEntitlements once entitlements can run without SM return Stream.of("-Djava.security.manager=allow"); @@ -167,12 +170,16 @@ private static Stream<String> maybeAttachEntitlementAgent(boolean useEntitlement } catch (IOException e) { throw new IllegalStateException("Failed to list entitlement jars in: " + dir, e); } + // We instrument classes in these modules to call the bridge. Because the bridge gets patched + // into java.base, we must export the bridge from java.base to these modules. +        String modulesContainingEntitlementInstrumentation = "java.logging"; return Stream.of( "-Des.entitlements.enabled=true", "-XX:+EnableDynamicAgentLoading", "-Djdk.attach.allowAttachSelf=true", "--patch-module=java.base=" + bridgeJar, -            "--add-exports=java.base/org.elasticsearch.entitlement.bridge=org.elasticsearch.entitlement" +            "--add-exports=java.base/org.elasticsearch.entitlement.bridge=org.elasticsearch.entitlement," +                + modulesContainingEntitlementInstrumentation ); } }
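The new entitlements toggle above is the subtle part of this change: on a runtime without a SecurityManager (Java 24 and later), entitlements are always used, while older runtimes require the explicit system property. A minimal sketch of the same rule (illustrative names; the real code uses Elasticsearch's stricter `Booleans.parseBoolean` rather than `Boolean.parseBoolean`):

[source,java]
----
import java.util.Map;

class EntitlementsDecisionSketch {
    // Mirrors the decision above: forced on when no SecurityManager exists,
    // opt-in via -Des.entitlements.enabled=true everywhere else.
    static boolean useEntitlements(boolean securityManagerAvailable, Map<String, String> sysprops) {
        boolean explicitlyEnabled = Boolean.parseBoolean(sysprops.getOrDefault("es.entitlements.enabled", "false"));
        return securityManagerAvailable == false || explicitlyEnabled;
    }

    public static void main(String[] args) {
        System.out.println(useEntitlements(true, Map.of()));                                  // false
        System.out.println(useEntitlements(true, Map.of("es.entitlements.enabled", "true"))); // true
        System.out.println(useEntitlements(false, Map.of()));                                 // true (Java 24+)
    }
}
----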
diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java index a7ba8eb11fbcc..0e067afc1aa73 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java @@ -25,18 +25,15 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.function.Function; import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; @@ -82,109 +79,63 @@ public void testFileDeleteWorks() throws IOException { } public void testExtractSecureSettings() { -        MockSecureSettings duplicateSecureSettings = new MockSecureSettings(); +        MockSecureSettings secureSettings = new MockSecureSettings(); +        secureSettings.setString("telemetry.secret_token", "token"); +        secureSettings.setString("telemetry.api_key", "key"); -        for (String prefix : List.of("telemetry.", "tracing.apm.")) { -            MockSecureSettings secureSettings = new MockSecureSettings(); -            secureSettings.setString(prefix + "secret_token", "token"); -            secureSettings.setString(prefix + "api_key", "key"); - -            duplicateSecureSettings.setString(prefix + "api_key", "secret"); - -            Map<String, String> propertiesMap = new HashMap<>(); -            APMJvmOptions.extractSecureSettings(secureSettings, propertiesMap); - -            assertThat(propertiesMap, matchesMap(Map.of("secret_token", "token", "api_key", "key"))); -        } - -        Exception exception = expectThrows( IllegalStateException.class, () -> APMJvmOptions.extractSecureSettings(duplicateSecureSettings, new HashMap<>()) ); -        assertThat(exception.getMessage(), containsString("Duplicate telemetry setting")); -        assertThat(exception.getMessage(), containsString("telemetry.api_key")); -        assertThat(exception.getMessage(), containsString("tracing.apm.api_key")); +        Map<String, String> propertiesMap = new HashMap<>(); +        APMJvmOptions.extractSecureSettings(secureSettings, propertiesMap); +        assertThat(propertiesMap, matchesMap(Map.of("secret_token", "token", "api_key", "key"))); } public void testExtractSettings() throws UserException { -        Function<String, Settings.Builder> buildSettings = (prefix) -> Settings.builder() -            .put(prefix + "server_url", "https://myurl:443") -            .put(prefix + "service_node_name", "instance-0000000001"); - -        for (String prefix : List.of("tracing.apm.agent.", "telemetry.agent.")) { -            var name = "APM Tracing"; -            var deploy = "123"; -            var org = "456"; -            var extracted = APMJvmOptions.extractApmSettings( -                buildSettings.apply(prefix) -                    .put(prefix + "global_labels.deployment_name", name) -                    .put(prefix + "global_labels.deployment_id", deploy) -                    .put(prefix + "global_labels.organization_id", org) -                    .build() -            ); - -            assertThat( -                extracted, -                allOf( -                    hasEntry("server_url", "https://myurl:443"), -                    hasEntry("service_node_name", "instance-0000000001"), -                    hasEntry(equalTo("global_labels"), not(endsWith(","))), // test that we have collapsed all global labels into one -                    not(hasKey("global_labels.organization_id")) // tests that we strip out the top level label keys -                ) -            ); - -            List<String> labels = Arrays.stream(extracted.get("global_labels").split(",")).toList(); -            assertThat(labels, hasSize(3)); -            assertThat(labels, containsInAnyOrder("deployment_name=APM Tracing", "organization_id=" + org, "deployment_id=" + deploy)); - -            // test replacing with underscores and skipping empty -            name = "APM=Tracing"; -            deploy = ""; -            org = ",456"; -            extracted = APMJvmOptions.extractApmSettings( -                buildSettings.apply(prefix) -                    .put(prefix + "global_labels.deployment_name", name) -                    .put(prefix + "global_labels.deployment_id", deploy) -                    .put(prefix + "global_labels.organization_id", org) -                    .build() -            ); -            labels = Arrays.stream(extracted.get("global_labels").split(",")).toList(); -            assertThat(labels, hasSize(2)); -            assertThat(labels, containsInAnyOrder("deployment_name=APM_Tracing", "organization_id=_456")); -        } - -        IllegalStateException err = expectThrows( -            IllegalStateException.class, -            () -> APMJvmOptions.extractApmSettings( -                Settings.builder() -                    .put("tracing.apm.agent.server_url", "https://myurl:443") -                    .put("telemetry.agent.server_url", "https://myurl-2:443") -                    .build() -            ) -        ); -        assertThat(err.getMessage(), is("Duplicate telemetry setting: [telemetry.agent.server_url] and [tracing.apm.agent.server_url]")); -    } - -    public void testNoMixedLabels() { -        String telemetryAgent = "telemetry.agent."; -        String tracingAgent = "tracing.apm.agent."; -        Settings settings = Settings.builder() -            .put("tracing.apm.enabled", true) -            .put(telemetryAgent + "server_url", "https://myurl:443") -            .put(telemetryAgent + "service_node_name", "instance-0000000001") -            .put(tracingAgent + "global_labels.deployment_id", "123") -            .put(telemetryAgent + "global_labels.organization_id", "456") +        Settings defaults = Settings.builder() +            .put("telemetry.agent.server_url", "https://myurl:443") +            .put("telemetry.agent.service_node_name", "instance-0000000001") .build(); -        IllegalArgumentException err = assertThrows(IllegalArgumentException.class, () -> APMJvmOptions.extractApmSettings(settings)); +        var name = "APM Tracing"; +        var deploy = "123"; +        var org = "456"; +        var extracted = APMJvmOptions.extractApmSettings( +            Settings.builder() +                .put(defaults) +                .put("telemetry.agent.global_labels.deployment_name", name) +                .put("telemetry.agent.global_labels.deployment_id", deploy) +                .put("telemetry.agent.global_labels.organization_id", org) +                .build() +        ); + assertThat( -            err.getMessage(), -            is( -                "Cannot have global labels with tracing.agent prefix [organization_id=456] and" +            extracted, +            allOf( +                hasEntry("server_url", "https://myurl:443"), +                hasEntry("service_node_name", "instance-0000000001"), +                hasEntry(equalTo("global_labels"), not(endsWith(","))), // test that we have collapsed all global labels into one +                not(hasKey("global_labels.organization_id")) // tests that we strip out the top level label keys -                    + " telemetry.apm.agent prefix [deployment_id=123]" ) ); + +        List<String> labels =
Arrays.stream(extracted.get("global_labels").split(",")).toList(); + assertThat(labels, hasSize(3)); + assertThat(labels, containsInAnyOrder("deployment_name=APM Tracing", "organization_id=" + org, "deployment_id=" + deploy)); + + // test replacing with underscores and skipping empty + name = "APM=Tracing"; + deploy = ""; + org = ",456"; + extracted = APMJvmOptions.extractApmSettings( + Settings.builder() + .put(defaults) + .put("telemetry.agent.global_labels.deployment_name", name) + .put("telemetry.agent.global_labels.deployment_id", deploy) + .put("telemetry.agent.global_labels.organization_id", org) + .build() + ); + labels = Arrays.stream(extracted.get("global_labels").split(",")).toList(); + assertThat(labels, hasSize(2)); + assertThat(labels, containsInAnyOrder("deployment_name=APM_Tracing", "organization_id=_456")); } private Path makeFakeAgentJar() throws IOException { diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index bdb0704fcd880..f2e61861bd3a6 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -9,6 +9,7 @@ include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[] :docker-repo: docker.elastic.co/elasticsearch/elasticsearch :docker-image: {docker-repo}:{version} +:docker-wolfi-image: {docker-repo}-wolfi:{version} :kib-docker-repo: docker.elastic.co/kibana/kibana :kib-docker-image: {kib-docker-repo}:{version} :plugin_url: https://artifacts.elastic.co/downloads/elasticsearch-plugins diff --git a/docs/build.gradle b/docs/build.gradle index dec0de8ffa844..43b61ea97c089 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -130,8 +130,9 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach { setting 'xpack.security.enabled', 'true' setting 'xpack.security.authc.api_key.enabled', 'true' setting 'xpack.security.authc.token.enabled', 'true' - // disable the ILM history for doc tests to avoid potential lingering tasks that'd cause test flakiness + // disable the ILM and SLM history for doc tests to avoid potential lingering tasks that'd cause test flakiness setting 'indices.lifecycle.history_index_enabled', 'false' + setting 'slm.history_index_enabled', 'false' setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.authc.realms.file.file.order', '0' setting 'xpack.security.authc.realms.native.native.order', '1' @@ -177,8 +178,9 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach { } tasks.named("yamlRestTest").configure { + def repoFolder = "${layout.buildDirectory.asFile.get()}/cluster/shared/repo" doFirst { - delete("${buildDir}/cluster/shared/repo") + delete(repoFolder) } } diff --git a/docs/changelog/116358.yaml b/docs/changelog/116358.yaml deleted file mode 100644 index 58b44a1e9bcf5..0000000000000 --- a/docs/changelog/116358.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 116358 -summary: Update Deberta tokenizer -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/116388.yaml b/docs/changelog/116388.yaml new file mode 100644 index 0000000000000..59cdafc9ec337 --- /dev/null +++ b/docs/changelog/116388.yaml @@ -0,0 +1,5 @@ +pr: 116388 +summary: Add support for partial shard results +area: EQL +type: enhancement +issues: [] diff --git a/docs/changelog/116687.yaml b/docs/changelog/116687.yaml new file mode 100644 index 0000000000000..f8c7f86eff04a --- /dev/null +++ b/docs/changelog/116687.yaml @@ -0,0 +1,5 @@ +pr: 116687 +summary: Add LogsDB option to route on sort fields +area: Logs +type: enhancement +issues: [] diff --git a/docs/changelog/116868.yaml 
b/docs/changelog/116868.yaml new file mode 100644 index 0000000000000..4ca4d23306462 --- /dev/null +++ b/docs/changelog/116868.yaml @@ -0,0 +1,5 @@ +pr: 116868 +summary: Run `TransportGetComponentTemplateAction` on local node +area: Indices APIs +type: enhancement +issues: [] diff --git a/docs/changelog/117153.yaml b/docs/changelog/117153.yaml deleted file mode 100644 index f7640c0a7ed6a..0000000000000 --- a/docs/changelog/117153.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 117153 -summary: "ESQL: fix the column position in errors" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/117214.yaml b/docs/changelog/117214.yaml new file mode 100644 index 0000000000000..ba74197eb7634 --- /dev/null +++ b/docs/changelog/117214.yaml @@ -0,0 +1,5 @@ +pr: 117214 +summary: Returning ignored fields in the simulate ingest API +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/117519.yaml b/docs/changelog/117519.yaml new file mode 100644 index 0000000000000..f228278983785 --- /dev/null +++ b/docs/changelog/117519.yaml @@ -0,0 +1,20 @@ +pr: 117519 +summary: Remove `data_frame_transforms` roles +area: Transform +type: breaking +issues: [] +breaking: + title: Remove `data_frame_transforms` roles + area: Transform + details: >- + `data_frame_transforms_admin` and `data_frame_transforms_user` were deprecated in + Elasticsearch 7 and are being removed in Elasticsearch 9. + `data_frame_transforms_admin` is now `transform_admin`. + `data_frame_transforms_user` is now `transform_user`. + Users must call the `_update` API to replace the permissions on the Transform before the + Transform can be started. + impact: >- + Transforms created with either the `data_frame_transforms_admin` or the + `data_frame_transforms_user` role will fail to start. The Transform will remain + in a `stopped` state, and its health will be red while displaying permission failures. 
+ notable: false diff --git a/docs/changelog/117581.yaml b/docs/changelog/117581.yaml new file mode 100644 index 0000000000000..b88017f45e9c9 --- /dev/null +++ b/docs/changelog/117581.yaml @@ -0,0 +1,5 @@ +pr: 117581 +summary: Make reserved built-in roles queryable +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/117778.yaml b/docs/changelog/117778.yaml new file mode 100644 index 0000000000000..880d4f831e533 --- /dev/null +++ b/docs/changelog/117778.yaml @@ -0,0 +1,5 @@ +pr: 117778 +summary: "[Connector APIs] Enforce index prefix for managed connectors" +area: Extract&Transform +type: feature +issues: [] diff --git a/docs/changelog/117858.yaml b/docs/changelog/117858.yaml new file mode 100644 index 0000000000000..70f12dc40027f --- /dev/null +++ b/docs/changelog/117858.yaml @@ -0,0 +1,5 @@ +pr: 117858 +summary: Create upgrade mode +area: Transform +type: enhancement +issues: [] diff --git a/docs/changelog/117949.yaml b/docs/changelog/117949.yaml new file mode 100644 index 0000000000000..b67f36a224094 --- /dev/null +++ b/docs/changelog/117949.yaml @@ -0,0 +1,5 @@ +pr: 117949 +summary: Move `SlowLogFieldProvider` instantiation to node construction +area: Infra/Logging +type: bug +issues: [] diff --git a/docs/changelog/117989.yaml b/docs/changelog/117989.yaml new file mode 100644 index 0000000000000..e4967141b3ebd --- /dev/null +++ b/docs/changelog/117989.yaml @@ -0,0 +1,5 @@ +pr: 117989 +summary: ESQL Add esql hash function +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/118016.yaml b/docs/changelog/118016.yaml new file mode 100644 index 0000000000000..7ee78b901b199 --- /dev/null +++ b/docs/changelog/118016.yaml @@ -0,0 +1,6 @@ +pr: 118016 +summary: Propagate status codes from shard failures appropriately +area: Search +type: enhancement +issues: + - 118482 diff --git a/docs/changelog/118143.yaml b/docs/changelog/118143.yaml new file mode 100644 index 0000000000000..4dcbf4b4b6c2c --- /dev/null +++ b/docs/changelog/118143.yaml @@ -0,0 +1,5 @@ +pr: 118143 +summary: Infrastructure for assuming cluster features in the next major version +area: "Infra/Core" +type: feature +issues: [] diff --git a/docs/changelog/118266.yaml b/docs/changelog/118266.yaml new file mode 100644 index 0000000000000..1b14b12b973c5 --- /dev/null +++ b/docs/changelog/118266.yaml @@ -0,0 +1,5 @@ +pr: 118266 +summary: Prevent data nodes from sending stack traces to coordinator when `error_trace=false` +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/118324.yaml b/docs/changelog/118324.yaml new file mode 100644 index 0000000000000..729ff56f6a253 --- /dev/null +++ b/docs/changelog/118324.yaml @@ -0,0 +1,6 @@ +pr: 118324 +summary: Allow the data type of `null` in filters +area: ES|QL +type: bug +issues: + - 116351 diff --git a/docs/changelog/118353.yaml b/docs/changelog/118353.yaml new file mode 100644 index 0000000000000..7be62a4a60c7e --- /dev/null +++ b/docs/changelog/118353.yaml @@ -0,0 +1,5 @@ +pr: 118353 +summary: Epoch Millis Rounding Down and Not Up 2 +area: Infra/Core +type: bug +issues: [] diff --git a/docs/changelog/118366.yaml b/docs/changelog/118366.yaml new file mode 100644 index 0000000000000..cfeab1937738b --- /dev/null +++ b/docs/changelog/118366.yaml @@ -0,0 +1,22 @@ +pr: 118366 +summary: |- Configuring a bind DN in an LDAP or Active Directory (AD) realm without a corresponding bind password will prevent node from starting +area: Authentication +type: breaking +issues: [] +breaking: + title: >- Configuring a bind DN in an LDAP or Active Directory (AD) realm without + a corresponding bind password will prevent node from starting + area: Cluster and node setting + details: >- For LDAP or AD authentication realms, setting a bind DN (via the + `xpack.security.authc.realms.ldap.*.bind_dn` or `xpack.security.authc.realms.active_directory.*.bind_dn` + realm settings) without a bind password is a misconfiguration that may prevent successful authentication + to the node. Nodes will fail to start if a bind DN is specified without a password. + impact: >- If you have a bind DN configured for an LDAP or AD authentication + realm, set a bind password for {ref}/ldap-realm.html#ldap-realm-configuration[LDAP] + or {ref}/active-directory-realm.html#ad-realm-configuration[Active Directory]. + Configuring a bind DN without a password prevents the misconfigured node from starting. diff --git a/docs/changelog/118380.yaml b/docs/changelog/118380.yaml deleted file mode 100644 index 8b26c871fb172..0000000000000 --- a/docs/changelog/118380.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 118380 -summary: Restore original "is within leaf" value in `SparseVectorFieldMapper` -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/118484.yaml b/docs/changelog/118484.yaml new file mode 100644 index 0000000000000..41db476a42523 --- /dev/null +++ b/docs/changelog/118484.yaml @@ -0,0 +1,14 @@ +pr: 118484 +summary: Remove date histogram boolean support +area: Aggregations +type: breaking +issues: [] +breaking: + title: Remove date histogram boolean support + area: Aggregations + details: Elasticsearch no longer allows running Date Histogram aggregations + over boolean fields. Instead, use Terms aggregation for boolean + fields. + impact: We expect the impact to be minimal, as this never produced good + results, and has been deprecated for years.
+ notable: false diff --git a/docs/changelog/118544.yaml b/docs/changelog/118544.yaml new file mode 100644 index 0000000000000..d59783c4e6194 --- /dev/null +++ b/docs/changelog/118544.yaml @@ -0,0 +1,5 @@ +pr: 118544 +summary: ESQL - Remove restrictions for disjunctions in full text functions +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/118562.yaml b/docs/changelog/118562.yaml new file mode 100644 index 0000000000000..a6b00b326151f --- /dev/null +++ b/docs/changelog/118562.yaml @@ -0,0 +1,6 @@ +pr: 118562 +summary: Update data stream deprecations warnings to new format and filter searchable + snapshots from response +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/118585.yaml b/docs/changelog/118585.yaml new file mode 100644 index 0000000000000..4caa5efabbd33 --- /dev/null +++ b/docs/changelog/118585.yaml @@ -0,0 +1,7 @@ +pr: 118585 +summary: Add a generic `rescorer` retriever based on the search request's rescore + functionality +area: Ranking +type: feature +issues: + - 118327 diff --git a/docs/changelog/118602.yaml b/docs/changelog/118602.yaml new file mode 100644 index 0000000000000..a75c5dcf11da3 --- /dev/null +++ b/docs/changelog/118602.yaml @@ -0,0 +1,5 @@ +pr: 118602 +summary: Limit memory usage of `fold` +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/118603.yaml b/docs/changelog/118603.yaml new file mode 100644 index 0000000000000..d61619adfa5f6 --- /dev/null +++ b/docs/changelog/118603.yaml @@ -0,0 +1,6 @@ +pr: 118603 +summary: Allow DATE_PARSE to read the timezones +area: ES|QL +type: bug +issues: + - 117680 diff --git a/docs/changelog/118617.yaml b/docs/changelog/118617.yaml new file mode 100644 index 0000000000000..a8793a114e913 --- /dev/null +++ b/docs/changelog/118617.yaml @@ -0,0 +1,5 @@ +pr: 118617 +summary: Add support for `sparse_vector` queries against `semantic_text` fields +area: "Search" +type: enhancement +issues: [] diff --git a/docs/changelog/118652.yaml b/docs/changelog/118652.yaml new file mode 100644 index 0000000000000..0b08686230405 --- /dev/null +++ b/docs/changelog/118652.yaml @@ -0,0 +1,5 @@ +pr: 118652 +summary: Add Jina AI API to do inference for Embedding and Rerank models +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/118669.yaml b/docs/changelog/118669.yaml new file mode 100644 index 0000000000000..4e0d10aaac816 --- /dev/null +++ b/docs/changelog/118669.yaml @@ -0,0 +1,5 @@ +pr: 118669 +summary: "[Connector API] Support soft-deletes of connectors" +area: Extract&Transform +type: feature +issues: [] diff --git a/docs/changelog/118671.yaml b/docs/changelog/118671.yaml new file mode 100644 index 0000000000000..3931cc4179037 --- /dev/null +++ b/docs/changelog/118671.yaml @@ -0,0 +1,11 @@ +pr: 118671 +summary: Adjust `random_score` default field to `_seq_no` field +area: Search +type: breaking +issues: [] +breaking: + title: Adjust `random_score` default field to `_seq_no` field + area: Search + details: When providing a 'seed' parameter to a 'random_score' function in the 'function_score' query but NOT providing a 'field', the default 'field' is switched from '_id' to '_seq_no'. + impact: The random scoring and ordering may change when providing a 'seed' and not providing a 'field' to a 'random_score' function. 
+ notable: false diff --git a/docs/changelog/118674.yaml b/docs/changelog/118674.yaml new file mode 100644 index 0000000000000..eeb90a3b38f66 --- /dev/null +++ b/docs/changelog/118674.yaml @@ -0,0 +1,5 @@ +pr: 118674 +summary: Ignore failures from renormalizing buckets in read-only index +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/118681.yaml b/docs/changelog/118681.yaml new file mode 100644 index 0000000000000..a186c05e6cd7d --- /dev/null +++ b/docs/changelog/118681.yaml @@ -0,0 +1,6 @@ +pr: 118681 +summary: '`ConnectTransportException` returns retryable BAD_GATEWAY' +area: Network +type: enhancement +issues: + - 118320 diff --git a/docs/changelog/118697.yaml b/docs/changelog/118697.yaml new file mode 100644 index 0000000000000..6e24e6ae4b47f --- /dev/null +++ b/docs/changelog/118697.yaml @@ -0,0 +1,6 @@ +pr: 118697 +summary: Esql implicit casting for date nanos +area: ES|QL +type: enhancement +issues: + - 118476 diff --git a/docs/changelog/118757.yaml b/docs/changelog/118757.yaml new file mode 100644 index 0000000000000..956e220f21aeb --- /dev/null +++ b/docs/changelog/118757.yaml @@ -0,0 +1,5 @@ +pr: 118757 +summary: Improve handling of nested fields in index reader wrappers +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/118774.yaml b/docs/changelog/118774.yaml new file mode 100644 index 0000000000000..cbd1ca82d1c59 --- /dev/null +++ b/docs/changelog/118774.yaml @@ -0,0 +1,5 @@ +pr: 118774 +summary: Apply default k for knn query eagerly +area: Vector Search +type: bug +issues: [] diff --git a/docs/changelog/118802.yaml b/docs/changelog/118802.yaml new file mode 100644 index 0000000000000..600c4b6a1e203 --- /dev/null +++ b/docs/changelog/118802.yaml @@ -0,0 +1,5 @@ +pr: 118802 +summary: ST_EXTENT_AGG optimize envelope extraction from doc-values for cartesian_shape +area: "ES|QL" +type: enhancement +issues: [] diff --git a/docs/changelog/118804.yaml b/docs/changelog/118804.yaml new file mode 100644 index 0000000000000..1548367a5485f --- /dev/null +++ b/docs/changelog/118804.yaml @@ -0,0 +1,15 @@ +pr: 118804 +summary: Add new experimental `rank_vectors` mapping for late-interaction second order + ranking +area: Vector Search +type: feature +issues: [] +highlight: + title: Add new experimental `rank_vectors` mapping for late-interaction second order + ranking + body: + Late-interaction models are powerful rerankers. While their size and overall + cost don't lend themselves to HNSW indexing, utilizing them for second-order reranking + can provide excellent boosts in relevance. The new `rank_vectors` mapping allows for rescoring + over new and novel multi-vector late-interaction models like ColBERT or ColPali.
+ notable: true diff --git a/docs/changelog/118816.yaml b/docs/changelog/118816.yaml new file mode 100644 index 0000000000000..f1c1eac90dbcf --- /dev/null +++ b/docs/changelog/118816.yaml @@ -0,0 +1,6 @@ +pr: 118816 +summary: Support flattened field with downsampling +area: Downsampling +type: bug +issues: + - 116319 diff --git a/docs/changelog/118823.yaml b/docs/changelog/118823.yaml new file mode 100644 index 0000000000000..b1afe1c873c17 --- /dev/null +++ b/docs/changelog/118823.yaml @@ -0,0 +1,5 @@ +pr: 118823 +summary: Fix attribute set equals +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/116944.yaml b/docs/changelog/118825.yaml similarity index 84% rename from docs/changelog/116944.yaml rename to docs/changelog/118825.yaml index e7833e49cf965..23170ec4705da 100644 --- a/docs/changelog/116944.yaml +++ b/docs/changelog/118825.yaml @@ -1,4 +1,4 @@ -pr: 116944 +pr: 118825 summary: "Remove support for type, fields, `copy_to` and boost in metadata field definition" area: Mapping type: breaking @@ -6,6 +6,6 @@ issues: [] breaking: title: "Remove support for type, fields, copy_to and boost in metadata field definition" area: Mapping - details: The type, fields, copy_to and boost parameters are no longer supported in metadata field definition + details: The type, fields, copy_to and boost parameters are no longer supported in metadata field definition starting with version 9. impact: Users providing type, fields, copy_to or boost as part of metadata field definition should remove them from their mappings. notable: false diff --git a/docs/changelog/118837.yaml b/docs/changelog/118837.yaml new file mode 100644 index 0000000000000..38cd32f3a3513 --- /dev/null +++ b/docs/changelog/118837.yaml @@ -0,0 +1,5 @@ +pr: 118837 +summary: Add missing timeouts to rest-api-spec ILM APIs +area: "ILM+SLM" +type: bug +issues: [] diff --git a/docs/changelog/118844.yaml b/docs/changelog/118844.yaml new file mode 100644 index 0000000000000..f9f92bcaeb8cb --- /dev/null +++ b/docs/changelog/118844.yaml @@ -0,0 +1,5 @@ +pr: 118844 +summary: Add missing timeouts to rest-api-spec ingest APIs +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/118858.yaml b/docs/changelog/118858.yaml new file mode 100644 index 0000000000000..a2161df1c84c7 --- /dev/null +++ b/docs/changelog/118858.yaml @@ -0,0 +1,5 @@ +pr: 118858 +summary: Lookup join on multiple join fields not yet supported +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/118870.yaml b/docs/changelog/118870.yaml new file mode 100644 index 0000000000000..ce3692d5454ae --- /dev/null +++ b/docs/changelog/118870.yaml @@ -0,0 +1,6 @@ +pr: 118870 +summary: Rewrite TO_UPPER/TO_LOWER comparisons +area: ES|QL +type: enhancement +issues: + - 118304 diff --git a/docs/changelog/118871.yaml b/docs/changelog/118871.yaml new file mode 100644 index 0000000000000..3c1a06d450f39 --- /dev/null +++ b/docs/changelog/118871.yaml @@ -0,0 +1,5 @@ +pr: 118871 +summary: "[Elastic Inference Service] Add ElasticInferenceService Unified ChatCompletions Integration" +area: Inference +type: enhancement +issues: [] diff --git a/docs/changelog/118890.yaml b/docs/changelog/118890.yaml new file mode 100644 index 0000000000000..d3fc17157f130 --- /dev/null +++ b/docs/changelog/118890.yaml @@ -0,0 +1,5 @@ +pr: 118890 +summary: Add action to create index from a source index +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/118919.yaml b/docs/changelog/118919.yaml new file mode 100644 index 0000000000000..832fd86fe08ba 
--- /dev/null +++ b/docs/changelog/118919.yaml @@ -0,0 +1,5 @@ +pr: 118919 +summary: Remove unsupported timeout from rest-api-spec license API +area: License +type: bug +issues: [] diff --git a/docs/changelog/118921.yaml b/docs/changelog/118921.yaml new file mode 100644 index 0000000000000..bd341616d8a14 --- /dev/null +++ b/docs/changelog/118921.yaml @@ -0,0 +1,5 @@ +pr: 118921 +summary: Add missing timeouts to rest-api-spec shutdown APIs +area: Infra/Node Lifecycle +type: bug +issues: [] diff --git a/docs/changelog/118931.yaml b/docs/changelog/118931.yaml new file mode 100644 index 0000000000000..81e9b3cb16521 --- /dev/null +++ b/docs/changelog/118931.yaml @@ -0,0 +1,6 @@ +pr: 118931 +summary: Add a `LicenseAware` interface for licensed Nodes +area: ES|QL +type: enhancement +issues: + - 117405 diff --git a/docs/changelog/118938.yaml b/docs/changelog/118938.yaml new file mode 100644 index 0000000000000..395da7912fd4b --- /dev/null +++ b/docs/changelog/118938.yaml @@ -0,0 +1,5 @@ +pr: 118938 +summary: Hash functions +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/118941.yaml b/docs/changelog/118941.yaml new file mode 100644 index 0000000000000..4f0099bb32704 --- /dev/null +++ b/docs/changelog/118941.yaml @@ -0,0 +1,5 @@ +pr: 118941 +summary: Allow archive and searchable snapshots indices in N-2 version +area: Recovery +type: enhancement +issues: [] diff --git a/docs/changelog/118954.yaml b/docs/changelog/118954.yaml new file mode 100644 index 0000000000000..ab2f2cda5c11e --- /dev/null +++ b/docs/changelog/118954.yaml @@ -0,0 +1,5 @@ +pr: 118954 +summary: Add missing parameter to `xpack.info` rest-api-spec +area: Infra/REST API +type: bug +issues: [] diff --git a/docs/changelog/118958.yaml b/docs/changelog/118958.yaml new file mode 100644 index 0000000000000..fb0fd6388ab61 --- /dev/null +++ b/docs/changelog/118958.yaml @@ -0,0 +1,5 @@ +pr: 118958 +summary: Add missing timeouts to rest-api-spec SLM APIs +area: ILM+SLM +type: bug +issues: [] diff --git a/docs/changelog/118959.yaml b/docs/changelog/118959.yaml new file mode 100644 index 0000000000000..95a9c146ae672 --- /dev/null +++ b/docs/changelog/118959.yaml @@ -0,0 +1,5 @@ +pr: 118959 +summary: Allow kibana_system user to manage .reindexed-v8-internal.alerts indices +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/118968.yaml b/docs/changelog/118968.yaml new file mode 100644 index 0000000000000..799cd32471f2f --- /dev/null +++ b/docs/changelog/118968.yaml @@ -0,0 +1,6 @@ +pr: 118968 +summary: Configure index sorting through index settings for logsdb +area: Logs +type: enhancement +issues: + - 118686 diff --git a/docs/changelog/118999.yaml b/docs/changelog/118999.yaml new file mode 100644 index 0000000000000..0188cebbd7685 --- /dev/null +++ b/docs/changelog/118999.yaml @@ -0,0 +1,6 @@ +pr: 118999 +summary: Fix loss of context in the inference API for streaming APIs +area: Machine Learning +type: bug +issues: + - 119000 diff --git a/docs/changelog/119007.yaml b/docs/changelog/119007.yaml new file mode 100644 index 0000000000000..458101b68d454 --- /dev/null +++ b/docs/changelog/119007.yaml @@ -0,0 +1,6 @@ +pr: 119007 +summary: Block-writes cannot be added after read-only +area: Data streams +type: bug +issues: + - 119002 diff --git a/docs/changelog/119011.yaml b/docs/changelog/119011.yaml new file mode 100644 index 0000000000000..0dbb683ceb2fd --- /dev/null +++ b/docs/changelog/119011.yaml @@ -0,0 +1,5 @@ +pr: 119011 +summary: "Add support for knn vector queries on `semantic_text` fields" 
+area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/119054.yaml b/docs/changelog/119054.yaml new file mode 100644 index 0000000000000..720f2e0ab02ed --- /dev/null +++ b/docs/changelog/119054.yaml @@ -0,0 +1,6 @@ +pr: 119054 +summary: "[Security Solution] allows `kibana_system` user to manage .reindexed-v8-*\ + \ Security Solution indices" +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/119067.yaml b/docs/changelog/119067.yaml new file mode 100644 index 0000000000000..c7ddd570bea18 --- /dev/null +++ b/docs/changelog/119067.yaml @@ -0,0 +1,5 @@ +pr: 119067 +summary: Metrics for indexing failures due to version conflicts +area: CRUD +type: feature +issues: [] diff --git a/docs/changelog/119131.yaml b/docs/changelog/119131.yaml new file mode 100644 index 0000000000000..2628b6184f90d --- /dev/null +++ b/docs/changelog/119131.yaml @@ -0,0 +1,5 @@ +pr: 119131 +summary: Expose BwC enrich cache setting in plugin +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/119134.yaml b/docs/changelog/119134.yaml new file mode 100644 index 0000000000000..c4aefac91c701 --- /dev/null +++ b/docs/changelog/119134.yaml @@ -0,0 +1,6 @@ +pr: 119134 +summary: Handle `index.mapping.ignore_malformed` in downsampling +area: Downsampling +type: bug +issues: + - 119075 diff --git a/docs/changelog/119227.yaml b/docs/changelog/119227.yaml new file mode 100644 index 0000000000000..1e3d4f97a3d27 --- /dev/null +++ b/docs/changelog/119227.yaml @@ -0,0 +1,13 @@ +pr: 119227 +summary: Remove unfreeze REST endpoint +area: Indices APIs +type: breaking +issues: [] +breaking: + title: Remove unfreeze REST endpoint + area: REST API + details: >- + The `/{index}/_unfreeze` REST endpoint is no longer supported. This API was deprecated, and the corresponding + `/{index}/_freeze` endpoint was removed in 8.0. 
+ impact: None, since it is not possible to have a frozen index in a version which is readable by Elasticsearch 9.0 + notable: false diff --git a/docs/changelog/119233.yaml b/docs/changelog/119233.yaml new file mode 100644 index 0000000000000..ef89c011ce4f6 --- /dev/null +++ b/docs/changelog/119233.yaml @@ -0,0 +1,5 @@ +pr: 119233 +summary: Fixing `GetDatabaseConfigurationAction` response serialization +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/119250.yaml b/docs/changelog/119250.yaml new file mode 100644 index 0000000000000..9db36957d8050 --- /dev/null +++ b/docs/changelog/119250.yaml @@ -0,0 +1,5 @@ +pr: 119250 +summary: Add rest endpoint for `create_from_source_index` +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/119265.yaml b/docs/changelog/119265.yaml new file mode 100644 index 0000000000000..296106b9c01c6 --- /dev/null +++ b/docs/changelog/119265.yaml @@ -0,0 +1,6 @@ +pr: 119265 +summary: Fix `AbstractShapeGeometryFieldMapperTests` +area: "ES|QL" +type: bug +issues: + - 119201 diff --git a/docs/changelog/119291.yaml b/docs/changelog/119291.yaml new file mode 100644 index 0000000000000..89a6b6118049c --- /dev/null +++ b/docs/changelog/119291.yaml @@ -0,0 +1,5 @@ +pr: 119291 +summary: Register mustache size limit setting +area: Infra/Scripting +type: bug +issues: [] diff --git a/docs/changelog/119296.yaml b/docs/changelog/119296.yaml new file mode 100644 index 0000000000000..f1a92cffb5131 --- /dev/null +++ b/docs/changelog/119296.yaml @@ -0,0 +1,6 @@ +pr: 119296 +summary: Fix writing for LOOKUP status +area: ES|QL +type: bug +issues: + - 119086 diff --git a/docs/changelog/119310.yaml b/docs/changelog/119310.yaml new file mode 100644 index 0000000000000..4e09e1a5c39d1 --- /dev/null +++ b/docs/changelog/119310.yaml @@ -0,0 +1,6 @@ +pr: 119310 +summary: Remove ChunkedToXContentBuilder +area: "Network" +type: bug +issues: + - 118647 diff --git a/docs/changelog/119348.yaml b/docs/changelog/119348.yaml new file mode 100644 index 0000000000000..05d2d7499ba03 --- /dev/null +++ b/docs/changelog/119348.yaml @@ -0,0 +1,5 @@ +pr: 119348 +summary: Auto-migrate `max_page_search_size` +area: Transform +type: enhancement +issues: [] diff --git a/docs/changelog/119449.yaml b/docs/changelog/119449.yaml new file mode 100644 index 0000000000000..f02bfa6d16d60 --- /dev/null +++ b/docs/changelog/119449.yaml @@ -0,0 +1,5 @@ +pr: 119449 +summary: Add missing traces ilm policy for OTel traces data streams +area: Data streams +type: bug +issues: [] diff --git a/docs/changelog/119474.yaml b/docs/changelog/119474.yaml new file mode 100644 index 0000000000000..e37561277d220 --- /dev/null +++ b/docs/changelog/119474.yaml @@ -0,0 +1,5 @@ +pr: 119474 +summary: "Add ES|QL cross-cluster query telemetry collection" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/119476.yaml b/docs/changelog/119476.yaml new file mode 100644 index 0000000000000..c275e6965d4a1 --- /dev/null +++ b/docs/changelog/119476.yaml @@ -0,0 +1,6 @@ +pr: 119476 +summary: Fix TopN row size estimate +area: ES|QL +type: bug +issues: + - 106956 diff --git a/docs/changelog/119495.yaml b/docs/changelog/119495.yaml new file mode 100644 index 0000000000000..b3e8f7e79d984 --- /dev/null +++ b/docs/changelog/119495.yaml @@ -0,0 +1,5 @@ +pr: 119495 +summary: Add mapping for `event_name` for OTel logs +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/119504.yaml b/docs/changelog/119504.yaml new file mode 100644 index 0000000000000..f63e422face10 --- 
/dev/null +++ b/docs/changelog/119504.yaml @@ -0,0 +1,5 @@ +pr: 119504 +summary: Optimized index sorting for OTel logs +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/119516.yaml b/docs/changelog/119516.yaml new file mode 100644 index 0000000000000..06dd5168a0823 --- /dev/null +++ b/docs/changelog/119516.yaml @@ -0,0 +1,5 @@ +pr: 119516 +summary: "Fix: do not let `_resolve/cluster` hang if remote is unresponsive" +area: Search +type: bug +issues: [] diff --git a/docs/changelog/119542.yaml b/docs/changelog/119542.yaml new file mode 100644 index 0000000000000..aaf26c7dc4b0f --- /dev/null +++ b/docs/changelog/119542.yaml @@ -0,0 +1,5 @@ +pr: 119542 +summary: Wait while index is blocked +area: Transform +type: enhancement +issues: [] diff --git a/docs/changelog/119543.yaml b/docs/changelog/119543.yaml new file mode 100644 index 0000000000000..7027ea2a49672 --- /dev/null +++ b/docs/changelog/119543.yaml @@ -0,0 +1,7 @@ +pr: 119543 +summary: "[Inference API] Fix unique ID message for inference ID matches trained model\ + \ ID" +area: Machine Learning +type: bug +issues: + - 111312 diff --git a/docs/changelog/119564.yaml b/docs/changelog/119564.yaml new file mode 100644 index 0000000000000..175eff75c8218 --- /dev/null +++ b/docs/changelog/119564.yaml @@ -0,0 +1,5 @@ +pr: 119564 +summary: Http stream activity tracker and exceptions handling +area: Network +type: enhancement +issues: [] diff --git a/docs/changelog/119621.yaml b/docs/changelog/119621.yaml new file mode 100644 index 0000000000000..66c679572cfc0 --- /dev/null +++ b/docs/changelog/119621.yaml @@ -0,0 +1,5 @@ +pr: 119621 +summary: Enable node-level reduction by default +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/119637.yaml b/docs/changelog/119637.yaml new file mode 100644 index 0000000000000..c2fd6dc51f068 --- /dev/null +++ b/docs/changelog/119637.yaml @@ -0,0 +1,5 @@ +pr: 119637 +summary: Fix spike detection for short spikes at the tail of the data +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/116423.yaml b/docs/changelog/119679.yaml similarity index 92% rename from docs/changelog/116423.yaml rename to docs/changelog/119679.yaml index d6d10eab410e4..a3fb36bcd01c3 100644 --- a/docs/changelog/116423.yaml +++ b/docs/changelog/119679.yaml @@ -1,4 +1,4 @@ -pr: 116423 +pr: 119679 summary: Support mTLS for the Elastic Inference Service integration inside the inference API area: Machine Learning type: feature diff --git a/docs/changelog/119691.yaml b/docs/changelog/119691.yaml new file mode 100644 index 0000000000000..186944394908d --- /dev/null +++ b/docs/changelog/119691.yaml @@ -0,0 +1,6 @@ +pr: 119691 +summary: Fix `bbq_hnsw` merge file cleanup on random IO exceptions +area: Vector Search +type: bug +issues: + - 119392 diff --git a/docs/changelog/119730.yaml b/docs/changelog/119730.yaml new file mode 100644 index 0000000000000..09ff1362b3b1f --- /dev/null +++ b/docs/changelog/119730.yaml @@ -0,0 +1,5 @@ +pr: 119730 +summary: Enable KQL function as a tech preview +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/119748.yaml b/docs/changelog/119748.yaml new file mode 100644 index 0000000000000..8b29fb7c1a647 --- /dev/null +++ b/docs/changelog/119748.yaml @@ -0,0 +1,6 @@ +pr: 119748 +summary: Issue S3 web identity token refresh call with sufficient permissions +area: Snapshot/Restore +type: bug +issues: + - 119747 diff --git a/docs/changelog/119749.yaml b/docs/changelog/119749.yaml new file mode 100644 index 
0000000000000..aa2b16ceda5ea --- /dev/null +++ b/docs/changelog/119749.yaml @@ -0,0 +1,5 @@ +pr: 119749 +summary: Strengthen encryption for elasticsearch-keystore tool to AES 256 +area: Infra/CLI +type: enhancement +issues: [] diff --git a/docs/changelog/119750.yaml b/docs/changelog/119750.yaml new file mode 100644 index 0000000000000..2ec5c298d0eb1 --- /dev/null +++ b/docs/changelog/119750.yaml @@ -0,0 +1,6 @@ +pr: 119750 +summary: "ESQL: `connect_transport_exception` should be thrown instead of `verification_exception`\ + \ when ENRICH-ing if remote is disconnected" +area: Search +type: bug +issues: [] diff --git a/docs/changelog/119772.yaml b/docs/changelog/119772.yaml new file mode 100644 index 0000000000000..58d483566b109 --- /dev/null +++ b/docs/changelog/119772.yaml @@ -0,0 +1,6 @@ +pr: 119772 +summary: ESQL Support IN operator for Date nanos +area: ES|QL +type: enhancement +issues: + - 118578 diff --git a/docs/changelog/119780.yaml b/docs/changelog/119780.yaml new file mode 100644 index 0000000000000..5b7226741a416 --- /dev/null +++ b/docs/changelog/119780.yaml @@ -0,0 +1,5 @@ +pr: 119780 +summary: Add index and reindex request settings to speed up reindex +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/119792.yaml b/docs/changelog/119792.yaml new file mode 100644 index 0000000000000..02b17c203f69d --- /dev/null +++ b/docs/changelog/119792.yaml @@ -0,0 +1,5 @@ +pr: 119792 +summary: Make semantic text part of the text family +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/119793.yaml b/docs/changelog/119793.yaml new file mode 100644 index 0000000000000..80330c25c2f30 --- /dev/null +++ b/docs/changelog/119793.yaml @@ -0,0 +1,6 @@ +pr: 119793 +summary: Resolve/cluster should mark remotes as not connected when a security exception + is thrown +area: CCS +type: bug +issues: [] diff --git a/docs/changelog/119797.yaml b/docs/changelog/119797.yaml new file mode 100644 index 0000000000000..992c2078e0caa --- /dev/null +++ b/docs/changelog/119797.yaml @@ -0,0 +1,5 @@ +pr: 119797 +summary: "[Inference API] Fix bug checking for e5 or reranker default IDs" +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/119798.yaml b/docs/changelog/119798.yaml new file mode 100644 index 0000000000000..e21ba83d1e10f --- /dev/null +++ b/docs/changelog/119798.yaml @@ -0,0 +1,5 @@ +pr: 119798 +summary: "Add a `PostAnalysisAware,` distribute verification" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/119830.yaml b/docs/changelog/119830.yaml new file mode 100644 index 0000000000000..cf852ae03c1db --- /dev/null +++ b/docs/changelog/119830.yaml @@ -0,0 +1,5 @@ +pr: 119830 +summary: Run `TransportGetComposableIndexTemplate` on local node +area: Indices APIs +type: enhancement +issues: [] diff --git a/docs/changelog/119831.yaml b/docs/changelog/119831.yaml new file mode 100644 index 0000000000000..61c09d7d54de0 --- /dev/null +++ b/docs/changelog/119831.yaml @@ -0,0 +1,5 @@ +pr: 119831 +summary: Run `TransportClusterGetSettingsAction` on local node +area: Infra/Settings +type: enhancement +issues: [] diff --git a/docs/changelog/119837.yaml b/docs/changelog/119837.yaml new file mode 100644 index 0000000000000..3c0a930aa3544 --- /dev/null +++ b/docs/changelog/119837.yaml @@ -0,0 +1,5 @@ +pr: 119837 +summary: Run `TransportGetIndexTemplateAction` on local node +area: Indices APIs +type: enhancement +issues: [] diff --git a/docs/changelog/119846.yaml b/docs/changelog/119846.yaml new file mode 100644 index 
0000000000000..9e7d99fe1be13 --- /dev/null +++ b/docs/changelog/119846.yaml @@ -0,0 +1,12 @@ +pr: 119846 +summary: Drop support for brackets from METADATA syntax +area: ES|QL +type: deprecation +issues: + - 115401 +deprecation: + title: Drop support for brackets from METADATA syntax + area: ES|QL + details: The deprecated bracketed form of the {esql} `METADATA` clause, e.g. + `FROM index [METADATA _index]`, is no longer accepted. + impact: Remove the brackets and use the plain form, e.g. `FROM index METADATA _index`. diff --git a/docs/changelog/119863.yaml b/docs/changelog/119863.yaml new file mode 100644 index 0000000000000..63cbf1ba07851 --- /dev/null +++ b/docs/changelog/119863.yaml @@ -0,0 +1,11 @@ +pr: 119863 +summary: Restrict Connector APIs to manage/monitor_connector privileges +area: Extract&Transform +type: breaking +issues: [] +breaking: + title: Restrict Connector APIs to manage/monitor_connector privileges + area: REST API + details: Connector APIs now enforce the manage_connector and monitor_connector privileges (introduced in 8.15), replacing the previous reliance on index-level permissions for .elastic-connectors and .elastic-connectors-sync-jobs in API calls. + impact: Connector APIs now require manage_connector and monitor_connector privileges + notable: false diff --git a/docs/changelog/119893.yaml b/docs/changelog/119893.yaml new file mode 100644 index 0000000000000..35a46ce0940d3 --- /dev/null +++ b/docs/changelog/119893.yaml @@ -0,0 +1,5 @@ +pr: 119893 +summary: Add enterprise license check for Inference API actions +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/119897.yaml b/docs/changelog/119897.yaml new file mode 100644 index 0000000000000..87c5890f9fde1 --- /dev/null +++ b/docs/changelog/119897.yaml @@ -0,0 +1,5 @@ +pr: 119897 +summary: Fix ESQL async get while task is being cancelled +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/119922.yaml b/docs/changelog/119922.yaml new file mode 100644 index 0000000000000..2fc9d9529c968 --- /dev/null +++ b/docs/changelog/119922.yaml @@ -0,0 +1,5 @@ +pr: 119922 +summary: "[Inference API] fix spell words: covertToString to convertToString" +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/119926.yaml b/docs/changelog/119926.yaml new file mode 100644 index 0000000000000..3afafd5b2117f --- /dev/null +++ b/docs/changelog/119926.yaml @@ -0,0 +1,11 @@ +pr: 119926 +summary: "Deprecated tracing.apm.* settings got removed." +area: Infra/Metrics +type: breaking +issues: [] +breaking: + title: "Deprecated tracing.apm.* settings got removed." + area: Cluster and node setting + details: Deprecated `tracing.apm.*` settings got removed, use respective `telemetry.*` / `telemetry.tracing.*` settings instead. + impact: 9.x nodes will refuse to start if any such setting (including secret settings) is still present.
+ notable: false diff --git a/docs/changelog/120014.yaml b/docs/changelog/120014.yaml new file mode 100644 index 0000000000000..bef1f3ba49939 --- /dev/null +++ b/docs/changelog/120014.yaml @@ -0,0 +1,6 @@ +pr: 120014 +summary: Fix potential file leak in ES816BinaryQuantizedVectorsWriter +area: Search +type: bug +issues: + - 119981 diff --git a/docs/changelog/120038.yaml b/docs/changelog/120038.yaml new file mode 100644 index 0000000000000..fe3a2ccccc095 --- /dev/null +++ b/docs/changelog/120038.yaml @@ -0,0 +1,5 @@ +pr: 120038 +summary: Run template simulation actions on local node +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/120042.yaml b/docs/changelog/120042.yaml new file mode 100644 index 0000000000000..0093068ae9894 --- /dev/null +++ b/docs/changelog/120042.yaml @@ -0,0 +1,5 @@ +pr: 120042 +summary: Match dot prefix of migrated DS backing index with the source index +area: Data streams +type: bug +issues: [] diff --git a/docs/changelog/120062.yaml b/docs/changelog/120062.yaml new file mode 100644 index 0000000000000..42e8d97f17444 --- /dev/null +++ b/docs/changelog/120062.yaml @@ -0,0 +1,6 @@ +pr: 120062 +summary: Update Text Similarity Reranker to Properly Handle Aliases +area: Ranking +type: bug +issues: + - 119617 diff --git a/docs/changelog/120084.yaml b/docs/changelog/120084.yaml new file mode 100644 index 0000000000000..aafe490d79f1e --- /dev/null +++ b/docs/changelog/120084.yaml @@ -0,0 +1,5 @@ +pr: 120084 +summary: Improve how reindex data stream index action handles api blocks +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/120087.yaml b/docs/changelog/120087.yaml new file mode 100644 index 0000000000000..8539640809b04 --- /dev/null +++ b/docs/changelog/120087.yaml @@ -0,0 +1,5 @@ +pr: 120087 +summary: Include `clusterApplyListener` in long cluster apply warnings +area: Cluster Coordination +type: enhancement +issues: [] diff --git a/docs/changelog/120133.yaml b/docs/changelog/120133.yaml new file mode 100644 index 0000000000000..4ec88267a1bf8 --- /dev/null +++ b/docs/changelog/120133.yaml @@ -0,0 +1,6 @@ +pr: 120133 +summary: Use approximation to advance matched queries +area: Search +type: bug +issues: + - 120130 diff --git a/docs/plugins/discovery-ec2.asciidoc b/docs/plugins/discovery-ec2.asciidoc index 44acba4752aaa..164e3398d7a4f 100644 --- a/docs/plugins/discovery-ec2.asciidoc +++ b/docs/plugins/discovery-ec2.asciidoc @@ -241,7 +241,7 @@ The `discovery-ec2` plugin can automatically set the `aws_availability_zone` node attribute to the availability zone of each node. This node attribute allows you to ensure that each shard has copies allocated redundantly across multiple availability zones by using the -{ref}/modules-cluster.html#shard-allocation-awareness[Allocation Awareness] +{ref}/shard-allocation-awareness.html[Allocation Awareness] feature. In order to enable the automatic definition of the `aws_availability_zone` @@ -333,7 +333,7 @@ labelled as `Moderate` or `Low`. * It is a good idea to distribute your nodes across multiple https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[availability -zones] and use {ref}/modules-cluster.html#shard-allocation-awareness[shard +zones] and use {ref}/shard-allocation-awareness.html[shard allocation awareness] to ensure that each shard has copies in more than one availability zone.
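As an illustration (not part of the docs change), the pairing described above, a plugin-provided node attribute plus an operator-configured awareness attribute, can be expressed with the `Settings` builder that appears elsewhere in this diff; the zone value is hypothetical:

[source,java]
----
import org.elasticsearch.common.settings.Settings;

class AwarenessSettingsSketch {
    public static void main(String[] args) {
        // node.attr.aws_availability_zone is set automatically by discovery-ec2;
        // the awareness attribute is what an operator configures to spread copies.
        Settings settings = Settings.builder()
            .put("node.attr.aws_availability_zone", "us-east-1a") // illustrative zone
            .put("cluster.routing.allocation.awareness.attributes", "aws_availability_zone")
            .build();
        System.out.println(settings.get("cluster.routing.allocation.awareness.attributes"));
    }
}
----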
diff --git a/docs/reference/alias.asciidoc b/docs/reference/alias.asciidoc index 9d784f530d63c..f676644c4ec48 100644 --- a/docs/reference/alias.asciidoc +++ b/docs/reference/alias.asciidoc @@ -407,3 +407,24 @@ POST _aliases } ---- // TEST[s/^/PUT my-index-2099.05.06-000001\n/] + +[discrete] +[[remove-index]] +=== Remove an index + +To remove an index, use the aliases API's `remove_index` action. + +[source,console] +---- +POST _aliases +{ + "actions": [ + { + "remove_index": { + "index": "my-index-2099.05.06-000001" + } + } + ] +} +---- +// TEST[s/^/PUT my-index-2099.05.06-000001\n/] diff --git a/docs/reference/analysis.asciidoc b/docs/reference/analysis.asciidoc index 72ab42d22b911..e8fbc3bd81b6d 100644 --- a/docs/reference/analysis.asciidoc +++ b/docs/reference/analysis.asciidoc @@ -9,8 +9,7 @@ -- _Text analysis_ is the process of converting unstructured text, like -the body of an email or a product description, into a structured format that's -optimized for search. +the body of an email or a product description, into a structured format that's <>. [discrete] [[when-to-configure-analysis]] diff --git a/docs/reference/analysis/tokenizers.asciidoc b/docs/reference/analysis/tokenizers.asciidoc index 38e4ebfcabc39..89928f07b5638 100644 --- a/docs/reference/analysis/tokenizers.asciidoc +++ b/docs/reference/analysis/tokenizers.asciidoc @@ -1,6 +1,14 @@ [[analysis-tokenizers]] == Tokenizer reference +.Difference between {es} tokenization and neural tokenization +[NOTE] +==== +{es}'s tokenization process produces linguistic tokens, optimized for search and retrieval. +This differs from neural tokenization in the context of machine learning and natural language processing. Neural tokenizers translate strings into smaller, subword tokens, which are encoded into vectors for consumption by neural networks. +{es} does not have built-in neural tokenizers. +==== + A _tokenizer_ receives a stream of characters, breaks it up into individual _tokens_ (usually individual words), and outputs a stream of _tokens_. For instance, a <> tokenizer breaks diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index f8d925945401e..28933eb75050d 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -28,7 +28,7 @@ You can pass an `X-Opaque-Id` HTTP header to track the origin of a request in * Response of any request that includes the header * <<_identifying_running_tasks,Task management API>> response -* <<_identifying_search_slow_log_origin,Slow logs>> +* <> * <> For the deprecation logs, {es} also uses the `X-Opaque-Id` value to throttle @@ -52,7 +52,7 @@ safely generate a unique `traceparent` header for each request. If provided, {es} surfaces the header's `trace-id` value as `trace.id` in the: * <> -* <<_identifying_search_slow_log_origin,Slow logs>> +* <> * <> For example, the following `traceparent` value would produce the following diff --git a/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc b/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc index e4da2c45ee978..87de3818bfaf8 100644 --- a/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc +++ b/docs/reference/autoscaling/apis/autoscaling-apis.asciidoc @@ -4,6 +4,13 @@ NOTE: {cloud-only} + +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-autoscaling[Autoscaling APIs]. +-- + You can use the following APIs to perform {cloud}/ec-autoscaling.html[autoscaling operations].
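For orientation, the autoscaling endpoints share the `_autoscaling` path prefix; a minimal sketch of the simplest call (illustrative, not taken from this diff):

[source,console]
----
GET /_autoscaling/capacity
----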
[discrete] diff --git a/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc b/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc index 190428485a003..349e40aab0540 100644 --- a/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc +++ b/docs/reference/autoscaling/apis/delete-autoscaling-policy.asciidoc @@ -7,6 +7,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-autoscaling[Autoscaling APIs]. +-- + Delete {cloud}/ec-autoscaling.html[autoscaling] policy. [[autoscaling-delete-autoscaling-policy-request]] diff --git a/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc b/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc index d635d8c8f7bd0..d45f7cbacc242 100644 --- a/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc +++ b/docs/reference/autoscaling/apis/get-autoscaling-capacity.asciidoc @@ -7,6 +7,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-autoscaling[Autoscaling APIs]. +-- + Get {cloud}/ec-autoscaling.html[autoscaling] capacity. [[autoscaling-get-autoscaling-capacity-request]] diff --git a/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc b/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc index 973eedcb361c9..9962b266fb662 100644 --- a/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc +++ b/docs/reference/autoscaling/apis/get-autoscaling-policy.asciidoc @@ -7,6 +7,13 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-autoscaling[Autoscaling APIs]. +-- + + Get {cloud}/ec-autoscaling.html[autoscaling] policy. [[autoscaling-get-autoscaling-policy-request]] diff --git a/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc b/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc index e564f83411eb4..97c6a54fab03a 100644 --- a/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc +++ b/docs/reference/autoscaling/apis/put-autoscaling-policy.asciidoc @@ -7,6 +7,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-autoscaling[Autoscaling APIs]. +-- + Creates or updates an {cloud}/ec-autoscaling.html[autoscaling] policy. [[autoscaling-put-autoscaling-policy-request]] diff --git a/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc b/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc index a6894a933b460..19c1b5437ef0c 100644 --- a/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc +++ b/docs/reference/behavioral-analytics/apis/delete-analytics-collection.asciidoc @@ -8,6 +8,12 @@ beta::[] Delete Analytics Collection ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-analytics[Behavioral analytics APIs]. 
+-- + //// [source,console] ---- diff --git a/docs/reference/behavioral-analytics/apis/index.asciidoc b/docs/reference/behavioral-analytics/apis/index.asciidoc index 692d3374f89f5..6dc12599c2297 100644 --- a/docs/reference/behavioral-analytics/apis/index.asciidoc +++ b/docs/reference/behavioral-analytics/apis/index.asciidoc @@ -9,6 +9,12 @@ beta::[] --- +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-analytics[Behavioral analytics APIs]. +-- + Use the following APIs to manage tasks and resources related to <>: * <> diff --git a/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc b/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc index 14511a1258278..46ee8296f3ebe 100644 --- a/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc +++ b/docs/reference/behavioral-analytics/apis/list-analytics-collection.asciidoc @@ -8,6 +8,12 @@ beta::[] List Analytics Collections ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-analytics[Behavioral analytics APIs]. +-- + //// [source,console] ---- diff --git a/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc b/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc index f82717e22ed34..60985cd50d3d1 100644 --- a/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc +++ b/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc @@ -8,6 +8,12 @@ beta::[] Post Analytics Collection Event ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-analytics[Behavioral analytics APIs]. +-- + //// [source,console] ---- diff --git a/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc b/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc index cbbab2ae3e26c..412277afa2076 100644 --- a/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc +++ b/docs/reference/behavioral-analytics/apis/put-analytics-collection.asciidoc @@ -8,6 +8,12 @@ beta::[] Put Analytics Collection ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-analytics[Behavioral analytics APIs]. +-- + //// [source,console] ---- diff --git a/docs/reference/cat.asciidoc b/docs/reference/cat.asciidoc index 328970e1d9dc4..61ec9f7680f7f 100644 --- a/docs/reference/cat.asciidoc +++ b/docs/reference/cat.asciidoc @@ -1,6 +1,12 @@ [[cat]] == Compact and aligned text (CAT) APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + ["float",id="intro"] === Introduction diff --git a/docs/reference/cat/alias.asciidoc b/docs/reference/cat/alias.asciidoc index aab0c9df25ed4..0a7c1828d4876 100644 --- a/docs/reference/cat/alias.asciidoc +++ b/docs/reference/cat/alias.asciidoc @@ -4,6 +4,12 @@ cat aliases ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].
+-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or the diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index bbd044c4b8e5e..34b8069b91e27 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -4,6 +4,12 @@ cat allocation ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/anomaly-detectors.asciidoc b/docs/reference/cat/anomaly-detectors.asciidoc index 68d952d2a8532..03cd824092cf6 100644 --- a/docs/reference/cat/anomaly-detectors.asciidoc +++ b/docs/reference/cat/anomaly-detectors.asciidoc @@ -5,6 +5,12 @@ cat anomaly detectors ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/component-templates.asciidoc b/docs/reference/cat/component-templates.asciidoc index 596c86befd1b7..8be1096a215f3 100644 --- a/docs/reference/cat/component-templates.asciidoc +++ b/docs/reference/cat/component-templates.asciidoc @@ -4,6 +4,12 @@ cat component templates ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/count.asciidoc b/docs/reference/cat/count.asciidoc index 37e602c759020..7adcd1464dab1 100644 --- a/docs/reference/cat/count.asciidoc +++ b/docs/reference/cat/count.asciidoc @@ -4,6 +4,12 @@ cat count ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/datafeeds.asciidoc b/docs/reference/cat/datafeeds.asciidoc index 506812fedabad..29f5bc8150af1 100644 --- a/docs/reference/cat/datafeeds.asciidoc +++ b/docs/reference/cat/datafeeds.asciidoc @@ -5,6 +5,12 @@ cat {dfeeds} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/dataframeanalytics.asciidoc b/docs/reference/cat/dataframeanalytics.asciidoc index ed0f697c36d50..f00a9826ee5bd 100644 --- a/docs/reference/cat/dataframeanalytics.asciidoc +++ b/docs/reference/cat/dataframeanalytics.asciidoc @@ -5,6 +5,12 @@ cat {dfanalytics} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].
+-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/fielddata.asciidoc b/docs/reference/cat/fielddata.asciidoc index 376ef1d97057c..f11e40263ec2b 100644 --- a/docs/reference/cat/fielddata.asciidoc +++ b/docs/reference/cat/fielddata.asciidoc @@ -4,6 +4,12 @@ cat fielddata ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/health.asciidoc b/docs/reference/cat/health.asciidoc index ad39ace310807..7ffc170ec8515 100644 --- a/docs/reference/cat/health.asciidoc +++ b/docs/reference/cat/health.asciidoc @@ -4,6 +4,12 @@ cat health ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc index b8dda01c2eae0..3397c05f49735 100644 --- a/docs/reference/cat/indices.asciidoc +++ b/docs/reference/cat/indices.asciidoc @@ -4,6 +4,12 @@ cat indices ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/master.asciidoc b/docs/reference/cat/master.asciidoc index bcf2b876e4506..4ac40ff50be60 100644 --- a/docs/reference/cat/master.asciidoc +++ b/docs/reference/cat/master.asciidoc @@ -4,6 +4,12 @@ cat master ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/nodeattrs.asciidoc b/docs/reference/cat/nodeattrs.asciidoc index ff37b430956aa..6c8093846030c 100644 --- a/docs/reference/cat/nodeattrs.asciidoc +++ b/docs/reference/cat/nodeattrs.asciidoc @@ -4,6 +4,12 @@ cat nodeattrs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} @@ -11,7 +17,7 @@ console. They are _not_ intended for use by applications. For application consumption, use the <>. ==== -Returns information about <>. +Returns information about <>. [[cat-nodeattrs-api-request]] ==== {api-request-title} diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc index 651c4ef3c7c2a..a5a813e8d37d5 100644 --- a/docs/reference/cat/nodes.asciidoc +++ b/docs/reference/cat/nodes.asciidoc @@ -5,6 +5,12 @@ cat nodes ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} @@ -233,6 +239,9 @@ Number of indexing operations, such as `1`. `indexing.index_failed`, `iif`, `indexingIndexFailed`:: Number of failed indexing operations, such as `0`.
+`indexing.index_failed_due_to_version_conflict`, `iifvc`, `indexingIndexFailedDueToVersionConflict`:: +Number of failed indexing operations due to version conflict, such as `0`. + `merges.current`, `mc`, `mergesCurrent`:: Number of current merge operations, such as `0`. diff --git a/docs/reference/cat/pending_tasks.asciidoc b/docs/reference/cat/pending_tasks.asciidoc index f772cdb66d889..081a74da07552 100644 --- a/docs/reference/cat/pending_tasks.asciidoc +++ b/docs/reference/cat/pending_tasks.asciidoc @@ -4,6 +4,12 @@ cat pending tasks ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/plugins.asciidoc b/docs/reference/cat/plugins.asciidoc index ae360862a0edb..c4d830ee52a76 100644 --- a/docs/reference/cat/plugins.asciidoc +++ b/docs/reference/cat/plugins.asciidoc @@ -4,6 +4,11 @@ cat plugins ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- [IMPORTANT] ==== diff --git a/docs/reference/cat/recovery.asciidoc b/docs/reference/cat/recovery.asciidoc index 7393a8b719089..9df46f6fe93f6 100644 --- a/docs/reference/cat/recovery.asciidoc +++ b/docs/reference/cat/recovery.asciidoc @@ -4,6 +4,12 @@ cat recovery ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/repositories.asciidoc b/docs/reference/cat/repositories.asciidoc index ec2f243c27bec..7e6283336e17a 100644 --- a/docs/reference/cat/repositories.asciidoc +++ b/docs/reference/cat/repositories.asciidoc @@ -4,6 +4,12 @@ cat repositories ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/segments.asciidoc b/docs/reference/cat/segments.asciidoc index 82dc7298a0783..70b5e597eb95f 100644 --- a/docs/reference/cat/segments.asciidoc +++ b/docs/reference/cat/segments.asciidoc @@ -4,6 +4,12 @@ cat segments ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc index 87dcb01838bfd..2d3859e74c87e 100644 --- a/docs/reference/cat/shards.asciidoc +++ b/docs/reference/cat/shards.asciidoc @@ -5,6 +5,12 @@ cat shards ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} @@ -156,6 +162,9 @@ Number of indexing operations, such as `1`. `indexing.index_failed`, `iif`, `indexingIndexFailed`:: Number of failed indexing operations, such as `0`.
+`indexing.index_failed_due_to_version_conflict`, `iifvc`, `indexingIndexFailedDueToVersionConflict`:: +Number of failed indexing operations due to version conflict, such as `0`. + `merges.current`, `mc`, `mergesCurrent`:: Number of current merge operations, such as `0`. diff --git a/docs/reference/cat/snapshots.asciidoc b/docs/reference/cat/snapshots.asciidoc index 820c4b56c783d..1da739b20272f 100644 --- a/docs/reference/cat/snapshots.asciidoc +++ b/docs/reference/cat/snapshots.asciidoc @@ -4,6 +4,12 @@ cat snapshots ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/tasks.asciidoc b/docs/reference/cat/tasks.asciidoc index 91d67baa72d70..ff654b30de992 100644 --- a/docs/reference/cat/tasks.asciidoc +++ b/docs/reference/cat/tasks.asciidoc @@ -6,6 +6,12 @@ beta::["The cat task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.",{es-issue}51628] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/templates.asciidoc b/docs/reference/cat/templates.asciidoc index bcc8e9e4f5dc4..78ff60c663d2f 100644 --- a/docs/reference/cat/templates.asciidoc +++ b/docs/reference/cat/templates.asciidoc @@ -4,6 +4,12 @@ cat templates ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index 948ed9a1a7a30..1d8517f170aed 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -4,6 +4,12 @@ cat thread pool ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/trainedmodel.asciidoc b/docs/reference/cat/trainedmodel.asciidoc index 5b20a0b6e842f..378238323f50b 100644 --- a/docs/reference/cat/trainedmodel.asciidoc +++ b/docs/reference/cat/trainedmodel.asciidoc @@ -5,6 +5,12 @@ cat trained model ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs]. +-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/cat/transforms.asciidoc b/docs/reference/cat/transforms.asciidoc index 53f22f02fbdbf..8e5becc5fa76d 100644 --- a/docs/reference/cat/transforms.asciidoc +++ b/docs/reference/cat/transforms.asciidoc @@ -5,6 +5,12 @@ cat transforms ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cat[Compact and aligned text (CAT) APIs].
+-- + [IMPORTANT] ==== cat APIs are only intended for human consumption using the command line or {kib} diff --git a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc index b510163bab50b..2ad50d68b923a 100644 --- a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc @@ -5,6 +5,12 @@ Delete auto-follow pattern ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. +-- + Delete {ccr} <>. [[ccr-delete-auto-follow-pattern-request]] diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc index a2969e993ddfb..951185d14e920 100644 --- a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc @@ -5,6 +5,12 @@ Get auto-follow pattern ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. +-- + Get {ccr} <>. [[ccr-get-auto-follow-pattern-request]] diff --git a/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc index c5ae5a7b4af9d..462ee213ed4e4 100644 --- a/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc @@ -5,6 +5,12 @@ Pause auto-follow pattern ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. +-- + Pauses a {ccr} <>. [[ccr-pause-auto-follow-pattern-request]] diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc index 6769f21ca5cef..672a11302fdd5 100644 --- a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc @@ -5,6 +5,12 @@ Create auto-follow pattern ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. +-- + Creates a {ccr} <>. [[ccr-put-auto-follow-pattern-request]] diff --git a/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc index a580bb3838f9b..d97c41d67c1ea 100644 --- a/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc @@ -5,6 +5,12 @@ Resume auto-follow pattern ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. +-- + Resumes a {ccr} <>. 
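A minimal request sketch for this endpoint; the pattern name `my_auto_follow_pattern` is a placeholder, not taken from this PR:

[source,console]
----
POST /_ccr/auto_follow/my_auto_follow_pattern/resume
----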
[[ccr-resume-auto-follow-pattern-request]] diff --git a/docs/reference/ccr/apis/ccr-apis.asciidoc b/docs/reference/ccr/apis/ccr-apis.asciidoc index ae94e1931af85..0decb98197d31 100644 --- a/docs/reference/ccr/apis/ccr-apis.asciidoc +++ b/docs/reference/ccr/apis/ccr-apis.asciidoc @@ -2,6 +2,12 @@ [[ccr-apis]] == {ccr-cap} APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. +-- + You can use the following APIs to perform <> operations. [discrete] diff --git a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc index 6c049d9c92b59..4c05faa0a7db8 100644 --- a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc @@ -5,6 +5,12 @@ Get follower info ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. +-- + Retrieves information about all <> follower indices. [[ccr-get-follow-info-request]] diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc index 4892f86b3523d..29000a98f64b4 100644 --- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc @@ -5,6 +5,12 @@ Get follower stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. +-- + Get <> follower stats. [[ccr-get-follow-stats-request]] diff --git a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc index 1917c08d6640d..8a7887072f6a2 100644 --- a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc +++ b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc @@ -5,6 +5,12 @@ Forget follower ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. +-- + Removes the <> follower retention leases from the leader. [[ccr-post-forget-follower-request]] diff --git a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc index 6d4730d10efe6..c49e9a49b56c9 100644 --- a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc @@ -5,6 +5,12 @@ Pause follower ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. +-- + Pauses a <> follower index. [[ccr-post-pause-follow-request]] diff --git a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc index b023a8cb5cb70..f6da0110d5c24 100644 --- a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc @@ -5,6 +5,12 @@ Resume follower ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. +-- + Resumes a <> follower index. 
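A minimal sketch of the resume call; an empty body restarts the following task with default settings, and `follower_index` is a placeholder name:

[source,console]
----
POST /follower_index/_ccr/resume_follow
{
}
----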
[[ccr-post-resume-follow-request]] diff --git a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc index dab11ef9e7a54..56b3195e8a134 100644 --- a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc @@ -5,6 +5,12 @@ Unfollow ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. +-- + Converts a <> follower index to a regular index. [[ccr-post-unfollow-request]] diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc index b7ae9ac987474..d9538fc436563 100644 --- a/docs/reference/ccr/apis/follow/put-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc @@ -5,6 +5,12 @@ Create follower ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. +-- + Creates a <> follower index. [[ccr-put-follow-request]] diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc index 92e6bae0bdce8..e92ad17e10437 100644 --- a/docs/reference/ccr/apis/get-ccr-stats.asciidoc +++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc @@ -6,6 +6,12 @@ Get {ccr-init} stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ccr[Cross-cluster replication APIs]. +-- + Get <> stats. [[ccr-get-stats-request]] diff --git a/docs/reference/cluster.asciidoc b/docs/reference/cluster.asciidoc index 3de386d3288c6..398ece616fe07 100644 --- a/docs/reference/cluster.asciidoc +++ b/docs/reference/cluster.asciidoc @@ -1,6 +1,12 @@ [[cluster]] == Cluster APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + ["float",id="cluster-nodes"] === Node specification @@ -29,7 +35,7 @@ one of the following: master-eligible nodes, all data nodes, all ingest nodes, all voting-only nodes, all machine learning nodes, and all coordinating-only nodes. * a pair of patterns, using `*` wildcards, of the form `attrname:attrvalue`, - which adds to the subset all nodes with a custom node attribute whose name + which adds to the subset all nodes with a <> whose name and value match the respective patterns. Custom node attributes are configured by setting properties in the configuration file of the form `node.attr.attrname: attrvalue`. diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc index e640fa77c71ee..7099163cc98e9 100644 --- a/docs/reference/cluster/allocation-explain.asciidoc +++ b/docs/reference/cluster/allocation-explain.asciidoc @@ -4,6 +4,12 @@ Cluster allocation explain ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + Provides an explanation for a shard's current <>. [source,console] diff --git a/docs/reference/cluster/cluster-info.asciidoc b/docs/reference/cluster/cluster-info.asciidoc index 7d67f1602aeaa..52ae637d8f46c 100644 --- a/docs/reference/cluster/cluster-info.asciidoc +++ b/docs/reference/cluster/cluster-info.asciidoc @@ -7,6 +7,12 @@ experimental::[] Cluster Info ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. 
+-- + Returns cluster information. [[cluster-info-api-request]] diff --git a/docs/reference/cluster/delete-desired-balance.asciidoc b/docs/reference/cluster/delete-desired-balance.asciidoc index c67834269e505..45fa147258b78 100644 --- a/docs/reference/cluster/delete-desired-balance.asciidoc +++ b/docs/reference/cluster/delete-desired-balance.asciidoc @@ -6,6 +6,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + Discards the current <> and computes a new desired balance starting from the current allocation of shards. This can sometimes help {es} find a desired balance which needs fewer shard movements to achieve, especially if the cluster has experienced changes so substantial that the current desired balance is no longer optimal without {es} having diff --git a/docs/reference/cluster/delete-desired-nodes.asciidoc b/docs/reference/cluster/delete-desired-nodes.asciidoc index a58d19e2dfa3f..883bc22f6d964 100644 --- a/docs/reference/cluster/delete-desired-nodes.asciidoc +++ b/docs/reference/cluster/delete-desired-nodes.asciidoc @@ -6,6 +6,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + Delete desired nodes. [[delete-desired-nodes-request]] diff --git a/docs/reference/cluster/get-desired-balance.asciidoc b/docs/reference/cluster/get-desired-balance.asciidoc index 74afdaa52daf1..3c6b1dc48719c 100644 --- a/docs/reference/cluster/get-desired-balance.asciidoc +++ b/docs/reference/cluster/get-desired-balance.asciidoc @@ -6,6 +6,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + Exposes: * the <> computation and reconciliation stats diff --git a/docs/reference/cluster/get-desired-nodes.asciidoc b/docs/reference/cluster/get-desired-nodes.asciidoc index de27bd657b3ff..56af6913e34ac 100644 --- a/docs/reference/cluster/get-desired-nodes.asciidoc +++ b/docs/reference/cluster/get-desired-nodes.asciidoc @@ -6,6 +6,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + Get desired nodes. [[get-desired-nodes-request]] diff --git a/docs/reference/cluster/get-settings.asciidoc b/docs/reference/cluster/get-settings.asciidoc index 32c186e4ef24c..5c0fe7a2026c7 100644 --- a/docs/reference/cluster/get-settings.asciidoc +++ b/docs/reference/cluster/get-settings.asciidoc @@ -4,6 +4,12 @@ Cluster get settings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + Returns cluster-wide settings. [source,console] diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc index 94eb80a03d12e..374dd5d4a6f82 100644 --- a/docs/reference/cluster/health.asciidoc +++ b/docs/reference/cluster/health.asciidoc @@ -4,6 +4,12 @@ Cluster health ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + Returns the health status of a cluster. 
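For example, the simplest health request, optionally waiting until the cluster reaches at least `yellow` status (standard parameters, shown here only for illustration):

[source,console]
----
GET _cluster/health?wait_for_status=yellow&timeout=30s
----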
[[cluster-health-api-request]] diff --git a/docs/reference/cluster/nodes-hot-threads.asciidoc b/docs/reference/cluster/nodes-hot-threads.asciidoc index 446a8fede69ec..f8b414453ae66 100644 --- a/docs/reference/cluster/nodes-hot-threads.asciidoc +++ b/docs/reference/cluster/nodes-hot-threads.asciidoc @@ -4,6 +4,12 @@ Nodes hot threads ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + Returns the hot threads on each selected node in the cluster. [[cluster-nodes-hot-threads-api-request]] diff --git a/docs/reference/cluster/nodes-info.asciidoc b/docs/reference/cluster/nodes-info.asciidoc index 6f1d769e696c5..7ae6db7aa9a56 100644 --- a/docs/reference/cluster/nodes-info.asciidoc +++ b/docs/reference/cluster/nodes-info.asciidoc @@ -4,8 +4,13 @@ Nodes info ++++ -Returns cluster nodes information. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- +Returns cluster nodes information. [[cluster-nodes-info-api-request]] ==== {api-request-title} diff --git a/docs/reference/cluster/nodes-reload-secure-settings.asciidoc b/docs/reference/cluster/nodes-reload-secure-settings.asciidoc index f86304d2e9ba0..842ca30c335f9 100644 --- a/docs/reference/cluster/nodes-reload-secure-settings.asciidoc +++ b/docs/reference/cluster/nodes-reload-secure-settings.asciidoc @@ -4,6 +4,12 @@ Nodes reload secure settings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + Reloads the keystore on nodes in the cluster. [[cluster-nodes-reload-secure-settings-api-request]] diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index adf8229712ecc..522983035079d 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -5,6 +5,12 @@ Nodes stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + Returns cluster nodes statistics. [[cluster-nodes-stats-api-request]] diff --git a/docs/reference/cluster/nodes-usage.asciidoc b/docs/reference/cluster/nodes-usage.asciidoc index 486edf67bba87..c7994e32204a6 100644 --- a/docs/reference/cluster/nodes-usage.asciidoc +++ b/docs/reference/cluster/nodes-usage.asciidoc @@ -4,8 +4,13 @@ Nodes feature usage ++++ -Returns information on the usage of features. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- +Returns information on the usage of features. [[cluster-nodes-usage-api-request]] ==== {api-request-title} diff --git a/docs/reference/cluster/pending.asciidoc b/docs/reference/cluster/pending.asciidoc index 3e87234c7d26c..f5d42a6df76a6 100644 --- a/docs/reference/cluster/pending.asciidoc +++ b/docs/reference/cluster/pending.asciidoc @@ -4,6 +4,12 @@ Pending cluster tasks ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + Returns cluster-level changes that have not yet been executed. 
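A minimal request sketch:

[source,console]
----
GET /_cluster/pending_tasks
----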
diff --git a/docs/reference/cluster/prevalidate-node-removal.asciidoc b/docs/reference/cluster/prevalidate-node-removal.asciidoc index 16bf28c586687..0a09f1adda77c 100644 --- a/docs/reference/cluster/prevalidate-node-removal.asciidoc +++ b/docs/reference/cluster/prevalidate-node-removal.asciidoc @@ -6,6 +6,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + Prevalidate node removal. [[prevalidate-node-removal-api-request]] diff --git a/docs/reference/cluster/remote-info.asciidoc b/docs/reference/cluster/remote-info.asciidoc index 8f2923846df0e..691acafd8ddbe 100644 --- a/docs/reference/cluster/remote-info.asciidoc +++ b/docs/reference/cluster/remote-info.asciidoc @@ -4,8 +4,13 @@ Remote cluster info ++++ -Returns configured remote cluster information. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- +Returns configured remote cluster information. [[cluster-remote-info-api-request]] ==== {api-request-title} diff --git a/docs/reference/cluster/reroute.asciidoc b/docs/reference/cluster/reroute.asciidoc index 429070f80b9bf..b393a9a68d2b2 100644 --- a/docs/reference/cluster/reroute.asciidoc +++ b/docs/reference/cluster/reroute.asciidoc @@ -4,8 +4,13 @@ Cluster reroute ++++ -Changes the allocation of shards in a cluster. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- +Changes the allocation of shards in a cluster. [[cluster-reroute-api-request]] ==== {api-request-title} diff --git a/docs/reference/cluster/state.asciidoc b/docs/reference/cluster/state.asciidoc index fcb2f5f2f5dcd..bf2863018893a 100644 --- a/docs/reference/cluster/state.asciidoc +++ b/docs/reference/cluster/state.asciidoc @@ -4,6 +4,12 @@ Cluster state ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + Returns an internal representation of the cluster state for debugging or diagnostic purposes. diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index d875417bde51a..f078fd2b7f2ee 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -5,6 +5,12 @@ Cluster stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + Returns cluster statistics. [[cluster-stats-api-request]] @@ -19,7 +25,6 @@ Returns cluster statistics. * If the {es} {security-features} are enabled, you must have the `monitor` or `manage` <> to use this API. - [[cluster-stats-api-desc]] ==== {api-description-title} @@ -1391,7 +1396,7 @@ as a human-readable string. `_search`::: -(object) Contains the information about the <> usage in the cluster. +(object) Contains information about <> usage. + .Properties of `_search` [%collapsible%open] @@ -1522,7 +1527,11 @@ This may include requests where partial results were returned, but not requests ======= + ====== +`_esql`::: +(object) Contains information about <> usage. +The structure of the object is the same as the `_search` object above. 
===== diff --git a/docs/reference/cluster/tasks.asciidoc b/docs/reference/cluster/tasks.asciidoc index 4b32d5f1b903a..79727d9a3078b 100644 --- a/docs/reference/cluster/tasks.asciidoc +++ b/docs/reference/cluster/tasks.asciidoc @@ -6,6 +6,12 @@ beta::["The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.",{es-issue}51628] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-tasks[task management APIs]. +-- + Returns information about the tasks currently executing in the cluster. [[tasks-api-request]] diff --git a/docs/reference/cluster/update-desired-nodes.asciidoc b/docs/reference/cluster/update-desired-nodes.asciidoc index c72a2b53208e5..f83f551395134 100644 --- a/docs/reference/cluster/update-desired-nodes.asciidoc +++ b/docs/reference/cluster/update-desired-nodes.asciidoc @@ -6,6 +6,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + Creates or updates the desired nodes. [[update-desired-nodes-request]] diff --git a/docs/reference/cluster/update-settings.asciidoc b/docs/reference/cluster/update-settings.asciidoc index 3d8bdcca07e2b..9a718ee413e64 100644 --- a/docs/reference/cluster/update-settings.asciidoc +++ b/docs/reference/cluster/update-settings.asciidoc @@ -4,8 +4,13 @@ Cluster update settings ++++ -Configures <>. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- +Configures <>. [[cluster-update-settings-api-request]] ==== {api-request-title} diff --git a/docs/reference/cluster/voting-exclusions.asciidoc b/docs/reference/cluster/voting-exclusions.asciidoc index 55587a7010f8f..e60b3be26508d 100644 --- a/docs/reference/cluster/voting-exclusions.asciidoc +++ b/docs/reference/cluster/voting-exclusions.asciidoc @@ -4,6 +4,12 @@ Voting configuration exclusions ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + Adds or removes master-eligible nodes from the <>. diff --git a/docs/reference/commands/node-tool.asciidoc b/docs/reference/commands/node-tool.asciidoc index cdd2bb8f0f9d7..265006aa3df17 100644 --- a/docs/reference/commands/node-tool.asciidoc +++ b/docs/reference/commands/node-tool.asciidoc @@ -23,8 +23,8 @@ bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster|override-versio This tool has a number of modes: * `elasticsearch-node repurpose` can be used to delete unwanted data from a - node if it used to be a <> or a - <> but has been repurposed not to have one + node if it used to be a <> or a + <> but has been repurposed not to have one or other of these roles. * `elasticsearch-node remove-settings` can be used to remove persistent settings diff --git a/docs/reference/connector/apis/cancel-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/cancel-connector-sync-job-api.asciidoc index 8cd8f71dc7aea..d4db8ce62bc46 100644 --- a/docs/reference/connector/apis/cancel-connector-sync-job-api.asciidoc +++ b/docs/reference/connector/apis/cancel-connector-sync-job-api.asciidoc @@ -6,6 +6,12 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Cancels a connector sync job. To get started with Connector APIs, check out <>. 
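A minimal request sketch for the cancel endpoint; the sync job ID is a placeholder:

[source,console]
----
PUT _connector/_sync_job/my-connector-sync-job-id/_cancel
----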
diff --git a/docs/reference/connector/apis/check-in-connector-api.asciidoc b/docs/reference/connector/apis/check-in-connector-api.asciidoc index 15e65b10074d8..be7521e937316 100644 --- a/docs/reference/connector/apis/check-in-connector-api.asciidoc +++ b/docs/reference/connector/apis/check-in-connector-api.asciidoc @@ -6,6 +6,12 @@ preview::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Updates the `last_seen` field of a connector with current timestamp. To get started with Connector APIs, check out <>. diff --git a/docs/reference/connector/apis/check-in-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/check-in-connector-sync-job-api.asciidoc index 8d7d0a36ad88a..5f9b584621c2f 100644 --- a/docs/reference/connector/apis/check-in-connector-sync-job-api.asciidoc +++ b/docs/reference/connector/apis/check-in-connector-sync-job-api.asciidoc @@ -6,6 +6,12 @@ preview::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Checks in a connector sync job (updates `last_seen` to the current time). To get started with Connector APIs, check out <>. diff --git a/docs/reference/connector/apis/claim-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/claim-connector-sync-job-api.asciidoc index 62491582ce757..565a39c2083af 100644 --- a/docs/reference/connector/apis/claim-connector-sync-job-api.asciidoc +++ b/docs/reference/connector/apis/claim-connector-sync-job-api.asciidoc @@ -6,6 +6,12 @@ preview::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Claims a connector sync job. The `_claim` endpoint is not intended for direct connector management by users. It is there to support the implementation of services that utilize the https://github.com/elastic/connectors/blob/main/docs/CONNECTOR_PROTOCOL.md[Connector Protocol] to communicate with {es}. diff --git a/docs/reference/connector/apis/connector-apis.asciidoc b/docs/reference/connector/apis/connector-apis.asciidoc index 15ce31a605986..719db5a315714 100644 --- a/docs/reference/connector/apis/connector-apis.asciidoc +++ b/docs/reference/connector/apis/connector-apis.asciidoc @@ -1,6 +1,12 @@ [[connector-apis]] == Connector APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + beta::[] The connector and sync jobs APIs provide a convenient way to create and manage Elastic <>. diff --git a/docs/reference/connector/apis/create-connector-api.asciidoc b/docs/reference/connector/apis/create-connector-api.asciidoc index 3ecef6d302732..3300ce270c345 100644 --- a/docs/reference/connector/apis/create-connector-api.asciidoc +++ b/docs/reference/connector/apis/create-connector-api.asciidoc @@ -6,6 +6,12 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Creates an Elastic connector. 
Connectors are {es} integrations that bring content from third-party data sources, which can be deployed on {ecloud} or hosted on your own infrastructure: diff --git a/docs/reference/connector/apis/create-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/create-connector-sync-job-api.asciidoc index 240ab696954f3..1a66d47578a8b 100644 --- a/docs/reference/connector/apis/create-connector-sync-job-api.asciidoc +++ b/docs/reference/connector/apis/create-connector-sync-job-api.asciidoc @@ -6,6 +6,11 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- Creates a connector sync job. diff --git a/docs/reference/connector/apis/delete-connector-api.asciidoc b/docs/reference/connector/apis/delete-connector-api.asciidoc index 76621d7f1843b..f161a3c3b5933 100644 --- a/docs/reference/connector/apis/delete-connector-api.asciidoc +++ b/docs/reference/connector/apis/delete-connector-api.asciidoc @@ -6,8 +6,14 @@ beta::[] -Removes a connector and associated sync jobs. -This is a destructive action that is not recoverable. + +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + +Soft-deletes a connector and removes associated sync jobs. Note: this action doesn't delete any API key, ingest pipeline or data index associated with the connector. These need to be removed manually. diff --git a/docs/reference/connector/apis/delete-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/delete-connector-sync-job-api.asciidoc index eeea40f430abd..bc906d12cae40 100644 --- a/docs/reference/connector/apis/delete-connector-sync-job-api.asciidoc +++ b/docs/reference/connector/apis/delete-connector-sync-job-api.asciidoc @@ -6,6 +6,12 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Removes a connector sync job and its associated data. This is a destructive action that is not recoverable. diff --git a/docs/reference/connector/apis/get-connector-api.asciidoc b/docs/reference/connector/apis/get-connector-api.asciidoc index 302773e0af831..c8cbae668c261 100644 --- a/docs/reference/connector/apis/get-connector-api.asciidoc +++ b/docs/reference/connector/apis/get-connector-api.asciidoc @@ -6,6 +6,12 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Retrieves the details about a connector. To get started with Connector APIs, check out <>. @@ -27,6 +33,9 @@ To get started with Connector APIs, check out <`:: (Required, string) +`include_deleted`:: +(Optional, boolean) A flag indicating whether to also return connectors that have been soft-deleted. Defaults to `false`. + [[get-connector-api-response-codes]] ==== {api-response-codes-title} diff --git a/docs/reference/connector/apis/get-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/get-connector-sync-job-api.asciidoc index a524c1291c26a..f4ccc59e0315e 100644 --- a/docs/reference/connector/apis/get-connector-sync-job-api.asciidoc +++ b/docs/reference/connector/apis/get-connector-sync-job-api.asciidoc @@ -6,6 +6,12 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Retrieves the details about a connector sync job. To get started with Connector APIs, check out <>. 
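A minimal request sketch, again using a placeholder sync job ID:

[source,console]
----
GET _connector/_sync_job/my-connector-sync-job-id
----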
diff --git a/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc b/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc index 4a4fa5a22dcc1..b5f52e31ac296 100644 --- a/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc +++ b/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc @@ -7,6 +7,12 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Returns information about all stored connector sync jobs ordered by their creation date in ascending order. To get started with Connector APIs, check out <>. diff --git a/docs/reference/connector/apis/list-connectors-api.asciidoc b/docs/reference/connector/apis/list-connectors-api.asciidoc index 4a93ecf2b0109..d334e5d92c232 100644 --- a/docs/reference/connector/apis/list-connectors-api.asciidoc +++ b/docs/reference/connector/apis/list-connectors-api.asciidoc @@ -7,6 +7,12 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Returns information about all created connectors. To get started with Connector APIs, check out <>. @@ -41,6 +47,9 @@ To get started with Connector APIs, check out <>. diff --git a/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc b/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc index 7e22f657ba6b6..5691280a30dd7 100644 --- a/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc +++ b/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc @@ -6,6 +6,12 @@ preview::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Sets connector sync job stats. To get started with Connector APIs, check out <>. diff --git a/docs/reference/connector/apis/update-connector-api-key-id-api.asciidoc b/docs/reference/connector/apis/update-connector-api-key-id-api.asciidoc index fbd3f887758f2..8df49c43c128e 100644 --- a/docs/reference/connector/apis/update-connector-api-key-id-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-api-key-id-api.asciidoc @@ -6,6 +6,12 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Updates the `api_key_id` and/or `api_key_secret_id` field(s) of a connector, specifying: . The ID of the API key used for authorization diff --git a/docs/reference/connector/apis/update-connector-configuration-api.asciidoc b/docs/reference/connector/apis/update-connector-configuration-api.asciidoc index 4b25f9e71ae4b..d02c332d7f34b 100644 --- a/docs/reference/connector/apis/update-connector-configuration-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-configuration-api.asciidoc @@ -6,6 +6,12 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Updates a connector's `configuration`, allowing for config value updates within a registered configuration schema. To get started with Connector APIs, check out <>. 
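A minimal sketch of the shape of this request; the connector ID and the keys under `values` are illustrative and must match the connector's registered configuration schema:

[source,console]
----
PUT _connector/my-connector/_configuration
{
  "values": {
    "tenant_id": "my-tenant-id"
  }
}
----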
diff --git a/docs/reference/connector/apis/update-connector-error-api.asciidoc b/docs/reference/connector/apis/update-connector-error-api.asciidoc index 29358b243041a..859a48c31c0ca 100644 --- a/docs/reference/connector/apis/update-connector-error-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-error-api.asciidoc @@ -6,6 +6,12 @@ preview::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Updates the `error` field of a connector. To get started with Connector APIs, check out <>. diff --git a/docs/reference/connector/apis/update-connector-features-api.asciidoc b/docs/reference/connector/apis/update-connector-features-api.asciidoc index 77571fcd7d5a0..74c512e42cd1b 100644 --- a/docs/reference/connector/apis/update-connector-features-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-features-api.asciidoc @@ -6,6 +6,12 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Manages the `features` of a connector. This endpoint can be used to control the following aspects of a connector: * document-level security diff --git a/docs/reference/connector/apis/update-connector-filtering-api.asciidoc b/docs/reference/connector/apis/update-connector-filtering-api.asciidoc index 4820fa151901d..7ba0080cde28f 100644 --- a/docs/reference/connector/apis/update-connector-filtering-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-filtering-api.asciidoc @@ -6,6 +6,12 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Updates the draft `filtering` configuration of a connector and marks the draft validation state as `edited`. The filtering draft is activated once validated by the running Elastic connector service. The filtering property is used to configure sync rules (both basic and advanced) for a connector. Learn more in the <>. diff --git a/docs/reference/connector/apis/update-connector-index-name-api.asciidoc b/docs/reference/connector/apis/update-connector-index-name-api.asciidoc index 6222baf6a6caf..fbbef6e66ca40 100644 --- a/docs/reference/connector/apis/update-connector-index-name-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-index-name-api.asciidoc @@ -6,6 +6,12 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Updates the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. To get started with Connector APIs, check out <>. diff --git a/docs/reference/connector/apis/update-connector-last-sync-api.asciidoc b/docs/reference/connector/apis/update-connector-last-sync-api.asciidoc index 17f892d852f4a..d827d25c12b4e 100644 --- a/docs/reference/connector/apis/update-connector-last-sync-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-last-sync-api.asciidoc @@ -6,6 +6,12 @@ preview::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Updates the fields related to the last sync of a connector. This action is used for analytics and monitoring. 
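A minimal sketch updating a single last-sync field; the connector ID and field value are illustrative assumptions:

[source,console]
----
PUT _connector/my-connector/_last_sync
{
  "last_sync_status": "completed"
}
----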
diff --git a/docs/reference/connector/apis/update-connector-name-description-api.asciidoc b/docs/reference/connector/apis/update-connector-name-description-api.asciidoc index 384cec2c73e24..c0d0568baef35 100644 --- a/docs/reference/connector/apis/update-connector-name-description-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-name-description-api.asciidoc @@ -6,6 +6,12 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Updates the `name` and `description` fields of a connector. diff --git a/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc b/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc index e54b01ec47d01..a886fe6f20da7 100644 --- a/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc @@ -6,6 +6,12 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Updates the `pipeline` configuration of a connector. When you create a new connector, the configuration of an <> is populated with default settings. diff --git a/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc b/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc index 64302c26a7231..eed3d14ea1d97 100644 --- a/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc @@ -6,6 +6,12 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Updates the `scheduling` configuration of a connector. To get started with Connector APIs, check out <>. diff --git a/docs/reference/connector/apis/update-connector-service-type-api.asciidoc b/docs/reference/connector/apis/update-connector-service-type-api.asciidoc index c02967d03e2dd..28b4c72b682d2 100644 --- a/docs/reference/connector/apis/update-connector-service-type-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-service-type-api.asciidoc @@ -6,6 +6,12 @@ beta::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Updates the `service_type` of a connector. To get started with Connector APIs, check out <>. diff --git a/docs/reference/connector/apis/update-connector-status-api.asciidoc b/docs/reference/connector/apis/update-connector-status-api.asciidoc index dadd93fe5f9c4..7812cbff89d1b 100644 --- a/docs/reference/connector/apis/update-connector-status-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-status-api.asciidoc @@ -6,6 +6,12 @@ preview::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-connector[Connector APIs]. +-- + Updates the `status` of a connector. To get started with Connector APIs, check out <>.
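A minimal sketch, assuming a connector with ID `my-connector`; the value shown is one of the states used by the connector protocol (`created`, `needs_configuration`, `configured`, `connected`, `error`):

[source,console]
----
PUT _connector/my-connector/_status
{
  "status": "needs_configuration"
}
----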
diff --git a/docs/reference/connector/docs/connectors-content-extraction.asciidoc b/docs/reference/connector/docs/connectors-content-extraction.asciidoc index a87d38c9bf531..744fe1d87cb45 100644 --- a/docs/reference/connector/docs/connectors-content-extraction.asciidoc +++ b/docs/reference/connector/docs/connectors-content-extraction.asciidoc @@ -8,7 +8,7 @@ The logic for content extraction is defined in {connectors-python}/connectors/ut While intended primarily for PDF and Microsoft Office formats, you can use any of the <>. Enterprise Search uses an {ref}/ingest.html[Elasticsearch ingest pipeline^] to power the web crawler's binary content extraction. -The default pipeline, `ent-search-generic-ingestion`, is automatically created when Enterprise Search first starts. +The default pipeline, `search-default-ingestion`, is automatically created when Enterprise Search first starts. You can {ref}/ingest.html#create-manage-ingest-pipelines[view^] this pipeline in Kibana. Customizing your pipeline usage is also an option. diff --git a/docs/reference/connector/docs/connectors-filter-extract-transform.asciidoc b/docs/reference/connector/docs/connectors-filter-extract-transform.asciidoc index 278478c908bf0..62a99928bfb46 100644 --- a/docs/reference/connector/docs/connectors-filter-extract-transform.asciidoc +++ b/docs/reference/connector/docs/connectors-filter-extract-transform.asciidoc @@ -13,7 +13,7 @@ The following diagram provides an overview of how content extraction, sync rules [.screenshot] image::images/pipelines-extraction-sync-rules.png[Architecture diagram of data pipeline with content extraction, sync rules, and ingest pipelines] -By default, only the connector specific logic (2) and the default `ent-search-generic-ingestion` pipeline (6) extract and transform your data, as configured in your deployment. +By default, only the connector specific logic (2) and the default `search-default-ingestion` pipeline (6) extract and transform your data, as configured in your deployment. The following tools are available for more advanced use cases: @@ -50,4 +50,4 @@ Use ingest pipelines for data enrichment, normalization, and more. Elastic connectors use a default ingest pipeline, which you can copy and customize to meet your needs. -Refer to {ref}/ingest-pipeline-search.html[ingest pipelines in Search] in the {es} documentation. \ No newline at end of file +Refer to {ref}/ingest-pipeline-search.html[ingest pipelines in Search] in the {es} documentation. diff --git a/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc b/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc index 02f598c16f63c..d09e089f194ad 100644 --- a/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc +++ b/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc @@ -75,12 +75,10 @@ Follow these steps: * Leave the *Redirect URIs* blank for now. * *Register* the application. * Find and keep the **Application (client) ID** and **Directory (tenant) ID** handy. -* Locate the **Secret** by navigating to **Client credentials: Certificates & Secrets**. -* Select **New client secret** -* Pick a name for your client secret. -Select an expiration date. (At this expiration date, you will need to generate a new secret and update your connector configuration.) -** Save the client secret **Secret ID** before leaving this screen. -** Save the client secret **Value** before leaving this screen. +* Create a certificate and private key. 
This can, for example, be done by running the `openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout azure_app.key -out azure_app.crt` command. Store both in a safe and secure place. +* Locate the **Certificates** by navigating to **Client credentials: Certificates & Secrets**. +* Select **Upload certificate** +* Upload the certificate created in one of the previous steps: `azure_app.crt` * Set up the permissions the OAuth App will request from the Azure Portal service account. ** Navigate to **API Permissions** and click **Add Permission**. ** Add **application permissions** until the list looks like the following: @@ -114,6 +112,24 @@ When entities are not available via the Graph API the connector falls back to us [discrete#es-connectors-sharepoint-online-oauth-app-permissions] ====== SharePoint permissions +Microsoft is https://learn.microsoft.com/en-us/sharepoint/dev/sp-add-ins/retirement-announcement-for-azure-acs[retiring Azure Access Control Service (ACS)]. This affects permission configuration: + +* *Tenants created after November 1st, 2024*: Certificate authentication is required +* *Tenants created before November 1st, 2024*: Secret-based authentication must be migrated to certificate authentication by April 2nd, 2026 + +[discrete#es-connectors-sharepoint-online-oauth-app-certificate-auth] +===== Certificate Authentication + +This authentication method does not require additional setup other than creating and uploading certificates to the OAuth App. + +[discrete#es-connectors-sharepoint-online-oauth-app-secret-auth] +===== Secret Authentication + +[IMPORTANT] +==== +This method is only applicable to tenants created before November 1st, 2024. This method will be fully retired as of April 2nd, 2026. +==== + Refer to the following documentation for setting https://learn.microsoft.com/en-us/sharepoint/dev/solution-guidance/security-apponly-azureacs[SharePoint permissions^]. * To set `DisableCustomAppAuthentication` to false, connect to SharePoint using PowerShell and run `set-spotenant -DisableCustomAppAuthentication $false` @@ -133,6 +149,58 @@ The application name will appear in the Title box. ---- +[discrete#es-connectors-sharepoint-online-sites-selected-permissions] +====== Granting `Sites.Selected` permissions + +To configure `Sites.Selected` permissions, follow these steps in the Azure Active Directory portal. These permissions enable precise access control to specific SharePoint sites. + +. Sign in to the https://portal.azure.com/[Azure Active Directory portal^]. +. Navigate to **App registrations** and locate the application created for the connector. +. Under **API permissions**, click **Add permission**. +. Select **Microsoft Graph** > **Application permissions**, then add `Sites.Selected`. +. Click **Grant admin consent** to approve the permission. + +[TIP] +==== +Refer to the official https://learn.microsoft.com/en-us/graph/permissions-reference[Microsoft documentation] for managing permissions in Azure AD. +==== + +To assign access to specific SharePoint sites using `Sites.Selected`: + +. Use Microsoft Graph Explorer or PowerShell to grant access. +. To fetch the site ID, run the following Graph API query: ++ +[source, http] +---- +GET https://graph.microsoft.com/v1.0/sites?select=webUrl,Title,Id&$search="*" +---- ++ +This will return the `id` of the site. + +.
Use the `id` to assign read or write access: ++ +[source, http] +---- +POST https://graph.microsoft.com/v1.0/sites//permissions +{ + "roles": ["read"], // or "write" + "grantedToIdentities": [ + { + "application": { + "id": "", + "displayName": "" + } + } + ] +} +---- + +[NOTE] +==== +When using the `Comma-separated list of sites` configuration field, ensure the sites specified match those granted `Sites.Selected` permission in SharePoint. +If the `Comma-separated list of sites` field is set to `*` or the `Enumerate all sites` toggle is enabled, the connector will attempt to access all sites. This requires broader permissions, which are not supported with `Sites.Selected`. +==== + .Graph API permissions **** Microsoft recommends using Graph API for all operations with Sharepoint Online. Graph API is well-documented and more efficient at fetching data, which helps avoid throttling. @@ -167,8 +235,17 @@ The tenant name for the Azure account hosting the Sharepoint Online instance. Client ID:: The client id to authenticate with SharePoint Online. +Authentication Method:: +Authentication method to use to connect to SharePoint Online and REST APIs. `secret` is deprecated and `certificate` is recommended. + Secret value:: -The secret value to authenticate with SharePoint Online. +The secret value to authenticate with SharePoint Online, if Authentication Method is set to `secret`. + +Content of certificate file:: +Content of certificate file if Authentication Method is set to `certificate`. + +Content of private key file:: +Content of private key file if Authentication Method is set to `certificate`. Comma-separated list of sites:: List of site collection names or paths to fetch from SharePoint. @@ -536,12 +613,10 @@ Follow these steps: * Leave the *Redirect URIs* blank for now. * *Register* the application. * Find and keep the **Application (client) ID** and **Directory (tenant) ID** handy. -* Locate the **Secret** by navigating to **Client credentials: Certificates & Secrets**. -* Select **New client secret** -* Pick a name for your client secret. -Select an expiration date. (At this expiration date, you will need to generate a new secret and update your connector configuration.) -** Save the client secret **Secret ID** before leaving this screen. -** Save the client secret **Value** before leaving this screen. +* Create a certificate and private key. This can, for example, be done by running the `openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout azure_app.key -out azure_app.crt` command. Store both in a safe and secure place. +* Locate the **Certificates** by navigating to **Client credentials: Certificates & Secrets**. +* Select **Upload certificate** +* Upload the certificate created in one of the previous steps: `azure_app.crt` * Set up the permissions the OAuth App will request from the Azure Portal service account. ** Navigate to **API Permissions** and click **Add Permission**. ** Add **application permissions** until the list looks like the following: @@ -575,6 +650,24 @@ When entities are not available via the Graph API the connector falls back on us [discrete#es-connectors-sharepoint-online-client-oauth-app-permissions] ====== SharePoint permissions +Microsoft is https://learn.microsoft.com/en-us/sharepoint/dev/sp-add-ins/retirement-announcement-for-azure-acs[retiring Azure Access Control Service (ACS)].
This affects permission configuration: + +* *Tenants created after November 1st, 2024*: Certificate authentication is required +* *Tenants created before November 1st, 2024*: Secret-based authentication must be migrated to certificate authentication by April 2nd, 2026 + +[discrete#es-connectors-sharepoint-online-client-oauth-app-certificate-auth] +===== Certificate Authentication + +This authentication method does not require additional setup other than creating and uploading certificates to the OAuth App. + +[discrete#es-connectors-sharepoint-online-client-oauth-app-secret-auth] +===== Secret Authentication + +[IMPORTANT] +==== +This method is only applicable to tenants created before November 1st, 2024. This method will be fully retired as of April 2nd, 2026. +==== + Refer to the following documentation for setting https://learn.microsoft.com/en-us/sharepoint/dev/solution-guidance/security-apponly-azureacs[SharePoint permissions^]. * To set `DisableCustomAppAuthentication` to false, connect to SharePoint using PowerShell and run `set-spotenant -DisableCustomAppAuthentication $false` @@ -594,6 +687,59 @@ The application name will appear in the Title box. ---- +[discrete#es-connectors-sharepoint-online-sites-selected-permissions-self-managed] +====== Granting `Sites.Selected` permissions + +To configure `Sites.Selected` permissions, follow these steps in the Azure Active Directory portal. These permissions enable precise access control to specific SharePoint sites. + +. Sign in to the https://portal.azure.com/[Azure Active Directory portal^]. +. Navigate to **App registrations** and locate the application created for the connector. +. Under **API permissions**, click **Add permission**. +. Select **Microsoft Graph** > **Application permissions**, then add `Sites.Selected`. +. Click **Grant admin consent** to approve the permission. + +[TIP] +==== +Refer to the official https://learn.microsoft.com/en-us/graph/permissions-reference[Microsoft documentation] for managing permissions in Azure AD. +==== + + +To assign access to specific SharePoint sites using `Sites.Selected`: + +. Use Microsoft Graph Explorer or PowerShell to grant access. +. To fetch the site ID, run the following Graph API query: ++ +[source, http] +---- +GET https://graph.microsoft.com/v1.0/sites?select=webUrl,Title,Id&$search="*" +---- ++ +This will return the `id` of the site. + +. Use the `id` to assign read or write access: ++ +[source, http] +---- +POST https://graph.microsoft.com/v1.0/sites//permissions +{ + "roles": ["read"], // or "write" + "grantedToIdentities": [ + { + "application": { + "id": "", + "displayName": "" + } + } + ] +} +---- + +[NOTE] +==== +When using the `Comma-separated list of sites` configuration field, ensure the sites specified match those granted `Sites.Selected` permission in SharePoint. +If the `Comma-separated list of sites` field is set to `*` or the `Enumerate all sites` toggle is enabled, the connector will attempt to access all sites. This requires broader permissions, which are not supported with `Sites.Selected`. +==== + .Graph API permissions **** Microsoft recommends using Graph API for all operations with Sharepoint Online. Graph API is well-documented and more efficient at fetching data, which helps avoid throttling. @@ -637,8 +783,17 @@ The tenant name for the Azure account hosting the Sharepoint Online instance. `client_id`:: The client id to authenticate with SharePoint Online. +`auth_method`:: +Authentication method to use to connect to SharePoint Online and REST APIs.
`secret` is deprecated and `certificate` is recommended. + `secret_value`:: -The secret value to authenticate with SharePoint Online. +The secret value to authenticate with SharePoint Online, if `auth_method` is set to `secret`. + +`certificate`:: +Content of certificate file if `auth_method` is set to `certificate`. + +`private_key`:: +Content of private key file if `auth_method` is set to `certificate`. `site_collections`:: List of site collection names or paths to fetch from SharePoint. diff --git a/docs/reference/data-management.asciidoc b/docs/reference/data-management.asciidoc index 4245227a1524d..4da62e5b2c7c0 100644 --- a/docs/reference/data-management.asciidoc +++ b/docs/reference/data-management.asciidoc @@ -6,29 +6,27 @@ -- The data you store in {es} generally falls into one of two categories: -* Content: a collection of items you want to search, such as a catalog of products -* Time series data: a stream of continuously-generated timestamped data, such as log entries +* *Content*: a collection of items you want to search, such as a catalog of products +* *Time series data*: a stream of continuously generated timestamped data, such as log entries -Content might be frequently updated, +*Content* might be frequently updated, but the value of the content remains relatively constant over time. You want to be able to retrieve items quickly regardless of how old they are. -Time series data keeps accumulating over time, so you need strategies for +*Time series data* keeps accumulating over time, so you need strategies for balancing the value of the data against the cost of storing it. As it ages, it tends to become less important and less-frequently accessed, so you can move it to less expensive, less performant hardware. For your oldest data, what matters is that you have access to the data. It's ok if queries take longer to complete. -To help you manage your data, {es} offers you: +To help you manage your data, {es} offers you the following options: -* <> ({ilm-init}) to manage both indices and data streams and it is fully customisable, and -* <> which is the built-in lifecycle of data streams and addresses the most -common lifecycle management needs. +* <> +* <> +* {curator-ref-current}/about.html[Elastic Curator] -preview::["The built-in data stream lifecycle is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."] - -**{ilm-init}** can be used to manage both indices and data streams and it allows you to: +**{ilm-init}** can be used to manage both indices and data streams. It allows you to do the following: * Define the retention period of your data. The retention period is the minimum time your data will be stored in {es}. Data older than this period can be deleted by {es}. @@ -38,12 +36,24 @@ Data older than this period can be deleted by {es}. for your older indices while reducing operating costs and maintaining search performance. * Perform <> of data stored on less-performant hardware. -**Data stream lifecycle** is less feature rich but is focused on simplicity, so it allows you to easily: +**Data stream lifecycle** is less feature-rich but is focused on simplicity. It allows you to do the following: * Define the retention period of your data. The retention period is the minimum time your data will be stored in {es}. Data older than this period can be deleted by {es} at a later time.
-* Improve the performance of your data stream by performing background operations that will optimise the way your data -stream is stored. +* Improve the performance of your data stream by performing background operations that will optimise the way your data stream is stored. + +**Elastic Curator** is a tool that allows you to manage your indices and snapshots using user-defined filters and predefined actions. If ILM provides the functionality to manage your index lifecycle, and you have at least a Basic license, consider using ILM in place of Curator. Many stack components make use of ILM by default. {curator-ref-current}/ilm.html[Learn more]. + +NOTE: <> is a deprecated {es} feature that allows you to manage the amount of data that is stored in your cluster, similar to the downsampling functionality of {ilm-init} and data stream lifecycle. This feature should not be used for new deployments. + +[TIP] +==== +{ilm-init} is not available on {es-serverless}. + +In an {ecloud} or self-managed environment, ILM lets you automatically transition indices through data tiers according to your performance needs and retention requirements. This allows you to balance hardware costs with performance. {es-serverless} eliminates this complexity by optimizing your cluster performance for you. + +Data stream lifecycle is an optimized lifecycle tool that lets you focus on the most common lifecycle management needs, without unnecessary hardware-centric concepts like data tiers. +==== -- include::ilm/index.asciidoc[] diff --git a/docs/reference/data-management/migrate-index-allocation-filters.asciidoc b/docs/reference/data-management/migrate-index-allocation-filters.asciidoc index 85d42e4105a92..ee7d5640d53df 100644 --- a/docs/reference/data-management/migrate-index-allocation-filters.asciidoc +++ b/docs/reference/data-management/migrate-index-allocation-filters.asciidoc @@ -2,7 +2,7 @@ [[migrate-index-allocation-filters]] == Migrate index allocation filters to node roles -If you currently use custom node attributes and +If you currently use <> and <> to move indices through <> in a https://www.elastic.co/blog/implementing-hot-warm-cold-in-elasticsearch-with-index-lifecycle-management[hot-warm-cold architecture], diff --git a/docs/reference/data-store-architecture.asciidoc b/docs/reference/data-store-architecture.asciidoc new file mode 100644 index 0000000000000..a0d504eb117c8 --- /dev/null +++ b/docs/reference/data-store-architecture.asciidoc @@ -0,0 +1,24 @@ += Data store architecture + +[partintro] +-- + +{es} is a distributed document store. Instead of storing information as rows of columnar data, {es} stores complex data structures that have been serialized as JSON documents. When you have multiple {es} nodes in a cluster, stored documents are distributed across the cluster and can be accessed immediately +from any node. + +The topics in this section provide information about the architecture of {es} and how it stores and retrieves data: + +* <>: Learn about the basic building blocks of an {es} cluster, including nodes, shards, primaries, and replicas. +* <>: Learn about the different roles that nodes can have in an {es} cluster. +* <>: Learn how {es} replicates read and write operations across shards and shard copies. +* <>: Learn how {es} allocates and balances shards across nodes. +** <>: Learn how to use custom node attributes to distribute shards across different racks or availability zones. +* <>: Learn how {es} caches search requests to improve performance.
+-- + +include::nodes-shards.asciidoc[] +include::node-roles.asciidoc[] +include::docs/data-replication.asciidoc[leveloffset=-1] +include::modules/shard-ops.asciidoc[] +include::modules/cluster/allocation_awareness.asciidoc[leveloffset=+1] +include::shard-request-cache.asciidoc[leveloffset=-1] diff --git a/docs/reference/data-streams/data-stream-apis.asciidoc b/docs/reference/data-streams/data-stream-apis.asciidoc index c13703ab2a6ee..8b952fad59f8d 100644 --- a/docs/reference/data-streams/data-stream-apis.asciidoc +++ b/docs/reference/data-streams/data-stream-apis.asciidoc @@ -2,6 +2,12 @@ [[data-stream-apis]] == Data stream APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. +-- + The following APIs are available for managing <>: * <> diff --git a/docs/reference/data-streams/downsampling.asciidoc b/docs/reference/data-streams/downsampling.asciidoc index 0b08b0972f9a1..10a0241cf0732 100644 --- a/docs/reference/data-streams/downsampling.asciidoc +++ b/docs/reference/data-streams/downsampling.asciidoc @@ -72,6 +72,45 @@ the granularity of `cold` archival data to monthly or less. .Downsampled metrics series image::images/data-streams/time-series-downsampled.png[align="center"] +[discrete] +[[downsample-api-process]] +==== The downsampling process + +The downsampling operation traverses the source TSDS index and performs the +following steps: + +. Creates a new document for each value of the `_tsid` field and each +`@timestamp` value, rounded to the `fixed_interval` defined in the downsample +configuration. +. For each new document, copies all <> from the source index to the target index. Dimensions in a +TSDS are constant, so this is done only once per bucket. +. For each <> field, computes aggregations +for all documents in the bucket. Depending on the metric type of each metric +field, a different set of pre-aggregated results is stored: + +** `gauge`: The `min`, `max`, `sum`, and `value_count` are stored; `value_count` +is stored as type `aggregate_metric_double`. +** `counter`: The `last_value` is stored. +. For all other fields, the most recent value is copied to the target index. + +[discrete] +[[downsample-api-mappings]] +==== Source and target index field mappings + +Fields in the target, downsampled index are created based on fields in the +original source index, as follows: + +. All fields mapped with the `time_series_dimension` parameter are created in +the target downsample index with the same mapping as in the source index. +. All fields mapped with the `time_series_metric` parameter are created +in the target downsample index with the same mapping as in the source +index. An exception is that for fields mapped as `time_series_metric: gauge` +the field type is changed to `aggregate_metric_double`. +. All other fields that are neither dimensions nor metrics (that is, label +fields) are created in the target downsample index with the same mapping +that they had in the source index.
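As a concrete illustration of the process described above, the following sketch downsamples a TSDS backing index into hourly buckets (the index names are placeholders, and the source index must be made read-only before it can be downsampled):

[source,console]
----
POST /my-time-series-index/_downsample/my-downsampled-index
{
  "fixed_interval": "1h"
}
----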
+ [discrete] [[running-downsampling]] === Running downsampling on time series data diff --git a/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc index 315f7fa85e45f..5222d33b5870b 100644 --- a/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc @@ -4,6 +4,12 @@ Delete Data Stream Lifecycle ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. +-- + Deletes the <> from a set of data streams. [[delete-lifecycle-api-prereqs]] diff --git a/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc index 2b15886ebe192..8289fb54d51bd 100644 --- a/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc @@ -4,6 +4,12 @@ Explain Data Stream Lifecycle ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. +-- + Retrieves the current <> status for one or more data stream backing indices. [[explain-lifecycle-api-prereqs]] diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc index f48fa1eb52daa..0fbe7de287f7b 100644 --- a/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc @@ -4,6 +4,12 @@ Get Data Stream Lifecycle ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. +-- + Gets stats about the execution of <>. [[get-lifecycle-stats-api-prereqs]] diff --git a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc index 6323fac1eac2f..57d63fee2ddc1 100644 --- a/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc @@ -4,6 +4,12 @@ Get Data Stream Lifecycle ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. +-- + Gets the <> of a set of <>. [[get-lifecycle-api-prereqs]] diff --git a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc index c60c105e818ab..c5002cf4882e2 100644 --- a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc @@ -4,6 +4,12 @@ Put Data Stream Lifecycle ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. +-- + Configures the data stream <> for the targeted <>. 
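A minimal sketch of the request (the data stream name and retention period are illustrative):

[source,console]
----
PUT _data_stream/my-data-stream/_lifecycle
{
  "data_retention": "7d"
}
----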
[[put-lifecycle-api-prereqs]] diff --git a/docs/reference/data-streams/modify-data-streams-api.asciidoc b/docs/reference/data-streams/modify-data-streams-api.asciidoc index 2da869083df22..2f717f9ec3b4b 100644 --- a/docs/reference/data-streams/modify-data-streams-api.asciidoc +++ b/docs/reference/data-streams/modify-data-streams-api.asciidoc @@ -4,6 +4,12 @@ Modify data streams ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. +-- + Performs one or more <> modification actions in a single atomic operation. diff --git a/docs/reference/data-streams/promote-data-stream-api.asciidoc b/docs/reference/data-streams/promote-data-stream-api.asciidoc index 5ba9c4d9fad0e..33005e80e9408 100644 --- a/docs/reference/data-streams/promote-data-stream-api.asciidoc +++ b/docs/reference/data-streams/promote-data-stream-api.asciidoc @@ -5,6 +5,12 @@ Promote data stream ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. +-- + The purpose of the promote <> API is to turn a data stream that is replicated by CCR into a regular data stream. diff --git a/docs/reference/datatiers.asciidoc b/docs/reference/datatiers.asciidoc index 65e029d876e6f..066765368ec5e 100644 --- a/docs/reference/datatiers.asciidoc +++ b/docs/reference/datatiers.asciidoc @@ -189,7 +189,7 @@ tier]. [[configure-data-tiers-on-premise]] ==== Self-managed deployments -For self-managed deployments, each node's <> is configured +For self-managed deployments, each node's <> is configured in `elasticsearch.yml`. For example, the highest-performance nodes in a cluster might be assigned to both the hot and content tiers: diff --git a/docs/reference/docs.asciidoc b/docs/reference/docs.asciidoc index ff2d823410a6d..ccdbaaffb2b77 100644 --- a/docs/reference/docs.asciidoc +++ b/docs/reference/docs.asciidoc @@ -1,9 +1,13 @@ [[docs]] == Document APIs -This section starts with a short introduction to {es}'s <>, followed by a detailed description of the following CRUD -APIs: +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. +-- + +This section describes the following CRUD APIs: .Single document APIs * <> @@ -18,8 +22,6 @@ APIs: * <> * <> -include::docs/data-replication.asciidoc[] - include::docs/index_.asciidoc[] include::docs/get.asciidoc[] diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index 6edccfcdb13f5..78169e841dab4 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -4,6 +4,12 @@ Bulk ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. +-- + Performs multiple indexing or delete operations in a single API call. This reduces overhead and can greatly increase indexing speed. 
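For example, a single request can mix index and delete actions in NDJSON form (the index name and document IDs are placeholders):

[source,console]
----
POST _bulk
{ "index": { "_index": "my-index", "_id": "1" } }
{ "message": "trying out bulk indexing" }
{ "delete": { "_index": "my-index", "_id": "2" } }
----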
diff --git a/docs/reference/docs/data-replication.asciidoc b/docs/reference/docs/data-replication.asciidoc index 2c1a16c81d011..6ee266070e727 100644 --- a/docs/reference/docs/data-replication.asciidoc +++ b/docs/reference/docs/data-replication.asciidoc @@ -1,6 +1,6 @@ [[docs-replication]] -=== Reading and Writing documents +=== Reading and writing documents [discrete] ==== Introduction diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc index 452d7f7758bfa..046a20abdaffb 100644 --- a/docs/reference/docs/delete.asciidoc +++ b/docs/reference/docs/delete.asciidoc @@ -4,6 +4,12 @@ Delete ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. +-- + Removes a JSON document from the specified index. [[docs-delete-api-request]] diff --git a/docs/reference/docs/get.asciidoc b/docs/reference/docs/get.asciidoc index c71215fff8d73..a3ff70fb95f6e 100644 --- a/docs/reference/docs/get.asciidoc +++ b/docs/reference/docs/get.asciidoc @@ -4,6 +4,12 @@ Get ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. +-- + Retrieves the specified JSON document from an index. [source,console] diff --git a/docs/reference/docs/multi-get.asciidoc b/docs/reference/docs/multi-get.asciidoc index 7c3eafa9c79f3..293bd2568a34b 100644 --- a/docs/reference/docs/multi-get.asciidoc +++ b/docs/reference/docs/multi-get.asciidoc @@ -4,6 +4,12 @@ Multi get ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. +-- + Retrieves multiple JSON documents by ID. [source,console] diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index dc27e40ecd90b..2f6ddd344eaab 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -4,6 +4,12 @@ Reindex ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. +-- + Copies documents from a source to a destination. The source can be any existing index, alias, or data stream. The destination diff --git a/docs/reference/docs/termvectors.asciidoc b/docs/reference/docs/termvectors.asciidoc index 31dfba1ac2668..d40452fb4875a 100644 --- a/docs/reference/docs/termvectors.asciidoc +++ b/docs/reference/docs/termvectors.asciidoc @@ -4,6 +4,12 @@ Term vectors ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. +-- + Retrieves information and statistics for terms in the fields of a particular document. [source,console] diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index d470080fc602f..c8d68082c8ea1 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -4,6 +4,12 @@ Update by query ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. +-- + Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. 
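For example, the following sketch updates only documents matching a term query, with `conflicts=proceed` telling the request to continue past version conflicts (the index and field names are placeholders):

[source,console]
----
POST my-index/_update_by_query?conflicts=proceed
{
  "query": {
    "term": {
      "user.id": "kimchy"
    }
  }
}
----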
diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index a212c4e152b0e..ae9ae8fe73fc6 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -4,6 +4,12 @@ Update ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-document[Document APIs]. +-- + Updates a document using the specified script. [[docs-update-api-request]] diff --git a/docs/reference/eql/delete-async-eql-search-api.asciidoc b/docs/reference/eql/delete-async-eql-search-api.asciidoc index 2f52a1a9cb234..2783c9ac0b871 100644 --- a/docs/reference/eql/delete-async-eql-search-api.asciidoc +++ b/docs/reference/eql/delete-async-eql-search-api.asciidoc @@ -6,6 +6,12 @@ Delete async EQL search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-eql[EQL APIs]. +-- + Deletes an <> or a <>. The API also deletes results for the search. diff --git a/docs/reference/eql/eql-apis.asciidoc b/docs/reference/eql/eql-apis.asciidoc index e8cc2b21492ae..4d6aafd2039df 100644 --- a/docs/reference/eql/eql-apis.asciidoc +++ b/docs/reference/eql/eql-apis.asciidoc @@ -1,6 +1,12 @@ [[eql-apis]] == EQL APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-eql[EQL APIs]. +-- + <> is a query language for event-based time series data, such as logs, metrics, and traces. For an overview of EQL and related tutorials, see <>. diff --git a/docs/reference/eql/eql-search-api.asciidoc b/docs/reference/eql/eql-search-api.asciidoc index d7f10f4627f6c..544e4d7325c5b 100644 --- a/docs/reference/eql/eql-search-api.asciidoc +++ b/docs/reference/eql/eql-search-api.asciidoc @@ -6,6 +6,12 @@ EQL search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-eql[EQL APIs]. +-- + Returns search results for an <> query. EQL assumes each document in a data stream or index corresponds to an @@ -88,6 +94,53 @@ request that targets only `bar*` still returns an error. + Defaults to `true`. +`allow_partial_search_results`:: +(Optional, Boolean) + +If `false`, the request returns an error if one or more shards involved in the query are unavailable. ++ +If `true`, the query is executed only on the available shards, ignoring shard request timeouts and +<>. ++ +Defaults to `false`. ++ +To override the default for this field, set the +`xpack.eql.default_allow_partial_results` cluster setting to `true`. + + +[IMPORTANT] +==== +You can also specify this value using the `allow_partial_search_results` request body parameter. +If both parameters are specified, only the query parameter is used. +==== + + +`allow_partial_sequence_results`:: +(Optional, Boolean) + + +Used together with `allow_partial_search_results=true`, controls the behavior of sequence queries specifically +(if `allow_partial_search_results=false`, this setting has no effect). +If `true` and if some shards are unavailable, the sequences are calculated on available shards only. ++ +If `false` and if some shards are unavailable, the query only returns information about the shard failures, +but no further results. ++ +Defaults to `false`. ++ +Consider that sequences calculated with `allow_partial_search_results=true` can return incorrect results +(e.g. if a <> clause matches records in unavailable shards). ++ +To override the default for this field, set the +`xpack.eql.default_allow_partial_sequence_results` cluster setting to `true`.
 + + +[IMPORTANT] +==== +You can also specify this value using the `allow_partial_sequence_results` request body parameter. +If both parameters are specified, only the query parameter is used. +==== + `ccs_minimize_roundtrips`:: (Optional, Boolean) If `true`, network round-trips between the local and the remote cluster are minimized when running cross-cluster search (CCS) requests. diff --git a/docs/reference/eql/get-async-eql-search-api.asciidoc b/docs/reference/eql/get-async-eql-search-api.asciidoc index 2ac4271b5b986..ff4a07811b732 100644 --- a/docs/reference/eql/get-async-eql-search-api.asciidoc +++ b/docs/reference/eql/get-async-eql-search-api.asciidoc @@ -6,6 +6,12 @@ Get async EQL search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-eql[EQL APIs]. +-- + Returns the current status and available results for an <> or a <>. diff --git a/docs/reference/eql/get-async-eql-status-api.asciidoc b/docs/reference/eql/get-async-eql-status-api.asciidoc index 908a65773f6aa..cb3904a644921 100644 --- a/docs/reference/eql/get-async-eql-status-api.asciidoc +++ b/docs/reference/eql/get-async-eql-status-api.asciidoc @@ -5,6 +5,13 @@ ++++ Get async EQL search status ++++ + +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-eql[EQL APIs]. +-- + Returns the current status for an <> or a <> without returning results. This is a more lightweight API than diff --git a/docs/reference/esql/esql-apis.asciidoc b/docs/reference/esql/esql-apis.asciidoc index 8586cd1ae6bce..157f4e4357e78 100644 --- a/docs/reference/esql/esql-apis.asciidoc +++ b/docs/reference/esql/esql-apis.asciidoc @@ -1,6 +1,12 @@ [[esql-apis]] == {esql} APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-esql[ES|QL APIs]. +-- + The <> provides a powerful way to filter, transform, and analyze data stored in {es}, and in the future in other runtimes. For an overview of {esql} and related tutorials, see <>. diff --git a/docs/reference/esql/esql-async-query-api.asciidoc b/docs/reference/esql/esql-async-query-api.asciidoc index 6cd23fc524f96..8cb974cf6773b 100644 --- a/docs/reference/esql/esql-async-query-api.asciidoc +++ b/docs/reference/esql/esql-async-query-api.asciidoc @@ -4,6 +4,12 @@ {esql} async query API ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-esql[ES|QL APIs]. +-- + Runs an async <>. The async query API lets you asynchronously execute a query request, diff --git a/docs/reference/esql/esql-async-query-delete-api.asciidoc b/docs/reference/esql/esql-async-query-delete-api.asciidoc index 5cad566f7f9c0..421c59191f3bd 100644 --- a/docs/reference/esql/esql-async-query-delete-api.asciidoc +++ b/docs/reference/esql/esql-async-query-delete-api.asciidoc @@ -4,6 +4,12 @@ {esql} async query delete API ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-esql[ES|QL APIs]. +-- + The <> async query delete API is used to manually delete an async query by ID. If the query is still running, the query will be cancelled. Otherwise, the stored results are deleted.
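For example (the search ID shown is an illustrative placeholder returned by an earlier async query request):

[source,console]
----
DELETE /_query/async/FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=
----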
diff --git a/docs/reference/esql/esql-async-query-get-api.asciidoc b/docs/reference/esql/esql-async-query-get-api.asciidoc index 82a6ae5b28b51..693e96861adb9 100644 --- a/docs/reference/esql/esql-async-query-get-api.asciidoc +++ b/docs/reference/esql/esql-async-query-get-api.asciidoc @@ -4,6 +4,12 @@ {esql} async query get API ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-esql[ES|QL APIs]. +-- + Returns the current status and available results for an <> or a stored results. diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc index c2849e4889f98..adfd38478ab21 100644 --- a/docs/reference/esql/esql-limitations.asciidoc +++ b/docs/reference/esql/esql-limitations.asciidoc @@ -30,11 +30,11 @@ include::processing-commands/limit.asciidoc[tag=limitation] ** You can use `to_datetime` to cast to millisecond dates to use unsupported functions * `double` (`float`, `half_float`, `scaled_float` are represented as `double`) * `ip` -* `keyword` family including `keyword`, `constant_keyword`, and `wildcard` +* `keyword` <> including `keyword`, `constant_keyword`, and `wildcard` * `int` (`short` and `byte` are represented as `int`) * `long` * `null` -* `text` +* `text` <> including `text`, `semantic_text`, and `match_only_text` * experimental:[] `unsigned_long` * `version` * Spatial types @@ -112,7 +112,7 @@ it is necessary to use the search function, like <>, in a <> source command, or close enough to it. Otherwise, the query will fail with a validation error. Another limitation is that any <> command containing a full-text search function -cannot also use disjunctions (`OR`). +cannot also use disjunctions (`OR`) unless all functions used in the `OR` clauses are full-text functions themselves. For example, this query is valid: @@ -139,9 +139,18 @@ FROM books | WHERE MATCH(author, "Faulkner") OR author LIKE "Hemingway" ---- +However, this query will succeed because it uses full-text functions in both `OR` clauses: + +[source,esql] +---- +FROM books +| WHERE MATCH(author, "Faulkner") OR QSTR("author: Hemingway") +---- + + Note that, because of <>, any queries on `text` fields that do not explicitly use the full-text functions, -<> or <>, will behave as if the fields are actually `keyword` fields: +<>, <>, or <>, will behave as if the fields are actually `keyword` fields: they are case-sensitive and need to match the full string. [discrete] diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc index 8e07a627567df..eac66ecfde2dd 100644 --- a/docs/reference/esql/esql-query-api.asciidoc +++ b/docs/reference/esql/esql-query-api.asciidoc @@ -4,6 +4,12 @@ {esql} query API ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-esql[ES|QL APIs]. +-- + Returns search results for an <> query. [source,console] diff --git a/docs/reference/esql/functions/description/hash.asciidoc b/docs/reference/esql/functions/description/hash.asciidoc new file mode 100644 index 0000000000000..e074915c5132a --- /dev/null +++ b/docs/reference/esql/functions/description/hash.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Computes the hash of the input using various algorithms such as MD5, SHA, SHA-224, SHA-256, SHA-384, SHA-512.
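As a sketch of how the new function can be exercised through the {esql} query API (the index and field names follow the generated example above and are placeholders):

[source,console]
----
POST /_query
{
  "query": """
    FROM sample_data
    | WHERE message != "Connection error"
    | EVAL md5 = HASH("md5", message), sha256 = HASH("sha256", message)
    | KEEP message, md5, sha256
  """
}
----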
diff --git a/docs/reference/esql/functions/description/match.asciidoc b/docs/reference/esql/functions/description/match.asciidoc index 25f0571878d47..0724f0f108e3c 100644 --- a/docs/reference/esql/functions/description/match.asciidoc +++ b/docs/reference/esql/functions/description/match.asciidoc @@ -2,4 +2,4 @@ *Description* -Performs a <> on the specified field. Returns true if the provided query matches the row. +Use `MATCH` to perform a <> on the specified field. Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL. Match can be used on fields from the text family like <> and <>, as well as other field types like keyword, boolean, dates, and numeric types. For a simplified syntax, you can use the <> `:` operator instead of `MATCH`. `MATCH` returns true if the provided query matches the row. diff --git a/docs/reference/esql/functions/description/md5.asciidoc b/docs/reference/esql/functions/description/md5.asciidoc new file mode 100644 index 0000000000000..2ad847c0ce0e3 --- /dev/null +++ b/docs/reference/esql/functions/description/md5.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Computes the MD5 hash of the input. diff --git a/docs/reference/esql/functions/description/sha1.asciidoc b/docs/reference/esql/functions/description/sha1.asciidoc new file mode 100644 index 0000000000000..5bc29f86cc591 --- /dev/null +++ b/docs/reference/esql/functions/description/sha1.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Computes the SHA1 hash of the input. diff --git a/docs/reference/esql/functions/description/sha256.asciidoc b/docs/reference/esql/functions/description/sha256.asciidoc new file mode 100644 index 0000000000000..b2a7ef01e1069 --- /dev/null +++ b/docs/reference/esql/functions/description/sha256.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Computes the SHA256 hash of the input. diff --git a/docs/reference/esql/functions/description/to_date_nanos.asciidoc b/docs/reference/esql/functions/description/to_date_nanos.asciidoc index 3fac7295f1bed..955c19b43a12f 100644 --- a/docs/reference/esql/functions/description/to_date_nanos.asciidoc +++ b/docs/reference/esql/functions/description/to_date_nanos.asciidoc @@ -4,4 +4,4 @@ Converts an input to a nanosecond-resolution date value (aka date_nanos). -NOTE: The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch. +NOTE: The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Attempting to convert values outside of that range will result in null with a warning. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.
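To illustrate the range note above, a sketch via the query API: the first conversion is in range, while the second (year 2500) falls outside the `date_nanos` range and is expected to return null with a warning:

[source,console]
----
POST /_query
{
  "query": """
    ROW in_range = TO_DATE_NANOS("2024-06-15T12:00:00.000000000Z"),
        out_of_range = TO_DATE_NANOS("2500-01-01T00:00:00.000Z")
  """
}
----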
diff --git a/docs/reference/esql/functions/examples/bucket.asciidoc b/docs/reference/esql/functions/examples/bucket.asciidoc index 4afea30660339..264efc191748f 100644 --- a/docs/reference/esql/functions/examples/bucket.asciidoc +++ b/docs/reference/esql/functions/examples/bucket.asciidoc @@ -116,4 +116,18 @@ include::{esql-specs}/bucket.csv-spec[tag=reuseGroupingFunctionWithExpression] |=== include::{esql-specs}/bucket.csv-spec[tag=reuseGroupingFunctionWithExpression-result] |=== +Sometimes you need to change the start value of each bucket by a given duration (similar to date histogram +aggregation's <> parameter). To do so, you will need to +take into account how the language handles expressions within the `STATS` command: if these contain functions or +arithmetic operators, a virtual `EVAL` is inserted before and/or after the `STATS` command. Consequently, a double +compensation is needed to adjust the bucketed date value before the aggregation and then again after. For instance, +applying a negative offset of `1 hour` to buckets of `1 year` looks like this: +[source.merge.styled,esql] +---- +include::{esql-specs}/bucket.csv-spec[tag=bucketWithOffset] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/bucket.csv-spec[tag=bucketWithOffset-result] +|=== diff --git a/docs/reference/esql/functions/examples/hash.asciidoc b/docs/reference/esql/functions/examples/hash.asciidoc new file mode 100644 index 0000000000000..492e466eb395e --- /dev/null +++ b/docs/reference/esql/functions/examples/hash.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/hash.csv-spec[tag=hash] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/hash.csv-spec[tag=hash-result] +|=== + diff --git a/docs/reference/esql/functions/examples/md5.asciidoc b/docs/reference/esql/functions/examples/md5.asciidoc new file mode 100644 index 0000000000000..0b43bc5b791c9 --- /dev/null +++ b/docs/reference/esql/functions/examples/md5.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/hash.csv-spec[tag=md5] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/hash.csv-spec[tag=md5-result] +|=== + diff --git a/docs/reference/esql/functions/examples/sha1.asciidoc b/docs/reference/esql/functions/examples/sha1.asciidoc new file mode 100644 index 0000000000000..77786431a738a --- /dev/null +++ b/docs/reference/esql/functions/examples/sha1.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/hash.csv-spec[tag=sha1] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/hash.csv-spec[tag=sha1-result] +|=== + diff --git a/docs/reference/esql/functions/examples/sha256.asciidoc b/docs/reference/esql/functions/examples/sha256.asciidoc new file mode 100644 index 0000000000000..801c36d8effc8 --- /dev/null +++ b/docs/reference/esql/functions/examples/sha256.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+ +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/hash.csv-spec[tag=sha256] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/hash.csv-spec[tag=sha256-result] +|=== + diff --git a/docs/reference/esql/functions/kibana/definition/bucket.json b/docs/reference/esql/functions/kibana/definition/bucket.json index 18802f5ff8fef..f9c7f2f27d6f9 100644 --- a/docs/reference/esql/functions/kibana/definition/bucket.json +++ b/docs/reference/esql/functions/kibana/definition/bucket.json @@ -1598,7 +1598,8 @@ "FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS c = COUNT(1) BY b = BUCKET(salary, 5000.)\n| SORT b", "FROM sample_data \n| WHERE @timestamp >= NOW() - 1 day and @timestamp < NOW()\n| STATS COUNT(*) BY bucket = BUCKET(@timestamp, 25, NOW() - 1 day, NOW())", "FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS AVG(salary) BY bucket = BUCKET(hire_date, 20, \"1985-01-01T00:00:00Z\", \"1986-01-01T00:00:00Z\")\n| SORT bucket", - "FROM employees\n| STATS s1 = b1 + 1, s2 = BUCKET(salary / 1000 + 999, 50.) + 2 BY b1 = BUCKET(salary / 100 + 99, 50.), b2 = BUCKET(salary / 1000 + 999, 50.)\n| SORT b1, b2\n| KEEP s1, b1, s2, b2" + "FROM employees\n| STATS s1 = b1 + 1, s2 = BUCKET(salary / 1000 + 999, 50.) + 2 BY b1 = BUCKET(salary / 100 + 99, 50.), b2 = BUCKET(salary / 1000 + 999, 50.)\n| SORT b1, b2\n| KEEP s1, b1, s2, b2", + "FROM employees\n| STATS dates = MV_SORT(VALUES(birth_date)) BY b = BUCKET(birth_date + 1 HOUR, 1 YEAR) - 1 HOUR\n| EVAL d_count = MV_COUNT(dates)\n| SORT d_count, b\n| LIMIT 3" ], "preview" : false, "snapshot_only" : false diff --git a/docs/reference/esql/functions/kibana/definition/date_format.json b/docs/reference/esql/functions/kibana/definition/date_format.json index 6e2738fafb964..629415da30fa2 100644 --- a/docs/reference/esql/functions/kibana/definition/date_format.json +++ b/docs/reference/esql/functions/kibana/definition/date_format.json @@ -4,6 +4,18 @@ "name" : "date_format", "description" : "Returns a string representation of a date, in the provided format.", "signatures" : [ + { + "params" : [ + { + "name" : "dateFormat", + "type" : "date", + "optional" : true, + "description" : "Date format (optional). If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/hash.json b/docs/reference/esql/functions/kibana/definition/hash.json new file mode 100644 index 0000000000000..dbf4a2542afc5 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/hash.json @@ -0,0 +1,85 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "hash", + "description" : "Computes the hash of the input using various algorithms such as MD5, SHA, SHA-224, SHA-256, SHA-384, SHA-512.", + "signatures" : [ + { + "params" : [ + { + "name" : "algorithm", + "type" : "keyword", + "optional" : false, + "description" : "Hash algorithm to use." + }, + { + "name" : "input", + "type" : "keyword", + "optional" : false, + "description" : "Input to hash." 
+ } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "algorithm", + "type" : "keyword", + "optional" : false, + "description" : "Hash algorithm to use." + }, + { + "name" : "input", + "type" : "text", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "algorithm", + "type" : "text", + "optional" : false, + "description" : "Hash algorithm to use." + }, + { + "name" : "input", + "type" : "keyword", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "algorithm", + "type" : "text", + "optional" : false, + "description" : "Hash algorithm to use." + }, + { + "name" : "input", + "type" : "text", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + } + ], + "examples" : [ + "FROM sample_data \n| WHERE message != \"Connection error\"\n| EVAL md5 = hash(\"md5\", message), sha256 = hash(\"sha256\", message) \n| KEEP message, md5, sha256;" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/kql.json b/docs/reference/esql/functions/kibana/definition/kql.json index 6960681fbbf0d..440786ec63e77 100644 --- a/docs/reference/esql/functions/kibana/definition/kql.json +++ b/docs/reference/esql/functions/kibana/definition/kql.json @@ -33,5 +33,5 @@ "FROM books \n| WHERE KQL(\"author: Faulkner\")\n| KEEP book_no, author \n| SORT book_no \n| LIMIT 5;" ], "preview" : true, - "snapshot_only" : true + "snapshot_only" : false } diff --git a/docs/reference/esql/functions/kibana/definition/match.json b/docs/reference/esql/functions/kibana/definition/match.json index 7f2a8239cc0d0..eb206cb9ddf4d 100644 --- a/docs/reference/esql/functions/kibana/definition/match.json +++ b/docs/reference/esql/functions/kibana/definition/match.json @@ -2,7 +2,7 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", "type" : "eval", "name" : "match", - "description" : "Performs a <> on the specified field. Returns true if the provided query matches the row.", + "description" : "Use `MATCH` to perform a <> on the specified field.\nUsing `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.\n\nMatch can be used on fields from the text family like <> and <>,\nas well as other field types like keyword, boolean, dates, and numeric types.\n\nFor a simplified syntax, you can use the <> `:` operator instead of `MATCH`.\n\n`MATCH` returns true if the provided query matches the row.", "signatures" : [ { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/match_operator.json b/docs/reference/esql/functions/kibana/definition/match_operator.json index 44233bbddb653..c8cbf1cf9d966 100644 --- a/docs/reference/esql/functions/kibana/definition/match_operator.json +++ b/docs/reference/esql/functions/kibana/definition/match_operator.json @@ -2,7 +2,7 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", "type" : "operator", "name" : "match_operator", - "description" : "Performs a <> on the specified field. 
Returns true if the provided query matches the row.", + "description" : "Use `MATCH` to perform a <> on the specified field.\nUsing `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.\n\nMatch can be used on text fields, as well as other field types like boolean, dates, and numeric types.\n\nFor a simplified syntax, you can use the <> `:` operator instead of `MATCH`.\n\n`MATCH` returns true if the provided query matches the row.", "signatures" : [ { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/md5.json b/docs/reference/esql/functions/kibana/definition/md5.json new file mode 100644 index 0000000000000..4d3a88e123ff4 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/md5.json @@ -0,0 +1,37 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "md5", + "description" : "Computes the MD5 hash of the input.", + "signatures" : [ + { + "params" : [ + { + "name" : "input", + "type" : "keyword", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "input", + "type" : "text", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + } + ], + "examples" : [ + "FROM sample_data \n| WHERE message != \"Connection error\"\n| EVAL md5 = md5(message)\n| KEEP message, md5;" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/sha1.json b/docs/reference/esql/functions/kibana/definition/sha1.json new file mode 100644 index 0000000000000..a6abb31368bb3 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/sha1.json @@ -0,0 +1,37 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "sha1", + "description" : "Computes the SHA1 hash of the input.", + "signatures" : [ + { + "params" : [ + { + "name" : "input", + "type" : "keyword", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "input", + "type" : "text", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + } + ], + "examples" : [ + "FROM sample_data \n| WHERE message != \"Connection error\"\n| EVAL sha1 = sha1(message)\n| KEEP message, sha1;" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/sha256.json b/docs/reference/esql/functions/kibana/definition/sha256.json new file mode 100644 index 0000000000000..700425d485b61 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/sha256.json @@ -0,0 +1,37 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "sha256", + "description" : "Computes the SHA256 hash of the input.", + "signatures" : [ + { + "params" : [ + { + "name" : "input", + "type" : "keyword", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "input", + "type" : "text", + "optional" : false, + "description" : "Input to hash." 
+ } + ], + "variadic" : false, + "returnType" : "keyword" + } + ], + "examples" : [ + "FROM sample_data \n| WHERE message != \"Connection error\"\n| EVAL sha256 = sha256(message)\n| KEEP message, sha256;" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/to_date_nanos.json b/docs/reference/esql/functions/kibana/definition/to_date_nanos.json index d9409bceb8e6f..210b9608f9eff 100644 --- a/docs/reference/esql/functions/kibana/definition/to_date_nanos.json +++ b/docs/reference/esql/functions/kibana/definition/to_date_nanos.json @@ -3,7 +3,7 @@ "type" : "eval", "name" : "to_date_nanos", "description" : "Converts an input to a nanosecond-resolution date value (aka date_nanos).", - "note" : "The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.", + "note" : "The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Attempting to convert values outside of that range will result in null with a warning. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.", "signatures" : [ { "params" : [ @@ -90,6 +90,6 @@ "returnType" : "date_nanos" } ], - "preview" : true, + "preview" : false, "snapshot_only" : false } diff --git a/docs/reference/esql/functions/kibana/docs/hash.md b/docs/reference/esql/functions/kibana/docs/hash.md new file mode 100644 index 0000000000000..4e937778ba67a --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/hash.md @@ -0,0 +1,13 @@ + + +### HASH +Computes the hash of the input using various algorithms such as MD5, SHA, SHA-224, SHA-256, SHA-384, SHA-512. + +``` +FROM sample_data +| WHERE message != "Connection error" +| EVAL md5 = hash("md5", message), sha256 = hash("sha256", message) +| KEEP message, md5, sha256; +``` diff --git a/docs/reference/esql/functions/kibana/docs/match.md b/docs/reference/esql/functions/kibana/docs/match.md index adf6de91c90f1..80bf84351c188 100644 --- a/docs/reference/esql/functions/kibana/docs/match.md +++ b/docs/reference/esql/functions/kibana/docs/match.md @@ -3,7 +3,15 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ --> ### MATCH -Performs a <> on the specified field. Returns true if the provided query matches the row. +Use `MATCH` to perform a <> on the specified field. +Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL. + +Match can be used on fields from the text family like <> and <>, +as well as other field types like keyword, boolean, dates, and numeric types. + +For a simplified syntax, you can use the <> `:` operator instead of `MATCH`. + +`MATCH` returns true if the provided query matches the row. ``` FROM books diff --git a/docs/reference/esql/functions/kibana/docs/match_operator.md b/docs/reference/esql/functions/kibana/docs/match_operator.md index b0b6196798087..7681c2d1ce231 100644 --- a/docs/reference/esql/functions/kibana/docs/match_operator.md +++ b/docs/reference/esql/functions/kibana/docs/match_operator.md @@ -3,7 +3,14 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ --> ### MATCH_OPERATOR -Performs a <> on the specified field. Returns true if the provided query matches the row. +Use `MATCH` to perform a <> on the specified field.
+Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL. + +Match can be used on text fields, as well as other field types like boolean, dates, and numeric types. + +For a simplified syntax, you can use the <> `:` operator instead of `MATCH`. + +`MATCH` returns true if the provided query matches the row. ``` FROM books diff --git a/docs/reference/esql/functions/kibana/docs/md5.md b/docs/reference/esql/functions/kibana/docs/md5.md new file mode 100644 index 0000000000000..aacb8a3960165 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/md5.md @@ -0,0 +1,13 @@ + + +### MD5 +Computes the MD5 hash of the input. + +``` +FROM sample_data +| WHERE message != "Connection error" +| EVAL md5 = md5(message) +| KEEP message, md5; +``` diff --git a/docs/reference/esql/functions/kibana/docs/sha1.md b/docs/reference/esql/functions/kibana/docs/sha1.md new file mode 100644 index 0000000000000..a940aa133f06e --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/sha1.md @@ -0,0 +1,13 @@ + + +### SHA1 +Computes the SHA1 hash of the input. + +``` +FROM sample_data +| WHERE message != "Connection error" +| EVAL sha1 = sha1(message) +| KEEP message, sha1; +``` diff --git a/docs/reference/esql/functions/kibana/docs/sha256.md b/docs/reference/esql/functions/kibana/docs/sha256.md new file mode 100644 index 0000000000000..fbe576c7c20d6 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/sha256.md @@ -0,0 +1,13 @@ + + +### SHA256 +Computes the SHA256 hash of the input. + +``` +FROM sample_data +| WHERE message != "Connection error" +| EVAL sha256 = sha256(message) +| KEEP message, sha256; +``` diff --git a/docs/reference/esql/functions/kibana/docs/to_date_nanos.md b/docs/reference/esql/functions/kibana/docs/to_date_nanos.md index 0294802485ccb..1bce8d4fca832 100644 --- a/docs/reference/esql/functions/kibana/docs/to_date_nanos.md +++ b/docs/reference/esql/functions/kibana/docs/to_date_nanos.md @@ -5,4 +5,4 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ ### TO_DATE_NANOS Converts an input to a nanosecond-resolution date value (aka date_nanos). -Note: The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch. +Note: The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Attempting to convert values outside of that range will result in null with a warning. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch. diff --git a/docs/reference/esql/functions/layout/hash.asciidoc b/docs/reference/esql/functions/layout/hash.asciidoc new file mode 100644 index 0000000000000..daf7fbf1170b2 --- /dev/null +++ b/docs/reference/esql/functions/layout/hash.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+ +[discrete] +[[esql-hash]] +=== `HASH` + +*Syntax* + +[.text-center] +image::esql/functions/signature/hash.svg[Embedded,opts=inline] + +include::../parameters/hash.asciidoc[] +include::../description/hash.asciidoc[] +include::../types/hash.asciidoc[] +include::../examples/hash.asciidoc[] diff --git a/docs/reference/esql/functions/layout/md5.asciidoc b/docs/reference/esql/functions/layout/md5.asciidoc new file mode 100644 index 0000000000000..82d3031d6bdfd --- /dev/null +++ b/docs/reference/esql/functions/layout/md5.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-md5]] +=== `MD5` + +*Syntax* + +[.text-center] +image::esql/functions/signature/md5.svg[Embedded,opts=inline] + +include::../parameters/md5.asciidoc[] +include::../description/md5.asciidoc[] +include::../types/md5.asciidoc[] +include::../examples/md5.asciidoc[] diff --git a/docs/reference/esql/functions/layout/sha1.asciidoc b/docs/reference/esql/functions/layout/sha1.asciidoc new file mode 100644 index 0000000000000..23e1e0e9ac2ab --- /dev/null +++ b/docs/reference/esql/functions/layout/sha1.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-sha1]] +=== `SHA1` + +*Syntax* + +[.text-center] +image::esql/functions/signature/sha1.svg[Embedded,opts=inline] + +include::../parameters/sha1.asciidoc[] +include::../description/sha1.asciidoc[] +include::../types/sha1.asciidoc[] +include::../examples/sha1.asciidoc[] diff --git a/docs/reference/esql/functions/layout/sha256.asciidoc b/docs/reference/esql/functions/layout/sha256.asciidoc new file mode 100644 index 0000000000000..d36a1345271f5 --- /dev/null +++ b/docs/reference/esql/functions/layout/sha256.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-sha256]] +=== `SHA256` + +*Syntax* + +[.text-center] +image::esql/functions/signature/sha256.svg[Embedded,opts=inline] + +include::../parameters/sha256.asciidoc[] +include::../description/sha256.asciidoc[] +include::../types/sha256.asciidoc[] +include::../examples/sha256.asciidoc[] diff --git a/docs/reference/esql/functions/layout/to_date_nanos.asciidoc b/docs/reference/esql/functions/layout/to_date_nanos.asciidoc index 977a0ac969e5d..2dfd13dac7e20 100644 --- a/docs/reference/esql/functions/layout/to_date_nanos.asciidoc +++ b/docs/reference/esql/functions/layout/to_date_nanos.asciidoc @@ -4,8 +4,6 @@ [[esql-to_date_nanos]] === `TO_DATE_NANOS` -preview::["Do not use on production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] - *Syntax* [.text-center] diff --git a/docs/reference/esql/functions/parameters/hash.asciidoc b/docs/reference/esql/functions/parameters/hash.asciidoc new file mode 100644 index 0000000000000..d47a82d4ab214 --- /dev/null +++ b/docs/reference/esql/functions/parameters/hash.asciidoc @@ -0,0 +1,9 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`algorithm`:: +Hash algorithm to use. + +`input`:: +Input to hash. 
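To try the new hash functions end to end, the generated examples above can be run through the {esql} query API. The following is a minimal sketch, not part of the generated docs, assuming the `_query` endpoint and the `sample_data` index used throughout the generated examples:

[source,console]
----
POST /_query
{
  "query": """
    FROM sample_data
    | WHERE message != "Connection error"
    | EVAL md5 = md5(message), sha256 = hash("sha256", message)
    | KEEP message, md5, sha256
    | LIMIT 5
  """
}
----

Both forms return `keyword` results, matching the type tables added in this change.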
diff --git a/docs/reference/esql/functions/parameters/md5.asciidoc b/docs/reference/esql/functions/parameters/md5.asciidoc new file mode 100644 index 0000000000000..99eba4dc2cb3d --- /dev/null +++ b/docs/reference/esql/functions/parameters/md5.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`input`:: +Input to hash. diff --git a/docs/reference/esql/functions/parameters/sha1.asciidoc b/docs/reference/esql/functions/parameters/sha1.asciidoc new file mode 100644 index 0000000000000..99eba4dc2cb3d --- /dev/null +++ b/docs/reference/esql/functions/parameters/sha1.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`input`:: +Input to hash. diff --git a/docs/reference/esql/functions/parameters/sha256.asciidoc b/docs/reference/esql/functions/parameters/sha256.asciidoc new file mode 100644 index 0000000000000..99eba4dc2cb3d --- /dev/null +++ b/docs/reference/esql/functions/parameters/sha256.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`input`:: +Input to hash. diff --git a/docs/reference/esql/functions/search-functions.asciidoc b/docs/reference/esql/functions/search-functions.asciidoc index 238813c382c8c..a36473a7e8869 100644 --- a/docs/reference/esql/functions/search-functions.asciidoc +++ b/docs/reference/esql/functions/search-functions.asciidoc @@ -6,19 +6,23 @@ ++++ Full text functions are used to search for text in fields. -<> is used to analyze the query before it is searched. +<> is used to analyze the query before it is searched. Full text functions can be used to match <>. A multivalued field that contains a value that matches a full text query is considered to match the query. +Full text functions are significantly more performant for text search on large data sets than pattern matching or regular expressions with `LIKE` or `RLIKE`. + See <> for information on the limitations of full text search.
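To make the performance claim above concrete, here is a hedged sketch contrasting the two approaches, assuming the `books` index used by the MATCH and KQL examples in this change. The full-text form is backed by the inverted index:

[source,console]
----
POST /_query
{
  "query": """
    FROM books
    | WHERE MATCH(author, "Faulkner")
    | KEEP book_no, author
    | SORT book_no
    | LIMIT 5
  """
}
----

The pattern-matching equivalent, `WHERE author LIKE "*Faulkner*"`, returns similar rows but must evaluate the wildcard against every value instead of consulting the index.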
{esql} supports these full-text search functions: // tag::search_list[] +* experimental:[] <> * experimental:[] <> * experimental:[] <> // end::search_list[] +include::layout/kql.asciidoc[] include::layout/match.asciidoc[] include::layout/qstr.asciidoc[] diff --git a/docs/reference/esql/functions/signature/hash.svg b/docs/reference/esql/functions/signature/hash.svg new file mode 100644 index 0000000000000..f819e14c9d1a4 --- /dev/null +++ b/docs/reference/esql/functions/signature/hash.svg @@ -0,0 +1 @@ +HASH(algorithm,input) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/md5.svg b/docs/reference/esql/functions/signature/md5.svg new file mode 100644 index 0000000000000..419af764a212e --- /dev/null +++ b/docs/reference/esql/functions/signature/md5.svg @@ -0,0 +1 @@ +MD5(input) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/sha1.svg b/docs/reference/esql/functions/signature/sha1.svg new file mode 100644 index 0000000000000..bab03a7eb88c8 --- /dev/null +++ b/docs/reference/esql/functions/signature/sha1.svg @@ -0,0 +1 @@ +SHA1(input) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/sha256.svg b/docs/reference/esql/functions/signature/sha256.svg new file mode 100644 index 0000000000000..b77126bbefbd8 --- /dev/null +++ b/docs/reference/esql/functions/signature/sha256.svg @@ -0,0 +1 @@ +SHA256(input) \ No newline at end of file diff --git a/docs/reference/esql/functions/string-functions.asciidoc b/docs/reference/esql/functions/string-functions.asciidoc index ce9636f5c5a3a..dd10e4c77581e 100644 --- a/docs/reference/esql/functions/string-functions.asciidoc +++ b/docs/reference/esql/functions/string-functions.asciidoc @@ -13,15 +13,19 @@ * <> * <> * <> +* <> * <> * <> * <> * <> +* <> * <> * <> * <> * <> * <> +* <> +* <> * <> * <> * <> @@ -37,15 +41,19 @@ include::layout/byte_length.asciidoc[] include::layout/concat.asciidoc[] include::layout/ends_with.asciidoc[] include::layout/from_base64.asciidoc[] +include::layout/hash.asciidoc[] include::layout/left.asciidoc[] include::layout/length.asciidoc[] include::layout/locate.asciidoc[] include::layout/ltrim.asciidoc[] +include::layout/md5.asciidoc[] include::layout/repeat.asciidoc[] include::layout/replace.asciidoc[] include::layout/reverse.asciidoc[] include::layout/right.asciidoc[] include::layout/rtrim.asciidoc[] +include::layout/sha1.asciidoc[] +include::layout/sha256.asciidoc[] include::layout/space.asciidoc[] include::layout/split.asciidoc[] include::layout/starts_with.asciidoc[] diff --git a/docs/reference/esql/functions/type-conversion-functions.asciidoc b/docs/reference/esql/functions/type-conversion-functions.asciidoc index 9ac9ec290c07b..bd70c2789dfa2 100644 --- a/docs/reference/esql/functions/type-conversion-functions.asciidoc +++ b/docs/reference/esql/functions/type-conversion-functions.asciidoc @@ -18,6 +18,7 @@ * <> * experimental:[] <> * <> +* <> * <> * <> * <> @@ -37,6 +38,7 @@ include::layout/to_cartesianpoint.asciidoc[] include::layout/to_cartesianshape.asciidoc[] include::layout/to_dateperiod.asciidoc[] include::layout/to_datetime.asciidoc[] +include::layout/to_date_nanos.asciidoc[] include::layout/to_degrees.asciidoc[] include::layout/to_double.asciidoc[] include::layout/to_geopoint.asciidoc[] diff --git a/docs/reference/esql/functions/types/date_format.asciidoc b/docs/reference/esql/functions/types/date_format.asciidoc index b2e97dfa8835a..580094e9be906 100644 --- a/docs/reference/esql/functions/types/date_format.asciidoc +++ 
b/docs/reference/esql/functions/types/date_format.asciidoc @@ -5,6 +5,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== dateFormat | date | result +date | | keyword keyword | date | keyword text | date | keyword |=== diff --git a/docs/reference/esql/functions/types/hash.asciidoc b/docs/reference/esql/functions/types/hash.asciidoc new file mode 100644 index 0000000000000..786ba03b2aa60 --- /dev/null +++ b/docs/reference/esql/functions/types/hash.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +algorithm | input | result +keyword | keyword | keyword +keyword | text | keyword +text | keyword | keyword +text | text | keyword +|=== diff --git a/docs/reference/esql/functions/types/md5.asciidoc b/docs/reference/esql/functions/types/md5.asciidoc new file mode 100644 index 0000000000000..049a553397bbd --- /dev/null +++ b/docs/reference/esql/functions/types/md5.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +input | result +keyword | keyword +text | keyword +|=== diff --git a/docs/reference/esql/functions/types/sha1.asciidoc b/docs/reference/esql/functions/types/sha1.asciidoc new file mode 100644 index 0000000000000..049a553397bbd --- /dev/null +++ b/docs/reference/esql/functions/types/sha1.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +input | result +keyword | keyword +text | keyword +|=== diff --git a/docs/reference/esql/functions/types/sha256.asciidoc b/docs/reference/esql/functions/types/sha256.asciidoc new file mode 100644 index 0000000000000..049a553397bbd --- /dev/null +++ b/docs/reference/esql/functions/types/sha256.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +input | result +keyword | keyword +text | keyword +|=== diff --git a/docs/reference/esql/processing-commands/where.asciidoc b/docs/reference/esql/processing-commands/where.asciidoc index 1d6fc1e90d595..68336d5358eaf 100644 --- a/docs/reference/esql/processing-commands/where.asciidoc +++ b/docs/reference/esql/processing-commands/where.asciidoc @@ -7,7 +7,7 @@ the input table for which the provided condition evaluates to `true`. [TIP] ==== -In case of value exclusions, fields with `null` values will be excluded from search results. +In case of value exclusions, fields with `null` values will be excluded from search results. In this context a `null` means either there is an explicit `null` value in the document or there is no value at all. For example: `WHERE field != "value"` will be interpreted as `WHERE field != "value" AND field IS NOT NULL`. ==== @@ -58,6 +58,26 @@ For a complete list of all functions, refer to <>. include::../functions/predicates.asciidoc[tag=body] +For matching text, you can use <> like `MATCH`. + +Use <> to perform a <> on a specified field. + +Match can be used on text fields, as well as other field types like boolean, dates, and numeric types. 
+ +[source.merge.styled,esql] +---- +include::{esql-specs}/match-function.csv-spec[tag=match-with-field] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/match-function.csv-spec[tag=match-with-field-result] +|=== + +[TIP] +==== +You can also use the shorthand <> `:` instead of `MATCH`. +==== + include::../functions/like.asciidoc[tag=body] include::../functions/rlike.asciidoc[tag=body] diff --git a/docs/reference/features/apis/features-apis.asciidoc b/docs/reference/features/apis/features-apis.asciidoc index fe06471cff0df..2582446340f15 100644 --- a/docs/reference/features/apis/features-apis.asciidoc +++ b/docs/reference/features/apis/features-apis.asciidoc @@ -1,6 +1,12 @@ [[features-apis]] == Features APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-features[Features APIs]. +-- + You can use the following APIs to introspect and manage Features provided by Elasticsearch and Elasticsearch plugins. diff --git a/docs/reference/features/apis/get-features-api.asciidoc b/docs/reference/features/apis/get-features-api.asciidoc index 676ec3c41da24..62986d7728ca7 100644 --- a/docs/reference/features/apis/get-features-api.asciidoc +++ b/docs/reference/features/apis/get-features-api.asciidoc @@ -4,6 +4,12 @@ Get features ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-features[Features APIs]. +-- + Gets a list of features which can be included in snapshots using the <> when creating a snapshot. diff --git a/docs/reference/features/apis/reset-features-api.asciidoc b/docs/reference/features/apis/reset-features-api.asciidoc index 36ff12cf0fc33..e2d3f249304bf 100644 --- a/docs/reference/features/apis/reset-features-api.asciidoc +++ b/docs/reference/features/apis/reset-features-api.asciidoc @@ -6,6 +6,12 @@ experimental::[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-features[Features APIs]. +-- + Clears all of the state information stored in system indices by {es} features, including the security and machine learning indices. WARNING: Intended for development and testing use only. Do not reset features on a production cluster. diff --git a/docs/reference/fleet/fleet-multi-search.asciidoc b/docs/reference/fleet/fleet-multi-search.asciidoc index 3ee6b67b06ba4..5673e1abbbd1d 100644 --- a/docs/reference/fleet/fleet-multi-search.asciidoc +++ b/docs/reference/fleet/fleet-multi-search.asciidoc @@ -5,6 +5,12 @@ Fleet multi search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-fleet[{fleet} APIs]. +-- + Executes several <> with a single API request. The API follows the same structure as the <> API. However, diff --git a/docs/reference/fleet/fleet-search.asciidoc b/docs/reference/fleet/fleet-search.asciidoc index 961846385969a..81ad0c9a7aa95 100644 --- a/docs/reference/fleet/fleet-search.asciidoc +++ b/docs/reference/fleet/fleet-search.asciidoc @@ -5,6 +5,12 @@ Fleet search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-fleet[{fleet} APIs]. +-- + The purpose of the fleet search api is to provide a search api where the search will only be executed after provided checkpoint has been processed and is visible for searches inside of Elasticsearch. 
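To illustrate the checkpoint behavior described above, here is a hedged sketch of a fleet search call. The index name and checkpoint value are illustrative, and this assumes the `wait_for_checkpoints` parameter, which takes one sequence-number checkpoint per shard of the target index:

[source,console]
----
GET /my-index-000001/_fleet/_fleet_search?wait_for_checkpoints=77
{
  "query": {
    "match": { "message": "error" }
  }
}
----

The search executes only after the shard has processed operations up to sequence number 77, so a client that has just indexed a document can reliably search for it.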
diff --git a/docs/reference/fleet/index.asciidoc b/docs/reference/fleet/index.asciidoc index b22609aff32e7..8cd3cd59f21b0 100644 --- a/docs/reference/fleet/index.asciidoc +++ b/docs/reference/fleet/index.asciidoc @@ -2,6 +2,12 @@ [[fleet-apis]] == Fleet APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-fleet[{fleet} APIs]. +-- + TIP: For the {kib} {fleet} APIs, see the {fleet-guide}/fleet-api-docs.html[Fleet API Documentation]. diff --git a/docs/reference/graph/explore.asciidoc b/docs/reference/graph/explore.asciidoc index 34ac367125ade..60e9edb3b0f38 100644 --- a/docs/reference/graph/explore.asciidoc +++ b/docs/reference/graph/explore.asciidoc @@ -2,6 +2,12 @@ [[graph-explore-api]] == Graph explore API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-graph[Graph explore APIs]. +-- + The graph explore API enables you to extract and summarize information about the documents and terms in an {es} data stream or index. diff --git a/docs/reference/health/health.asciidoc b/docs/reference/health/health.asciidoc index 34714e80e1b18..606804a83bbda 100644 --- a/docs/reference/health/health.asciidoc +++ b/docs/reference/health/health.asciidoc @@ -4,6 +4,12 @@ Health ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-health_report[Cluster health APIs]. +-- + An API that reports the health status of an {es} cluster. [[health-api-request]] diff --git a/docs/reference/high-availability.asciidoc b/docs/reference/high-availability.asciidoc index 2f34b6bc1bb21..37e2a38aa0f2c 100644 --- a/docs/reference/high-availability.asciidoc +++ b/docs/reference/high-availability.asciidoc @@ -3,28 +3,28 @@ [partintro] -- -Your data is important to you. Keeping it safe and available is important -to {es}. Sometimes your cluster may experience hardware failure or a power -loss. To help you plan for this, {es} offers a number of features -to achieve high availability despite failures. +Your data is important to you. Keeping it safe and available is important to Elastic. Sometimes your cluster may experience hardware failure or a power loss. To help you plan for this, {es} offers a number of features to achieve high availability despite failures. Depending on your deployment type, you might need to provision servers in different zones or configure external repositories to meet your organization's availability needs. -* With proper planning, a cluster can be - <> to many of the - things that commonly go wrong, from the loss of a single node or network - connection right up to a zone-wide outage such as power loss. +* *<>* ++ +Distributed systems like Elasticsearch are designed to keep working even if some of their components have failed. An Elasticsearch cluster can continue operating normally if some of its nodes are unavailable or disconnected, as long as there are enough well-connected nodes to take over the unavailable node's responsibilities. ++ +If you're designing a smaller cluster, you might focus on making your cluster resilient to single-node failures. Designers of larger clusters must also consider cases where multiple nodes fail at the same time. +// need to improve connections to ECE, EC hosted, ECK pod/zone docs in the child topics -* You can use <> to replicate data to a remote _follower_ - cluster which may be in a different data centre or even on a different - continent from the leader cluster. 
The follower cluster acts as a hot - standby, ready for you to fail over in the event of a disaster so severe that - the leader cluster fails. The follower cluster can also act as a geo-replica - to serve searches from nearby clients. +* *<>* ++ +To effectively distribute read and write operations across nodes, the nodes in a cluster need good, reliable connections to each other. To provide better connections, you typically co-locate the nodes in the same data center or nearby data centers. ++ +Co-locating nodes in a single location exposes you to the risk of a single outage taking your entire cluster offline. To maintain high availability, you can prepare a second cluster that can take over in case of disaster by implementing {ccr} (CCR). ++ +CCR provides a way to automatically synchronize indices from a leader cluster to a follower cluster. This cluster could be in a different data center or even a different continent from the leader cluster. If the primary cluster fails, the secondary cluster can take over. ++ +TIP: You can also use CCR to create secondary clusters to serve read requests in geo-proximity to your users. -* The last line of defence against data loss is to take - <> of your cluster so that you can - restore a completely fresh copy of it elsewhere if needed. +* *<>* ++ +Take snapshots of your cluster that can be restored in case of failure. -- include::high-availability/cluster-design.asciidoc[] - -include::ccr/index.asciidoc[] diff --git a/docs/reference/high-availability/cluster-design.asciidoc b/docs/reference/high-availability/cluster-design.asciidoc index 105c8b236b0b1..d187db83c43f9 100644 --- a/docs/reference/high-availability/cluster-design.asciidoc +++ b/docs/reference/high-availability/cluster-design.asciidoc @@ -87,7 +87,7 @@ the same thing, but it's not necessary to use this feature in such a small cluster. We recommend you set only one of your two nodes to be -<>. This means you can be certain which of your +<>. This means you can be certain which of your nodes is the elected master of the cluster. The cluster can tolerate the loss of the other master-ineligible node. If you set both nodes to master-eligible, two nodes are required for a master election. Since the election will fail if either @@ -164,12 +164,12 @@ cluster that is suitable for production deployments. [[high-availability-cluster-design-three-nodes]] ==== Three-node clusters -If you have three nodes, we recommend they all be <> and +If you have three nodes, we recommend they all be <> and every index that is not a <> should have at least one replica. Nodes are data nodes by default. You may prefer for some indices to have two replicas so that each node has a copy of each shard in those indices. You should also configure each node to be -<> so that any two of them can hold a master +<> so that any two of them can hold a master election without needing to communicate with the third node. Nodes are master-eligible by default. This cluster will be resilient to the loss of any single node. @@ -188,8 +188,8 @@ service provides such a load balancer. Once your cluster grows to more than three nodes, you can start to specialise these nodes according to their responsibilities, allowing you to scale their -resources independently as needed. You can have as many <>, <>, <>, etc. as needed to +resources independently as needed. You can have as many <>, <>, <>, etc. as needed to support your workload. As your cluster grows larger, we recommend using dedicated nodes for each role.
This allows you to independently scale resources for each task. diff --git a/docs/reference/how-to/search-speed.asciidoc b/docs/reference/how-to/search-speed.asciidoc index 0ef55d7808873..2db03a19b3532 100644 --- a/docs/reference/how-to/search-speed.asciidoc +++ b/docs/reference/how-to/search-speed.asciidoc @@ -567,3 +567,8 @@ also possible to update the client-side logic in order to route queries to the relevant indices based on filters. However `constant_keyword` makes it transparently and allows to decouple search requests from the index topology in exchange of very little overhead. + +[discrete] +=== Default search timeout + +By default, search requests don't time out. You can set a timeout using the <> setting. diff --git a/docs/reference/ilm/apis/delete-lifecycle.asciidoc b/docs/reference/ilm/apis/delete-lifecycle.asciidoc index fc9a35e4ef570..64a421f091eff 100644 --- a/docs/reference/ilm/apis/delete-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/delete-lifecycle.asciidoc @@ -5,6 +5,12 @@ Delete policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Deletes an index <> policy. [[ilm-delete-lifecycle-request]] diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc index 31c6ae9e82ec7..4cc2667186442 100644 --- a/docs/reference/ilm/apis/explain.asciidoc +++ b/docs/reference/ilm/apis/explain.asciidoc @@ -5,6 +5,12 @@ Explain lifecycle ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Retrieves the current <> status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices. diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc index b4e07389a9fb7..b02d129ebe734 100644 --- a/docs/reference/ilm/apis/get-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc @@ -5,6 +5,12 @@ Get policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Retrieves a <> policy. [[ilm-get-lifecycle-request]] diff --git a/docs/reference/ilm/apis/get-status.asciidoc b/docs/reference/ilm/apis/get-status.asciidoc index f2ab8d65ec9a1..648080f26b79b 100644 --- a/docs/reference/ilm/apis/get-status.asciidoc +++ b/docs/reference/ilm/apis/get-status.asciidoc @@ -7,6 +7,12 @@ Get {ilm} status ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Retrieves the current <> ({ilm-init}) status. You can start or stop {ilm-init} with the <> and diff --git a/docs/reference/ilm/apis/ilm-api.asciidoc b/docs/reference/ilm/apis/ilm-api.asciidoc index 149ba2a6b4491..9cc594be1bea7 100644 --- a/docs/reference/ilm/apis/ilm-api.asciidoc +++ b/docs/reference/ilm/apis/ilm-api.asciidoc @@ -1,6 +1,12 @@ [[index-lifecycle-management-api]] == {ilm-cap} APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + You use the following APIs to set up policies to automatically manage the index lifecycle. For more information about {ilm} ({ilm-init}), see <>. 
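Several pages in this group document the create-or-update policy endpoint. A minimal sketch of such a call, with an illustrative policy name and thresholds (the values below are placeholders, not recommendations):

[source,console]
----
PUT _ilm/policy/my_policy
{
  "policy": {
    "phases": {
      "hot": {
        "actions": {
          "rollover": {
            "max_primary_shard_size": "50gb",
            "max_age": "30d"
          }
        }
      },
      "delete": {
        "min_age": "90d",
        "actions": {
          "delete": {}
        }
      }
    }
  }
}
----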
diff --git a/docs/reference/ilm/apis/migrate-to-data-tiers.asciidoc b/docs/reference/ilm/apis/migrate-to-data-tiers.asciidoc index 8ba57120a8a65..bbcdd71c45f1a 100644 --- a/docs/reference/ilm/apis/migrate-to-data-tiers.asciidoc +++ b/docs/reference/ilm/apis/migrate-to-data-tiers.asciidoc @@ -5,7 +5,13 @@ Migrate indices, ILM policies, and legacy, composable and component templates to data tiers routing ++++ -Switches the indices, ILM policies, and legacy, composable and component templates from using custom node attributes and +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + +Switches the indices, ILM policies, and legacy, composable and component templates from using <> and <> to using <>, and optionally deletes one legacy index template. Using node roles enables {ilm-init} to <> between diff --git a/docs/reference/ilm/apis/move-to-step.asciidoc b/docs/reference/ilm/apis/move-to-step.asciidoc index f3441fa997cff..a7a8cacf551db 100644 --- a/docs/reference/ilm/apis/move-to-step.asciidoc +++ b/docs/reference/ilm/apis/move-to-step.asciidoc @@ -5,6 +5,12 @@ Move to step ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Triggers execution of a specific step in the <> policy. [[ilm-move-to-step-request]] diff --git a/docs/reference/ilm/apis/put-lifecycle.asciidoc b/docs/reference/ilm/apis/put-lifecycle.asciidoc index 390f6b1bb4d15..3aa691ab06da9 100644 --- a/docs/reference/ilm/apis/put-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/put-lifecycle.asciidoc @@ -5,6 +5,12 @@ Create or update lifecycle policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Creates or updates <> policy. See <> for definitions of policy components. diff --git a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc index 107cab4d5aa19..5b5842f28619c 100644 --- a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc +++ b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc @@ -5,6 +5,12 @@ Remove policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Removes assigned <> policies from an index or a data stream's backing indices. diff --git a/docs/reference/ilm/apis/retry-policy.asciidoc b/docs/reference/ilm/apis/retry-policy.asciidoc index 8f01f15e0c3ad..a41d064b73400 100644 --- a/docs/reference/ilm/apis/retry-policy.asciidoc +++ b/docs/reference/ilm/apis/retry-policy.asciidoc @@ -5,6 +5,12 @@ Retry policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Retry executing the <> policy for an index that is in the ERROR step. [[ilm-retry-policy-request]] diff --git a/docs/reference/ilm/apis/start.asciidoc b/docs/reference/ilm/apis/start.asciidoc index c38b3d9ca8831..ce9b64455be05 100644 --- a/docs/reference/ilm/apis/start.asciidoc +++ b/docs/reference/ilm/apis/start.asciidoc @@ -7,6 +7,12 @@ Start {ilm} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Start the <> ({ilm-init}) plugin. 
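A quick sketch of starting {ilm-init} and confirming the result; once started, the status API should report an `operation_mode` of `RUNNING`:

[source,console]
----
POST _ilm/start

GET _ilm/status
----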
[[ilm-start-request]] diff --git a/docs/reference/ilm/apis/stop.asciidoc b/docs/reference/ilm/apis/stop.asciidoc index a6100d794c2d3..50d8aaf4e1fd6 100644 --- a/docs/reference/ilm/apis/stop.asciidoc +++ b/docs/reference/ilm/apis/stop.asciidoc @@ -7,6 +7,12 @@ Stop {ilm} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ilm[{ilm-cap} APIs]. +-- + Stop the <> ({ilm-init}) plugin. [[ilm-stop-request]] diff --git a/docs/reference/ilm/example-index-lifecycle-policy.asciidoc b/docs/reference/ilm/example-index-lifecycle-policy.asciidoc index 6ec261fabc448..0b3c17fb2caae 100644 --- a/docs/reference/ilm/example-index-lifecycle-policy.asciidoc +++ b/docs/reference/ilm/example-index-lifecycle-policy.asciidoc @@ -24,7 +24,7 @@ and retention requirements. You want to send log files to an {es} cluster so you can visualize and analyze the data. This data has the following retention requirements: -* When the write index reaches 50GB or is 30 days old, roll over to a new index. +* When the primary shard size of the write index reaches 50GB or the index is 30 days old, roll over to a new index. * After rollover, keep indices in the hot data tier for 30 days. * 30 days after rollover: ** Move indices to the warm data tier. @@ -84,7 +84,7 @@ To save the `logs@lifecycle` policy as a new policy in {kib}: . On the **Edit policy logs** page, toggle **Save as new policy**, and then provide a new name for the policy, for example, `logs-custom`. The `logs@lifecycle` policy uses the recommended rollover defaults: Start writing to a new -index when the current write index reaches 50GB or becomes 30 days old. +index when the primary shard size of the current write index reaches 50GB or the index becomes 30 days old. To view or change the rollover settings, click **Advanced settings** for the hot phase. 
Then disable **Use recommended defaults** to display the rollover diff --git a/docs/reference/images/ilm/tutorial-ilm-hotphaserollover-default.png b/docs/reference/images/ilm/tutorial-ilm-hotphaserollover-default.png index 14ff66e410835..d7f314cedb261 100644 Binary files a/docs/reference/images/ilm/tutorial-ilm-hotphaserollover-default.png and b/docs/reference/images/ilm/tutorial-ilm-hotphaserollover-default.png differ diff --git a/docs/reference/images/search/full-text-search-overview.svg b/docs/reference/images/search/full-text-search-overview.svg new file mode 100644 index 0000000000000..e7a1c5ba14cfa --- /dev/null +++ b/docs/reference/images/search/full-text-search-overview.svg @@ -0,0 +1,81 @@ [new SVG; markup stripped in extraction. Diagram title: "Full-text search with Elasticsearch". Labels: Source documents -> Analysis pipeline (transforms text to normalized terms) -> Inverted index (search-optimized data structure); Search query -> Relevance scoring (similarity algorithm scores documents) -> Search results (most relevant first).] \ No newline at end of file diff --git a/docs/reference/images/search/rag-schema.svg b/docs/reference/images/search/rag-schema.svg new file mode 100644 index 0000000000000..f26edac6c0077 --- /dev/null +++ b/docs/reference/images/search/rag-schema.svg @@ -0,0 +1,73 @@ [new SVG; markup stripped in extraction. Diagram of a RAG flow: (1) User query -> (2) Elasticsearch retrieves relevant documents (search strategy: full-text, semantic or hybrid search) -> (3) Language model processes context and generates answer, guided by custom instructions that define how the model should parse and render information -> (4) Response.] \ No newline at end of file diff --git a/docs/reference/images/search/rag-venn-diagram.svg b/docs/reference/images/search/rag-venn-diagram.svg new file mode 100644 index 0000000000000..9906aaefaba0c --- /dev/null +++ b/docs/reference/images/search/rag-venn-diagram.svg @@ -0,0 +1,19 @@ [new SVG; markup stripped in extraction. Venn diagram: "Information retrieval" and "Generative AI" overlap at "RAG".] \ No newline at end of file diff --git a/docs/reference/index-modules/allocation/data_tier_allocation.asciidoc b/docs/reference/index-modules/allocation/data_tier_allocation.asciidoc index d08af21007622..2d59e9be31cd4 100644 --- a/docs/reference/index-modules/allocation/data_tier_allocation.asciidoc +++ b/docs/reference/index-modules/allocation/data_tier_allocation.asciidoc @@ -13,7 +13,7 @@ This setting corresponds to the data node roles: * <> * <> -NOTE: The <> role is not a valid data tier and cannot be used +NOTE: The <> role is not a valid data tier and cannot be used with the `_tier_preference` setting. The frozen tier stores <> exclusively. diff --git a/docs/reference/index-modules/allocation/filtering.asciidoc b/docs/reference/index-modules/allocation/filtering.asciidoc index 07a2455ca1eff..5da8e254cb4f2 100644 --- a/docs/reference/index-modules/allocation/filtering.asciidoc +++ b/docs/reference/index-modules/allocation/filtering.asciidoc @@ -6,7 +6,7 @@ a particular index. These per-index filters are applied in conjunction with <> and <>. -Shard allocation filters can be based on custom node attributes or the built-in +Shard allocation filters can be based on <> or the built-in `_name`, `_host_ip`, `_publish_ip`, `_ip`, `_host`, `_id`, `_tier` and `_tier_preference` attributes.
<> uses filters based on custom node attributes to determine how to reallocate shards when moving @@ -114,7 +114,7 @@ The index allocation settings support the following built-in attributes: NOTE: `_tier` filtering is based on <> roles. Only a subset of roles are <> roles, and the generic -<> will match any tier filtering. +<> will match any tier filtering. You can use wildcards when specifying attribute values, for example: diff --git a/docs/reference/index-modules/slowlog.asciidoc b/docs/reference/index-modules/slowlog.asciidoc index c29296b59ad4a..e848668c1a66d 100644 --- a/docs/reference/index-modules/slowlog.asciidoc +++ b/docs/reference/index-modules/slowlog.asciidoc @@ -1,15 +1,118 @@ [[index-modules-slowlog]] -== Slow Log +== Slow log + +The slow log records search and indexing events that have execution durations above specified thresholds. You can use these logs to investigate, analyze, or troubleshoot your cluster's historical search and indexing performance. + +Slow logs report task duration at the shard level for searches, and at the index level +for indexing, but might not encompass the full task execution time observed on the client. For example, slow logs don't surface HTTP network delays or the impact of <>. + +Events that meet the specified threshold are emitted into <> under the `fileset.name` of `slowlog`. These logs can be viewed in the following locations: + +* If <> is enabled, from +{kibana-ref}/xpack-monitoring.html[Stack Monitoring]. Slow log events have a `logger` value of `index.search.slowlog` or `index.indexing.slowlog`. + +* From the local {es} service logs directory. Slow log files have a suffix of `_index_search_slowlog.json` or `_index_indexing_slowlog.json`. + +[discrete] +[[slow-log-format]] +=== Slow log format + +The following is an example of a search event in the slow log: + +TIP: If a call was initiated with an `X-Opaque-ID` header, then the ID is automatically included in search slow logs in the **elasticsearch.slowlog.id** field. See <> for details and best practices.
+ +[source,js] +--------------------------- +{ + "@timestamp": "2024-12-21T12:42:37.255Z", + "auth.type": "REALM", + "ecs.version": "1.2.0", + "elasticsearch.cluster.name": "distribution_run", + "elasticsearch.cluster.uuid": "Ui23kfF1SHKJwu_hI1iPPQ", + "elasticsearch.node.id": "JK-jn-XpQ3OsDUsq5ZtfGg", + "elasticsearch.node.name": "node-0", + "elasticsearch.slowlog.id": "tomcat-123", + "elasticsearch.slowlog.message": "[index6][0]", + "elasticsearch.slowlog.search_type": "QUERY_THEN_FETCH", + "elasticsearch.slowlog.source": "{\"query\":{\"match_all\":{\"boost\":1.0}}}", + "elasticsearch.slowlog.stats": "[]", + "elasticsearch.slowlog.took": "747.3micros", + "elasticsearch.slowlog.took_millis": 0, + "elasticsearch.slowlog.total_hits": "1 hits", + "elasticsearch.slowlog.total_shards": 1, + "event.dataset": "elasticsearch.index_search_slowlog", + "fileset.name" : "slowlog", + "log.level": "WARN", + "log.logger": "index.search.slowlog.query", + "process.thread.name": "elasticsearch[runTask-0][search][T#5]", + "service.name": "ES_ECS", + "user.name": "elastic", + "user.realm": "reserved" +} + +--------------------------- +// NOTCONSOLE + + +The following is an example of an indexing event in the slow log: + +[source,js] +--------------------------- +{ + "@timestamp" : "2024-12-11T22:34:22.613Z", + "auth.type": "REALM", + "ecs.version": "1.2.0", + "elasticsearch.cluster.name" : "41bd111609d849fc9bf9d25b5df9ce96", + "elasticsearch.cluster.uuid" : "BZTn4I9URXSK26imlia0QA", + "elasticsearch.index.id" : "3VfGR7wRRRKmMCEn7Ii58g", + "elasticsearch.index.name": "my-index-000001", + "elasticsearch.node.id" : "GGiBgg21S3eqPDHzQiCMvQ", + "elasticsearch.node.name" : "instance-0000000001", + "elasticsearch.slowlog.id" : "RCHbt5MBT0oSsCOu54AJ", + "elasticsearch.slowlog.source": "{\"key\":\"value\"}", + "elasticsearch.slowlog.took" : "0.01ms", + "event.dataset": "elasticsearch.index_indexing_slowlog", + "fileset.name" : "slowlog", + "log.level" : "TRACE", + "log.logger" : "index.indexing.slowlog.index", + "service.name" : "ES_ECS", + "user.name": "elastic", + "user.realm": "reserved" +} + +--------------------------- +// NOTCONSOLE + +[discrete] +[[enable-slow-log]] +=== Enable slow logging + +You can enable slow logging at two levels: + +* For all indices under the <>. This method requires a node restart. +* At the index level, using the <>. + +By default, all thresholds are set to `-1`, which results in no events being logged. + +Slow log thresholds can be enabled for the four logging levels: `trace`, `debug`, `info`, and `warn`. You can mimic setting log level thresholds by disabling more verbose levels. + +To view the current slow log settings, use the <>: + +[source,console] +-------------------------------------------------- +GET _all/_settings?expand_wildcards=all&filter_path=*.settings.index.*.slowlog +-------------------------------------------------- [discrete] [[search-slow-log]] -=== Search Slow Log +==== Enable slow logging for search events + +Search slow logs emit per shard. They must be enabled separately for the shard's link:https://www.elastic.co/blog/understanding-query-then-fetch-vs-dfs-query-then-fetch[query and fetch search phases]. -Shard level slow search log allows to log slow search (query and fetch -phases) into a dedicated log file. +You can use the `index.search.slowlog.include.user` setting to append `user.*` and `auth.type` fields to slow log entries. These fields contain information about the user who triggered the request.
-Thresholds can be set for both the query phase of the execution, and -fetch phase, here is a sample: +The following snippet adjusts all available search slow log settings across all indices using the +<>: [source,yaml] -------------------------------------------------- @@ -22,10 +125,11 @@ index.search.slowlog.threshold.fetch.warn: 1s index.search.slowlog.threshold.fetch.info: 800ms index.search.slowlog.threshold.fetch.debug: 500ms index.search.slowlog.threshold.fetch.trace: 200ms + +index.search.slowlog.include.user: true -------------------------------------------------- -All of the above settings are _dynamic_ and can be set for each index using the -<> API. For example: +The following snippet adjusts the same settings for a single index using the <>: [source,console] -------------------------------------------------- @@ -38,138 +142,109 @@ PUT /my-index-000001/_settings "index.search.slowlog.threshold.fetch.warn": "1s", "index.search.slowlog.threshold.fetch.info": "800ms", "index.search.slowlog.threshold.fetch.debug": "500ms", - "index.search.slowlog.threshold.fetch.trace": "200ms" + "index.search.slowlog.threshold.fetch.trace": "200ms", + "index.search.slowlog.include.user": true } -------------------------------------------------- // TEST[setup:my_index] -By default thresholds are disabled (set to `-1`). -The logging is done on the shard level scope, meaning the execution of a -search request within a specific shard. It does not encompass the whole -search request, which can be broadcast to several shards in order to -execute. Some of the benefits of shard level logging is the association -of the actual execution on the specific machine, compared with request -level. +[discrete] +[[index-slow-log]] +==== Enable slow logging for indexing events +Indexing slow logs emit per index document. -The search slow log file is configured in the `log4j2.properties` file. +You can use the `index.indexing.slowlog.include.user` setting to append `user.*` and `auth.type` fields to slow log entries. These fields contain information about the user who triggered the request. -[discrete] -==== Identifying search slow log origin +The following snippet adjusts all available indexing slow log settings across all indices using the +<>: -It is often useful to identify what triggered a slow running query. -To include information about the user that triggered a slow search, -use the `index.search.slowlog.include.user` setting. 
+[source,yaml] +-------------------------------------------------- +index.indexing.slowlog.threshold.index.warn: 10s +index.indexing.slowlog.threshold.index.info: 5s +index.indexing.slowlog.threshold.index.debug: 2s +index.indexing.slowlog.threshold.index.trace: 500ms + +index.indexing.slowlog.source: 1000 +index.indexing.slowlog.reformat: true + +index.indexing.slowlog.include.user: true +-------------------------------------------------- + + +The following snippet adjusts the same settings for a single index using the <>: [source,console] -------------------------------------------------- PUT /my-index-000001/_settings { - "index.search.slowlog.include.user": true + "index.indexing.slowlog.threshold.index.warn": "10s", + "index.indexing.slowlog.threshold.index.info": "5s", + "index.indexing.slowlog.threshold.index.debug": "2s", + "index.indexing.slowlog.threshold.index.trace": "500ms", + "index.indexing.slowlog.source": "1000", + "index.indexing.slowlog.reformat": true, + "index.indexing.slowlog.include.user": true } -------------------------------------------------- // TEST[setup:my_index] -This will result in user information being included in the slow log. +[discrete] +===== Logging the `_source` field -[source,js] ---------------------------- -{ - "@timestamp": "2024-02-21T12:42:37.255Z", - "log.level": "WARN", - "auth.type": "REALM", - "elasticsearch.slowlog.id": "tomcat-123", - "elasticsearch.slowlog.message": "[index6][0]", - "elasticsearch.slowlog.search_type": "QUERY_THEN_FETCH", - "elasticsearch.slowlog.source": "{\"query\":{\"match_all\":{\"boost\":1.0}}}", - "elasticsearch.slowlog.stats": "[]", - "elasticsearch.slowlog.took": "747.3micros", - "elasticsearch.slowlog.took_millis": 0, - "elasticsearch.slowlog.total_hits": "1 hits", - "elasticsearch.slowlog.total_shards": 1, - "user.name": "elastic", - "user.realm": "reserved", - "ecs.version": "1.2.0", - "service.name": "ES_ECS", - "event.dataset": "elasticsearch.index_search_slowlog", - "process.thread.name": "elasticsearch[runTask-0][search][T#5]", - "log.logger": "index.search.slowlog.query", - "elasticsearch.cluster.uuid": "Ui23kfF1SHKJwu_hI1iPPQ", - "elasticsearch.node.id": "JK-jn-XpQ3OsDUsq5ZtfGg", - "elasticsearch.node.name": "node-0", - "elasticsearch.cluster.name": "distribution_run" -} +By default, {es} logs the first 1000 characters of the `_source` in the slow log. You can adjust how `_source` is logged using the `index.indexing.slowlog.source` setting. Set `index.indexing.slowlog.source` to `false` or `0` to skip logging the source entirely. Set `index.indexing.slowlog.source` to `true` to log the entire source regardless of size. ---------------------------- -// NOTCONSOLE +The original `_source` is reformatted by default to make sure that it fits on a single log line. If preserving the original document format is important, then you can turn off reformatting by setting `index.indexing.slowlog.reformat` to `false`. This causes source to be logged with the original formatting intact, potentially spanning multiple log lines. -If a call was initiated with an `X-Opaque-ID` header, then the ID is included -in Search Slow logs in the **elasticsearch.slowlog.id** field. See -<> for details and best practices. +[discrete] +[[slow-log-fields]] [discrete] -[[index-slow-log]] -=== Index Slow log +[[troubleshoot-slow-log]] +=== Best practices for slow logging -The indexing slow log, similar in functionality to the search slow -log. The log file name ends with `_index_indexing_slowlog.json`. 
Log and -the thresholds are configured in the same way as the search slowlog. -Index slowlog sample: +Logging slow requests can be resource intensive for your {es} cluster, depending on the volume of qualifying traffic. For example, emitted logs might increase the index disk usage of your <> cluster. To reduce the impact of slow logs, consider the following: -[source,yaml] --------------------------------------------------- -index.indexing.slowlog.threshold.index.warn: 10s -index.indexing.slowlog.threshold.index.info: 5s -index.indexing.slowlog.threshold.index.debug: 2s -index.indexing.slowlog.threshold.index.trace: 500ms -index.indexing.slowlog.source: 1000 --------------------------------------------------- +* Enable slow logs for specific indices rather than across all indices. +* Set high thresholds to reduce the number of logged events. +* Enable slow logs only when troubleshooting. -All of the above settings are _dynamic_ and can be set for each index using the -<> API. For example: +If you aren't sure how to start investigating traffic issues, consider enabling the `warn` level with a high `30s` threshold at the index level using the <>: +* Enable for search requests: ++ [source,console] -------------------------------------------------- -PUT /my-index-000001/_settings +PUT /*/_settings { - "index.indexing.slowlog.threshold.index.warn": "10s", - "index.indexing.slowlog.threshold.index.info": "5s", - "index.indexing.slowlog.threshold.index.debug": "2s", - "index.indexing.slowlog.threshold.index.trace": "500ms", - "index.indexing.slowlog.source": "1000" + "index.search.slowlog.include.user": true, + "index.search.slowlog.threshold.fetch.warn": "30s", + "index.search.slowlog.threshold.query.warn": "30s" } -------------------------------------------------- // TEST[setup:my_index] -To include information about the user that triggered a slow indexing event, -use the `index.indexing.slowlog.include.user` setting. - +* Enable for indexing requests: ++ [source,console] -------------------------------------------------- -PUT /my-index-000001/_settings +PUT /*/_settings { - "index.indexing.slowlog.include.user": true + "index.indexing.slowlog.include.user": true, + "index.indexing.slowlog.threshold.index.warn": "30s" } -------------------------------------------------- // TEST[setup:my_index] -By default Elasticsearch will log the first 1000 characters of the _source in -the slowlog. You can change that with `index.indexing.slowlog.source`. Setting -it to `false` or `0` will skip logging the source entirely, while setting it to -`true` will log the entire source regardless of size. The original `_source` is -reformatted by default to make sure that it fits on a single log line. If preserving -the original document format is important, you can turn off reformatting by setting -`index.indexing.slowlog.reformat` to `false`, which will cause the source to be -logged "as is" and can potentially span multiple log lines. +Meeting a slow log threshold does not guarantee that the cluster has a performance problem. If you do notice symptoms, slow logs can provide helpful data for diagnosing upstream traffic patterns or sources and for resolving client-side issues. For example, you can use data included in `X-Opaque-ID`, the `_source` request body, or `user.*` fields to identify the source of your issue. This is similar to troubleshooting <>. + +If you're experiencing search performance issues, then you might also consider investigating searches flagged for their query durations using the <>.
You can then use the profiled query to investigate optimization options using the link:{kibana-ref}/xpack-profiler.html[query profiler]. This type of investigation should usually take place in a non-production environment.

-The index slow log file is configured in the `log4j2.properties` file.
+Slow logging checks each event against the reporting threshold when the event is complete. This means that it can't report events that trigger <>. If you suspect circuit breaker errors, you should also consider enabling <>, which logs events before they are executed.

 [discrete]
-=== Slow log levels
+=== Learn more

-You can mimic the search or indexing slow log level by setting appropriate
-threshold making "more verbose" loggers to be switched off.
-If for instance we want to simulate `index.indexing.slowlog.level: INFO`
-then all we need to do is to set
-`index.indexing.slowlog.threshold.index.debug` and `index.indexing.slowlog.threshold.index.trace` to `-1`.
+To learn about other ways to optimize your search and indexing requests, refer to <> and <>.
\ No newline at end of file
diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc
index 18052cfb64e8f..8e1c211eb9426 100644
--- a/docs/reference/index.asciidoc
+++ b/docs/reference/index.asciidoc
@@ -76,8 +76,12 @@ include::autoscaling/index.asciidoc[]
 include::snapshot-restore/index.asciidoc[]

+include::ccr/index.asciidoc[leveloffset=-1]
+
 // reference
+include::data-store-architecture.asciidoc[]
+
 include::rest-api/index.asciidoc[]

 include::commands/index.asciidoc[]
diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc
index eaef54a1effb1..b6b82422cbb4a 100644
--- a/docs/reference/indices.asciidoc
+++ b/docs/reference/indices.asciidoc
@@ -1,6 +1,12 @@
 [[indices]]
 == Index APIs

+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs].
+--
+
 Index APIs are used to manage individual indices,
 index settings, aliases, mappings, and index templates.

@@ -18,7 +24,6 @@ index settings, aliases, mappings, and index templates.
 * <>
 * <>
 * <>
-* <>
 * <>
 * <>
 * <>
@@ -137,6 +142,5 @@ include::indices/shrink-index.asciidoc[]
 include::indices/simulate-index.asciidoc[]
 include::indices/simulate-template.asciidoc[]
 include::indices/split-index.asciidoc[]
-include::indices/apis/unfreeze.asciidoc[]
 include::indices/update-settings.asciidoc[]
 include::indices/put-mapping.asciidoc[]
diff --git a/docs/reference/indices/add-alias.asciidoc b/docs/reference/indices/add-alias.asciidoc
index e14af6a64a2ec..13b49f5a47dd1 100644
--- a/docs/reference/indices/add-alias.asciidoc
+++ b/docs/reference/indices/add-alias.asciidoc
@@ -4,6 +4,12 @@
 Create or update alias
 ++++

+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs].
+--
+
 Adds a data stream or index to an <>.

 [source,console]
diff --git a/docs/reference/indices/alias-exists.asciidoc b/docs/reference/indices/alias-exists.asciidoc
index a514d36a1bfef..c086143ed3f1c 100644
--- a/docs/reference/indices/alias-exists.asciidoc
+++ b/docs/reference/indices/alias-exists.asciidoc
@@ -4,6 +4,12 @@
 Alias exists
 ++++

+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs].
+--
+
 Checks if an <> exists.
[source,console] diff --git a/docs/reference/indices/aliases.asciidoc b/docs/reference/indices/aliases.asciidoc index 1df9e0a4883b8..243f436f55b5a 100644 --- a/docs/reference/indices/aliases.asciidoc +++ b/docs/reference/indices/aliases.asciidoc @@ -4,6 +4,12 @@ Aliases ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Performs one or more <> actions in a single atomic operation. [source,console] diff --git a/docs/reference/indices/analyze.asciidoc b/docs/reference/indices/analyze.asciidoc index eb4d877b463cd..a211a44730a64 100644 --- a/docs/reference/indices/analyze.asciidoc +++ b/docs/reference/indices/analyze.asciidoc @@ -4,6 +4,12 @@ Analyze ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Performs <> on a text string and returns the resulting tokens. diff --git a/docs/reference/indices/apis/reload-analyzers.asciidoc b/docs/reference/indices/apis/reload-analyzers.asciidoc index ca5f540564f8e..a27a3bb859cf1 100644 --- a/docs/reference/indices/apis/reload-analyzers.asciidoc +++ b/docs/reference/indices/apis/reload-analyzers.asciidoc @@ -2,6 +2,12 @@ [[indices-reload-analyzers]] == Reload search analyzers API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/operation/operation-indices-reload-search-analyzers[Reload search analyzers]. +-- + Reloads an index's <> and their resources. For data streams, the API reloads search analyzers and resources for the stream's backing indices. diff --git a/docs/reference/indices/apis/unfreeze.asciidoc b/docs/reference/indices/apis/unfreeze.asciidoc deleted file mode 100644 index 450efc03c492a..0000000000000 --- a/docs/reference/indices/apis/unfreeze.asciidoc +++ /dev/null @@ -1,55 +0,0 @@ -[role="xpack"] -[[unfreeze-index-api]] -=== Unfreeze index API -++++ -Unfreeze index -++++ - -[WARNING] -.Deprecated in 7.14 -==== -In 8.0, we removed the ability to freeze an index. In previous versions, -freezing an index reduced its memory overhead. However, frozen indices are no -longer useful due to -https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[recent -improvements in heap memory usage]. -You can use this API to unfreeze indices that were frozen in 7.x. Frozen indices -are not related to the frozen data tier. -==== - -Unfreezes an index. - -[[unfreeze-index-api-request]] -==== {api-request-title} - -`POST //_unfreeze` - -[[unfreeze-index-api-prereqs]] -==== {api-prereq-title} - -* If the {es} {security-features} are enabled, you must have the `manage` -<> for the target index or index alias. - -[[unfreeze-index-api-desc]] -==== {api-description-title} - -When a frozen index is unfrozen, the index goes through the normal recovery -process and becomes writeable again. - -[[unfreeze-index-api-path-parms]] -==== {api-path-parms-title} - -``:: - (Required, string) Identifier for the index. 
- -[[unfreeze-index-api-examples]] -==== {api-examples-title} - -The following example unfreezes an index: - -[source,console] --------------------------------------------------- -POST /my-index-000001/_unfreeze --------------------------------------------------- -// TEST[s/^/PUT my-index-000001\n/] -// TEST[skip:unable to ignore deprecation warning] diff --git a/docs/reference/indices/clearcache.asciidoc b/docs/reference/indices/clearcache.asciidoc index a3150ec6f72ee..f981a6d69dd63 100644 --- a/docs/reference/indices/clearcache.asciidoc +++ b/docs/reference/indices/clearcache.asciidoc @@ -4,6 +4,12 @@ Clear cache ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Clears the caches of one or more indices. For data streams, the API clears the caches of the stream's backing indices. diff --git a/docs/reference/indices/clone-index.asciidoc b/docs/reference/indices/clone-index.asciidoc index c8e5d2e200f2e..734ff33e48215 100644 --- a/docs/reference/indices/clone-index.asciidoc +++ b/docs/reference/indices/clone-index.asciidoc @@ -4,6 +4,12 @@ Clone index ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Clones an existing index. [source,console] diff --git a/docs/reference/indices/close.asciidoc b/docs/reference/indices/close.asciidoc index a4bf1742fdea8..56d318aad2f7a 100644 --- a/docs/reference/indices/close.asciidoc +++ b/docs/reference/indices/close.asciidoc @@ -4,6 +4,12 @@ Close index ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Closes an index. [source,console] diff --git a/docs/reference/indices/create-data-stream.asciidoc b/docs/reference/indices/create-data-stream.asciidoc index e97e9973f1063..dd2d471117949 100644 --- a/docs/reference/indices/create-data-stream.asciidoc +++ b/docs/reference/indices/create-data-stream.asciidoc @@ -5,6 +5,12 @@ Create data stream ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. +-- + Creates a new <>. //// diff --git a/docs/reference/indices/create-index.asciidoc b/docs/reference/indices/create-index.asciidoc index 2e66f3d6030cb..a210b05b2bcf0 100644 --- a/docs/reference/indices/create-index.asciidoc +++ b/docs/reference/indices/create-index.asciidoc @@ -4,6 +4,12 @@ Create index ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Creates a new index. [source,console] diff --git a/docs/reference/indices/dangling-index-delete.asciidoc b/docs/reference/indices/dangling-index-delete.asciidoc index 6af35031e9e61..b1fd6790972c4 100644 --- a/docs/reference/indices/dangling-index-delete.asciidoc +++ b/docs/reference/indices/dangling-index-delete.asciidoc @@ -4,6 +4,12 @@ Delete dangling index ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Deletes a dangling index. 
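For illustration, a sketch of the request shape (the UUID below is a placeholder, not a value from this change): dangling indices are addressed by UUID rather than by name, and deleting one requires explicitly accepting data loss.

[source,console]
--------------------------------------------------
DELETE /_dangling/<index-uuid>?accept_data_loss=true <1>
--------------------------------------------------
// TEST[skip:illustrative sketch]

<1> Replace `<index-uuid>` with the UUID reported by the list dangling indices API.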
[[dangling-index-delete-api-request]] diff --git a/docs/reference/indices/dangling-index-import.asciidoc b/docs/reference/indices/dangling-index-import.asciidoc index 44cde56de8c95..e266ecd2c9c70 100644 --- a/docs/reference/indices/dangling-index-import.asciidoc +++ b/docs/reference/indices/dangling-index-import.asciidoc @@ -4,6 +4,12 @@ Import dangling index ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Imports a dangling index. [[dangling-index-import-api-request]] diff --git a/docs/reference/indices/dangling-indices-list.asciidoc b/docs/reference/indices/dangling-indices-list.asciidoc index b7774843e36d9..2b7dcdc497ebf 100644 --- a/docs/reference/indices/dangling-indices-list.asciidoc +++ b/docs/reference/indices/dangling-indices-list.asciidoc @@ -4,6 +4,12 @@ List dangling indices ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Lists dangling indices. [[dangling-indices-list-api-request]] diff --git a/docs/reference/indices/data-stream-stats.asciidoc b/docs/reference/indices/data-stream-stats.asciidoc index 3ed285abc035a..cce145b4dadcc 100644 --- a/docs/reference/indices/data-stream-stats.asciidoc +++ b/docs/reference/indices/data-stream-stats.asciidoc @@ -5,6 +5,12 @@ Data stream stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. +-- + Retrieves statistics for one or more <>. //// diff --git a/docs/reference/indices/delete-alias.asciidoc b/docs/reference/indices/delete-alias.asciidoc index 748862df06100..c204c89579352 100644 --- a/docs/reference/indices/delete-alias.asciidoc +++ b/docs/reference/indices/delete-alias.asciidoc @@ -4,6 +4,12 @@ Delete alias ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Removes a data stream or index from an <>. [source,console] diff --git a/docs/reference/indices/delete-component-template.asciidoc b/docs/reference/indices/delete-component-template.asciidoc index 065a4adb90023..27c1e00a5d57a 100644 --- a/docs/reference/indices/delete-component-template.asciidoc +++ b/docs/reference/indices/delete-component-template.asciidoc @@ -4,6 +4,12 @@ Delete component template ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Deletes an existing component template. //// diff --git a/docs/reference/indices/delete-data-stream.asciidoc b/docs/reference/indices/delete-data-stream.asciidoc index 38e7a00d451d8..1cb43a615ede8 100644 --- a/docs/reference/indices/delete-data-stream.asciidoc +++ b/docs/reference/indices/delete-data-stream.asciidoc @@ -5,6 +5,12 @@ Delete data stream ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. +-- + Deletes one or more <> and their backing indices. See <>. diff --git a/docs/reference/indices/delete-index-template-v1.asciidoc b/docs/reference/indices/delete-index-template-v1.asciidoc index 98b1e2fb255f1..e035c13a7bceb 100644 --- a/docs/reference/indices/delete-index-template-v1.asciidoc +++ b/docs/reference/indices/delete-index-template-v1.asciidoc @@ -9,6 +9,12 @@ templates>>, which are deprecated and will be replaced by the composable templates introduced in {es} 7.8. 
For information about composable templates, see <>. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Deletes a legacy index template. //// diff --git a/docs/reference/indices/delete-index-template.asciidoc b/docs/reference/indices/delete-index-template.asciidoc index b828e4a536b71..23713f62fa031 100644 --- a/docs/reference/indices/delete-index-template.asciidoc +++ b/docs/reference/indices/delete-index-template.asciidoc @@ -4,6 +4,12 @@ Delete index template ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Deletes an <>. //// diff --git a/docs/reference/indices/delete-index.asciidoc b/docs/reference/indices/delete-index.asciidoc index d5d168154e44a..81ee7b502d377 100644 --- a/docs/reference/indices/delete-index.asciidoc +++ b/docs/reference/indices/delete-index.asciidoc @@ -4,6 +4,12 @@ Delete index ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Deletes one or more indices. [source,console] diff --git a/docs/reference/indices/diskusage.asciidoc b/docs/reference/indices/diskusage.asciidoc index 3510ba346e5a7..6db122ab7d836 100644 --- a/docs/reference/indices/diskusage.asciidoc +++ b/docs/reference/indices/diskusage.asciidoc @@ -6,6 +6,12 @@ experimental[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Analyzes the disk usage of each field of an index or data stream. This API might not support indices created in previous {es} versions. The result of a small index can be inaccurate as some parts of an index diff --git a/docs/reference/indices/downsample-data-stream.asciidoc b/docs/reference/indices/downsample-data-stream.asciidoc index 5ace4e03dfb66..6354f8e30d254 100644 --- a/docs/reference/indices/downsample-data-stream.asciidoc +++ b/docs/reference/indices/downsample-data-stream.asciidoc @@ -5,6 +5,12 @@ Downsample ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. +-- + Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. For example, @@ -75,6 +81,8 @@ DELETE _index_template/* //// // end::downsample-example[] +Check the <> documentation for an overview, details about the downsampling process, and examples of running downsampling manually and as part of an ILM policy. + [[downsample-api-request]] ==== {api-request-title} @@ -115,44 +123,4 @@ to aggregate the original time series index. For example, `60m` produces a document for each 60 minute (hourly) interval. This follows standard time formatting syntax as used elsewhere in {es}. + -NOTE: Smaller, more granular intervals take up proportionally more space. - -[[downsample-api-process]] -==== The downsampling process - -The downsampling operation traverses the source TSDS index and performs the -following steps: - -. Creates a new document for each value of the `_tsid` field and each -`@timestamp` value, rounded to the `fixed_interval` defined in the downsample -configuration. -. For each new document, copies all <> from the source index to the target index. Dimensions in a -TSDS are constant, so this is done only once per bucket. -. 
For each <> field, computes aggregations -for all documents in the bucket. Depending on the metric type of each metric -field a different set of pre-aggregated results is stored: - -** `gauge`: The `min`, `max`, `sum`, and `value_count` are stored; `value_count` -is stored as type `aggregate_metric_double`. -** `counter`: The `last_value` is stored. -. For all other fields, the most recent value is copied to the target index. - -[[downsample-api-mappings]] -==== Source and target index field mappings - -Fields in the target, downsampled index are created based on fields in the -original source index, as follows: - -. All fields mapped with the `time-series-dimension` parameter are created in -the target downsample index with the same mapping as in the source index. -. All fields mapped with the `time_series_metric` parameter are created -in the target downsample index with the same mapping as in the source -index. An exception is that for fields mapped as `time_series_metric: gauge` -the field type is changed to `aggregate_metric_double`. -. All other fields that are neither dimensions nor metrics (that is, label -fields), are created in the target downsample index with the same mapping -that they had in the source index. - -Check the <> documentation for an overview and -examples of running downsampling manually and as part of an ILM policy. +NOTE: Smaller, more granular intervals take up proportionally more space. \ No newline at end of file diff --git a/docs/reference/indices/field-usage-stats.asciidoc b/docs/reference/indices/field-usage-stats.asciidoc index a4856092834e5..cbb65f8c2effb 100644 --- a/docs/reference/indices/field-usage-stats.asciidoc +++ b/docs/reference/indices/field-usage-stats.asciidoc @@ -6,6 +6,12 @@ experimental[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Returns field usage information for each shard and field of an index. Field usage statistics are automatically captured when diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index 61c44f157da95..a458e6906b62f 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -4,6 +4,12 @@ Flush ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Flushes one or more data streams or indices. [source,console] diff --git a/docs/reference/indices/forcemerge.asciidoc b/docs/reference/indices/forcemerge.asciidoc index e1581a3cfa632..cddf368ce2398 100644 --- a/docs/reference/indices/forcemerge.asciidoc +++ b/docs/reference/indices/forcemerge.asciidoc @@ -4,6 +4,12 @@ Force merge ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Forces a <> on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream's backing indices. diff --git a/docs/reference/indices/get-alias.asciidoc b/docs/reference/indices/get-alias.asciidoc index d4c5b92116949..d305bf02e746a 100644 --- a/docs/reference/indices/get-alias.asciidoc +++ b/docs/reference/indices/get-alias.asciidoc @@ -4,6 +4,12 @@ Get alias ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Retrieves information for one or more <>. 
[source,console] diff --git a/docs/reference/indices/get-component-template.asciidoc b/docs/reference/indices/get-component-template.asciidoc index f35192ca448db..33676c0cf2a71 100644 --- a/docs/reference/indices/get-component-template.asciidoc +++ b/docs/reference/indices/get-component-template.asciidoc @@ -4,6 +4,12 @@ Get component template ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Retrieves information about one or more component templates. ////////////////////////// @@ -67,7 +73,7 @@ Wildcard (`*`) expressions are supported. include::{docdir}/rest-api/common-parms.asciidoc[tag=flat-settings] -include::{docdir}/rest-api/common-parms.asciidoc[tag=local] +include::{docdir}/rest-api/common-parms.asciidoc[tag=local-deprecated-9.0.0] include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout] diff --git a/docs/reference/indices/get-data-stream.asciidoc b/docs/reference/indices/get-data-stream.asciidoc index 6bf150897acab..ccab53f020e5f 100644 --- a/docs/reference/indices/get-data-stream.asciidoc +++ b/docs/reference/indices/get-data-stream.asciidoc @@ -5,6 +5,12 @@ Get data stream ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. +-- + Retrieves information about one or more <>. See <>. diff --git a/docs/reference/indices/get-field-mapping.asciidoc b/docs/reference/indices/get-field-mapping.asciidoc index ac5895872fbb5..4b25e0e925541 100644 --- a/docs/reference/indices/get-field-mapping.asciidoc +++ b/docs/reference/indices/get-field-mapping.asciidoc @@ -4,6 +4,12 @@ Get field mapping ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Retrieves <> for one or more fields. For data streams, the API retrieves field mappings for the stream's backing indices. diff --git a/docs/reference/indices/get-index-template-v1.asciidoc b/docs/reference/indices/get-index-template-v1.asciidoc index 602ca2fe454ad..d1b652e3987d8 100644 --- a/docs/reference/indices/get-index-template-v1.asciidoc +++ b/docs/reference/indices/get-index-template-v1.asciidoc @@ -4,10 +4,16 @@ Get index template (legacy) ++++ -IMPORTANT: This documentation is about legacy index templates, -which are deprecated and will be replaced by the composable templates introduced in {es} 7.8. +IMPORTANT: This documentation is about legacy index templates, +which are deprecated and will be replaced by the composable templates introduced in {es} 7.8. For information about composable templates, see <>. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Retrieves information about one or more index templates. //// @@ -63,7 +69,7 @@ or use a value of `_all` or `*`. 
include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=flat-settings] -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=local] +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=local-deprecated-9.0.0] include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] diff --git a/docs/reference/indices/get-index-template.asciidoc b/docs/reference/indices/get-index-template.asciidoc index 2cde5adc8ae23..4cbf2b313f9de 100644 --- a/docs/reference/indices/get-index-template.asciidoc +++ b/docs/reference/indices/get-index-template.asciidoc @@ -4,6 +4,12 @@ Get index template ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Returns information about one or more index templates. //// @@ -59,7 +65,7 @@ expressions. If omitted, all templates are returned. include::{docdir}/rest-api/common-parms.asciidoc[tag=flat-settings] -include::{docdir}/rest-api/common-parms.asciidoc[tag=local] +include::{docdir}/rest-api/common-parms.asciidoc[tag=local-deprecated-9.0.0] include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout] diff --git a/docs/reference/indices/get-index.asciidoc b/docs/reference/indices/get-index.asciidoc index 2551d25801d70..4e026f4ff07a3 100644 --- a/docs/reference/indices/get-index.asciidoc +++ b/docs/reference/indices/get-index.asciidoc @@ -4,6 +4,12 @@ Get index ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Returns information about one or more indices. For data streams, the API returns information about the stream's backing indices. diff --git a/docs/reference/indices/get-mapping.asciidoc b/docs/reference/indices/get-mapping.asciidoc index 16dc8c66d0715..543bda6cb3242 100644 --- a/docs/reference/indices/get-mapping.asciidoc +++ b/docs/reference/indices/get-mapping.asciidoc @@ -4,6 +4,12 @@ Get mapping ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Retrieves <> for one or more indices. For data streams, the API retrieves mappings for the stream's backing indices. diff --git a/docs/reference/indices/get-settings.asciidoc b/docs/reference/indices/get-settings.asciidoc index b6cb6d2926387..d48514b9a3493 100644 --- a/docs/reference/indices/get-settings.asciidoc +++ b/docs/reference/indices/get-settings.asciidoc @@ -4,6 +4,12 @@ Get index settings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Returns setting information for one or more indices. For data streams, the API returns setting information for the stream's backing indices. diff --git a/docs/reference/indices/index-mgmt.asciidoc b/docs/reference/indices/index-mgmt.asciidoc index 73643dbfd4b3b..131bc79faa40c 100644 --- a/docs/reference/indices/index-mgmt.asciidoc +++ b/docs/reference/indices/index-mgmt.asciidoc @@ -43,7 +43,7 @@ For more information on managing indices, refer to <>. * To filter the list of indices, use the search bar or click a badge. Badges indicate if an index is a <>, a -<>, or <>. +<>, or <>. 
* To drill down into the index <>, <>, and statistics, diff --git a/docs/reference/indices/index-template-exists-v1.asciidoc b/docs/reference/indices/index-template-exists-v1.asciidoc index 2358f0b1a376e..f7b3a4416715d 100644 --- a/docs/reference/indices/index-template-exists-v1.asciidoc +++ b/docs/reference/indices/index-template-exists-v1.asciidoc @@ -9,6 +9,12 @@ templates>>, which are deprecated and will be replaced by the composable templates introduced in {es} 7.8. For information about composable templates, see <>. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Checks if an <> exists. diff --git a/docs/reference/indices/index-templates.asciidoc b/docs/reference/indices/index-templates.asciidoc index 5b152ecf177ec..90c4a6952446e 100644 --- a/docs/reference/indices/index-templates.asciidoc +++ b/docs/reference/indices/index-templates.asciidoc @@ -61,7 +61,7 @@ applying the templates, do one or more of the following: - Use a non-overlapping index pattern. -- Assign templates with an overlapping pattern a `priority` higher than `200`. +- Assign templates with an overlapping pattern a `priority` higher than `500`. For example, if you don't use {fleet} or {agent} and want to create a template for the `logs-*` index pattern, assign your template a priority of `500`. This ensures your template is applied instead of the built-in template for diff --git a/docs/reference/indices/indices-exists.asciidoc b/docs/reference/indices/indices-exists.asciidoc index d699d36add03b..11da491adf8e5 100644 --- a/docs/reference/indices/indices-exists.asciidoc +++ b/docs/reference/indices/indices-exists.asciidoc @@ -4,6 +4,12 @@ Exists ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Checks if a data stream, index, or alias exists. [source,console] diff --git a/docs/reference/indices/migrate-to-data-stream.asciidoc b/docs/reference/indices/migrate-to-data-stream.asciidoc index 48302761c7dc9..745e3e28683c4 100644 --- a/docs/reference/indices/migrate-to-data-stream.asciidoc +++ b/docs/reference/indices/migrate-to-data-stream.asciidoc @@ -5,6 +5,12 @@ Migrate to data stream ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-data-stream[Data stream APIs]. +-- + Converts an <> to a <>. //// diff --git a/docs/reference/indices/open-close.asciidoc b/docs/reference/indices/open-close.asciidoc index a077c4d19fd56..fe00e5a968d96 100644 --- a/docs/reference/indices/open-close.asciidoc +++ b/docs/reference/indices/open-close.asciidoc @@ -4,6 +4,12 @@ Open index ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Opens a closed index. For data streams, the API opens any closed backing indices. diff --git a/docs/reference/indices/put-component-template.asciidoc b/docs/reference/indices/put-component-template.asciidoc index d880edfe42b8c..9f129c3507d87 100644 --- a/docs/reference/indices/put-component-template.asciidoc +++ b/docs/reference/indices/put-component-template.asciidoc @@ -4,6 +4,12 @@ Create or update component template ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Creates or updates a component template. 
Component templates are building blocks for constructing <> that specify index <>, <>, and diff --git a/docs/reference/indices/put-index-template-v1.asciidoc b/docs/reference/indices/put-index-template-v1.asciidoc index 86a8a54edd97f..8b07acfdadba8 100644 --- a/docs/reference/indices/put-index-template-v1.asciidoc +++ b/docs/reference/indices/put-index-template-v1.asciidoc @@ -8,6 +8,12 @@ IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in {es} 7.8. For information about composable templates, see <>. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Creates or updates an index template. [source,console] diff --git a/docs/reference/indices/put-index-template.asciidoc b/docs/reference/indices/put-index-template.asciidoc index 9a31037546796..fac928c4a61b0 100644 --- a/docs/reference/indices/put-index-template.asciidoc +++ b/docs/reference/indices/put-index-template.asciidoc @@ -4,6 +4,12 @@ Create or update index template ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Creates or updates an index template. Index templates define <>, <>, and <> that can be applied automatically to new indices. diff --git a/docs/reference/indices/put-mapping.asciidoc b/docs/reference/indices/put-mapping.asciidoc index dc6dbff1df42c..479bdff22a80c 100644 --- a/docs/reference/indices/put-mapping.asciidoc +++ b/docs/reference/indices/put-mapping.asciidoc @@ -4,6 +4,12 @@ Update mapping ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Adds new fields to an existing data stream or index. You can also use this API to change the search settings of existing fields. diff --git a/docs/reference/indices/recovery.asciidoc b/docs/reference/indices/recovery.asciidoc index 06b4d9d92e49f..032ffc5c5a082 100644 --- a/docs/reference/indices/recovery.asciidoc +++ b/docs/reference/indices/recovery.asciidoc @@ -4,6 +4,12 @@ Index recovery ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Returns information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices. diff --git a/docs/reference/indices/refresh.asciidoc b/docs/reference/indices/refresh.asciidoc index bd8e821ff56b9..6cc78b22323c1 100644 --- a/docs/reference/indices/refresh.asciidoc +++ b/docs/reference/indices/refresh.asciidoc @@ -4,6 +4,12 @@ Refresh ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream's backing indices. For more information about the refresh operation, see diff --git a/docs/reference/indices/resolve-cluster.asciidoc b/docs/reference/indices/resolve-cluster.asciidoc index 48e6bfac4af10..a0583f4d7beaf 100644 --- a/docs/reference/indices/resolve-cluster.asciidoc +++ b/docs/reference/indices/resolve-cluster.asciidoc @@ -4,6 +4,12 @@ Resolve cluster ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. 
+--
+
 Resolves the specified index expressions to return information about
 each cluster, including the local cluster, if included.
@@ -22,8 +28,19 @@ For each cluster in the index expression, information is returned about:
 3. whether there are any indices, aliases or data streams on that cluster that match
 the index expression
 4. whether the search is likely to have errors returned when you do the {ccs} (including any
-   authorization errors if your user does not have permission to query the index)
-5. cluster version information, including the Elasticsearch server version
+   authorization errors if your user does not have permission to query a remote cluster or
+   the indices on that cluster)
+5. (in some cases) cluster version information, including the Elasticsearch server version
+
+[TIP]
+====
+Whenever a security exception is returned for a remote cluster, that remote
+will always be marked as `connected=false` in the response, since your user does not have
+permission to access the cluster (or the remote index) that you are querying.
+Once the proper security permissions are obtained, you can rely on the `connected` field
+in the response to determine whether the remote cluster is available and ready for querying.
+====
+

 ////
 [source,console]
diff --git a/docs/reference/indices/resolve.asciidoc b/docs/reference/indices/resolve.asciidoc
index 856546b037fea..f0b91695b0aa1 100644
--- a/docs/reference/indices/resolve.asciidoc
+++ b/docs/reference/indices/resolve.asciidoc
@@ -4,6 +4,12 @@
 Resolve index
 ++++

+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs].
+--
+
 Resolves the specified name(s) and/or index patterns for indices, aliases,
 and data streams. Multiple patterns and remote clusters are supported.
diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc
index 2a47d28e5358d..cc93204469b80 100644
--- a/docs/reference/indices/rollover-index.asciidoc
+++ b/docs/reference/indices/rollover-index.asciidoc
@@ -4,6 +4,12 @@
 Rollover
 ++++

+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs].
+--
+
 Creates a new index for a <> or <>.

 [source,console]
diff --git a/docs/reference/indices/segments.asciidoc b/docs/reference/indices/segments.asciidoc
index 54f0b6c0cd6de..b2b7b244321f4 100644
--- a/docs/reference/indices/segments.asciidoc
+++ b/docs/reference/indices/segments.asciidoc
@@ -4,6 +4,12 @@
 Index segments
 ++++

+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs].
+--
+
 Returns low-level information about the https://lucene.apache.org/core/[Lucene] segments in index shards.
 For data streams, the API returns information about the stream's backing indices.
diff --git a/docs/reference/indices/shard-stores.asciidoc b/docs/reference/indices/shard-stores.asciidoc
index 1b001a3175b8c..35f6a0915caa0 100644
--- a/docs/reference/indices/shard-stores.asciidoc
+++ b/docs/reference/indices/shard-stores.asciidoc
@@ -4,6 +4,12 @@
 Index shard stores
 ++++

+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs].
+--
+
 Retrieves store information about replica shards in one or more indices.
For data streams, the API retrieves store diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc index 244733282d46e..931731fceb49d 100644 --- a/docs/reference/indices/shrink-index.asciidoc +++ b/docs/reference/indices/shrink-index.asciidoc @@ -4,6 +4,12 @@ Shrink index ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Shrinks an existing index into a new index with fewer primary shards. diff --git a/docs/reference/indices/simulate-index.asciidoc b/docs/reference/indices/simulate-index.asciidoc index 5e5709a2d82fc..414b54aa5f414 100644 --- a/docs/reference/indices/simulate-index.asciidoc +++ b/docs/reference/indices/simulate-index.asciidoc @@ -4,6 +4,12 @@ Simulate index ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Returns the index configuration that would be applied to the specified index from an existing <>. diff --git a/docs/reference/indices/simulate-template.asciidoc b/docs/reference/indices/simulate-template.asciidoc index c7397ace97886..1ea72970b647a 100644 --- a/docs/reference/indices/simulate-template.asciidoc +++ b/docs/reference/indices/simulate-template.asciidoc @@ -4,6 +4,12 @@ Simulate template ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Returns the index configuration that would be applied by a particular <>. diff --git a/docs/reference/indices/split-index.asciidoc b/docs/reference/indices/split-index.asciidoc index 0c93b572639db..4bda778839735 100644 --- a/docs/reference/indices/split-index.asciidoc +++ b/docs/reference/indices/split-index.asciidoc @@ -4,6 +4,12 @@ Split index ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Splits an existing index into a new index with more primary shards. [source,console] diff --git a/docs/reference/indices/stats.asciidoc b/docs/reference/indices/stats.asciidoc index 088d65c37ec6e..1370935a7cde7 100644 --- a/docs/reference/indices/stats.asciidoc +++ b/docs/reference/indices/stats.asciidoc @@ -4,6 +4,12 @@ Index stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Returns statistics for one or more indices. For data streams, the API retrieves statistics for the stream's backing indices. diff --git a/docs/reference/indices/update-settings.asciidoc b/docs/reference/indices/update-settings.asciidoc index 3b29946d5ed7d..dd92a922ec655 100644 --- a/docs/reference/indices/update-settings.asciidoc +++ b/docs/reference/indices/update-settings.asciidoc @@ -4,6 +4,12 @@ Update index settings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs]. +-- + Changes a <> in real time. For data streams, index setting changes are applied to all backing indices by diff --git a/docs/reference/inference/chat-completion-inference.asciidoc b/docs/reference/inference/chat-completion-inference.asciidoc new file mode 100644 index 0000000000000..83a8f94634f2f --- /dev/null +++ b/docs/reference/inference/chat-completion-inference.asciidoc @@ -0,0 +1,417 @@ +[role="xpack"] +[[chat-completion-inference-api]] +=== Chat completion inference API + +Streams a chat completion response. 
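As a quick orientation before the details that follow, a minimal streaming request might look like the following sketch (it mirrors the examples later on this page and assumes an {infer} endpoint named `openai-completion` already exists):

[source,console]
------------------------------------------------------------
POST _inference/chat_completion/openai-completion/_stream
{
  "messages": [
    {
      "role": "user",
      "content": "Say this is a test"
    }
  ]
}
------------------------------------------------------------
// TEST[skip:illustrative sketch]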
+ +IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. +For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. +However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. + + +[discrete] +[[chat-completion-inference-api-request]] +==== {api-request-title} + +`POST /_inference//_unified` + +`POST /_inference/chat_completion//_unified` + + +[discrete] +[[chat-completion-inference-api-prereqs]] +==== {api-prereq-title} + +* Requires the `monitor_inference` <> +(the built-in `inference_admin` and `inference_user` roles grant this privilege) +* You must use a client that supports streaming. + + +[discrete] +[[chat-completion-inference-api-desc]] +==== {api-description-title} + +The chat completion {infer} API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation. +It only works with the `chat_completion` task type for `openai` and `elastic` {infer} services. + +[NOTE] +==== +The `chat_completion` task type is only available within the _unified API and only supports streaming. +==== + +[discrete] +[[chat-completion-inference-api-path-params]] +==== {api-path-parms-title} + +``:: +(Required, string) +The unique identifier of the {infer} endpoint. + + +``:: +(Optional, string) +The type of {infer} task that the model performs. If included, this must be set to the value `chat_completion`. + + +[discrete] +[[chat-completion-inference-api-request-body]] +==== {api-request-body-title} + +`messages`:: +(Required, array of objects) A list of objects representing the conversation. +Requests should generally only add new messages from the user (role `user`). The other message roles (`assistant`, `system`, or `tool`) should generally only be copied from the response to a previous completion request, such that the messages array is built up throughout a conversation. ++ +.Assistant message +[%collapsible%closed] +===== +`content`:: +(Required unless `tool_calls` is specified, string or array of objects) +The contents of the message. ++ +include::inference-shared.asciidoc[tag=chat-completion-schema-content-with-examples] ++ +`role`:: +(Required, string) +The role of the message author. This should be set to `assistant` for this type of message. ++ +`tool_calls`:: +(Optional, array of objects) +The tool calls generated by the model. ++ +.Examples +[%collapsible%closed] +====== +[source,js] +------------------------------------------------------------ +{ + "tool_calls": [ + { + "id": "call_KcAjWtAww20AihPHphUh46Gd", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\"location\":\"Boston, MA\"}" + } + } + ] +} +------------------------------------------------------------ +// NOTCONSOLE +====== ++ +`id`::: +(Required, string) +The identifier of the tool call. ++ +`type`::: +(Required, string) +The type of tool call. This must be set to the value `function`. ++ +`function`::: +(Required, object) +The function that the model called. ++ +`name`:::: +(Required, string) +The name of the function to call. ++ +`arguments`:::: +(Required, string) +The arguments to call the function with in JSON format. 
+=====
++
+.System message
+[%collapsible%closed]
+=====
+`content`:::
+(Required, string or array of objects)
+The contents of the message.
++
+include::inference-shared.asciidoc[tag=chat-completion-schema-content-with-examples]
++
+`role`:::
+(Required, string)
+The role of the message author. This should be set to `system` for this type of message.
+=====
++
+.Tool message
+[%collapsible%closed]
+=====
+`content`::
+(Required, string or array of objects)
+The contents of the message.
++
+include::inference-shared.asciidoc[tag=chat-completion-schema-content-with-examples]
++
+`role`::
+(Required, string)
+The role of the message author. This should be set to `tool` for this type of message.
++
+`tool_call_id`::
+(Required, string)
+The tool call that this message is responding to.
+=====
++
+.User message
+[%collapsible%closed]
+=====
+`content`::
+(Required, string or array of objects)
+The contents of the message.
++
+include::inference-shared.asciidoc[tag=chat-completion-schema-content-with-examples]
++
+`role`::
+(Required, string)
+The role of the message author. This should be set to `user` for this type of message.
+=====
+
+`model`::
+(Optional, string)
+The ID of the model to use. By default, the model ID is set to the value included when creating the inference endpoint.
+
+`max_completion_tokens`::
+(Optional, integer)
+The upper bound for the number of tokens that can be generated for a completion request.
+
+`stop`::
+(Optional, array of strings)
+A sequence of strings to control when the model should stop generating additional tokens.
+
+`temperature`::
+(Optional, float)
+The sampling temperature to use.
+
+`tools`::
+(Optional, array of objects)
+A list of tools that the model can call.
++
+.Structure
+[%collapsible%closed]
+=====
+`type`::
+(Required, string)
+The type of tool. This must be set to the value `function`.
++
+`function`::
+(Required, object)
+The function definition.
++
+`description`:::
+(Optional, string)
+A description of what the function does. This is used by the model to choose when and how to call the function.
++
+`name`:::
+(Required, string)
+The name of the function.
++
+`parameters`:::
+(Optional, object)
+The parameters the function accepts. This should be formatted as a JSON object.
++
+`strict`:::
+(Optional, boolean)
+Whether to enable schema adherence when generating the function call.
+=====
++
+.Examples
+[%collapsible%closed]
+======
+[source,js]
+------------------------------------------------------------
+{
+    "tools": [
+        {
+            "type": "function",
+            "function": {
+                "name": "get_price_of_item",
+                "description": "Get the current price of an item",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "item": {
+                            "id": "12345"
+                        },
+                        "unit": {
+                            "type": "currency"
+                        }
+                    }
+                }
+            }
+        }
+    ]
+}
+------------------------------------------------------------
+// NOTCONSOLE
+======
+
+`tool_choice`::
+(Optional, string or object)
+Controls which tool is called by the model.
++
+String representation:::
+One of `auto`, `none`, or `required`. `auto` allows the model to choose between calling tools and generating a message. `none` causes the model to not call any tools. `required` forces the model to call one or more tools.
++
+Object representation:::
++
+.Structure
+[%collapsible%closed]
+=====
+`type`::
+(Required, string)
+The type of the tool. This must be set to the value `function`.
++
+`function`::
+(Required, object)
++
+`name`:::
+(Required, string)
+The name of the function to call.
+=====
++
+.Examples
+[%collapsible%closed]
+=====
+[source,js]
+------------------------------------------------------------
+{
+    "tool_choice": {
+        "type": "function",
+        "function": {
+            "name": "get_current_weather"
+        }
+    }
+}
+------------------------------------------------------------
+// NOTCONSOLE
+=====
+
+`top_p`::
+(Optional, float)
+Nucleus sampling, an alternative to sampling with temperature.
+
+[discrete]
+[[chat-completion-inference-api-example]]
+==== {api-examples-title}
+
+The following example performs a streaming chat completion on an example question.
+
+
+[source,console]
+------------------------------------------------------------
+POST _inference/chat_completion/openai-completion/_stream
+{
+  "model": "gpt-4o",
+  "messages": [
+      {
+          "role": "user",
+          "content": "What is Elastic?"
+      }
+  ]
+}
+------------------------------------------------------------
+// TEST[skip:TBD]
+
+The following example performs a chat completion using an Assistant message with `tool_calls`.
+
+[source,console]
+------------------------------------------------------------
+POST _inference/chat_completion/openai-completion/_stream
+{
+  "messages": [
+      {
+          "role": "assistant",
+          "content": "Let's find out what the weather is",
+          "tool_calls": [ <1>
+              {
+                  "id": "call_KcAjWtAww20AihPHphUh46Gd",
+                  "type": "function",
+                  "function": {
+                      "name": "get_current_weather",
+                      "arguments": "{\"location\":\"Boston, MA\"}"
+                  }
+              }
+          ]
+      },
+      { <2>
+          "role": "tool",
+          "content": "The weather is cold",
+          "tool_call_id": "call_KcAjWtAww20AihPHphUh46Gd"
+      }
+  ]
+}
+------------------------------------------------------------
+// TEST[skip:TBD]
+
+<1> Each tool call needs a corresponding Tool message.
+<2> The corresponding Tool message.
+
+The following example performs a chat completion using a User message with `tools` and `tool_choice`.
+
+[source,console]
+------------------------------------------------------------
+POST _inference/chat_completion/openai-completion/_stream
+{
+  "messages": [
+      {
+          "role": "user",
+          "content": [
+              {
+                  "type": "text",
+                  "text": "What's the price of a scarf?"
+              }
+          ]
+      }
+  ],
+  "tools": [
+      {
+          "type": "function",
+          "function": {
+              "name": "get_current_price",
+              "description": "Get the current price of an item",
+              "parameters": {
+                  "type": "object",
+                  "properties": {
+                      "item": {
+                          "id": "123"
+                      }
+                  }
+              }
+          }
+      }
+  ],
+  "tool_choice": {
+      "type": "function",
+      "function": {
+          "name": "get_current_price"
+      }
+  }
+}
+------------------------------------------------------------
+// TEST[skip:TBD]
+
+The API returns the following response when a request is made to the OpenAI service:
+
+
+[source,txt]
+------------------------------------------------------------
+event: message
+data: {"chat_completion":{"id":"chatcmpl-Ae0TWsy2VPnSfBbv5UztnSdYUMFP3","choices":[{"delta":{"content":"","role":"assistant"},"index":0}],"model":"gpt-4o-2024-08-06","object":"chat.completion.chunk"}}
+
+event: message
+data: {"chat_completion":{"id":"chatcmpl-Ae0TWsy2VPnSfBbv5UztnSdYUMFP3","choices":[{"delta":{"content":"Elastic"},"index":0}],"model":"gpt-4o-2024-08-06","object":"chat.completion.chunk"}}
+
+event: message
+data: {"chat_completion":{"id":"chatcmpl-Ae0TWsy2VPnSfBbv5UztnSdYUMFP3","choices":[{"delta":{"content":" is"},"index":0}],"model":"gpt-4o-2024-08-06","object":"chat.completion.chunk"}}
+
+(...)
+ +event: message +data: {"chat_completion":{"id":"chatcmpl-Ae0TWsy2VPnSfBbv5UztnSdYUMFP3","choices":[],"model":"gpt-4o-2024-08-06","object":"chat.completion.chunk","usage":{"completion_tokens":28,"prompt_tokens":16,"total_tokens":44}}} <1> + +event: message +data: [DONE] +------------------------------------------------------------ +// NOTCONSOLE + +<1> The last object message of the stream contains the token usage information. diff --git a/docs/reference/inference/delete-inference.asciidoc b/docs/reference/inference/delete-inference.asciidoc index a83fb1a516b80..a9dfeabcdf259 100644 --- a/docs/reference/inference/delete-inference.asciidoc +++ b/docs/reference/inference/delete-inference.asciidoc @@ -2,6 +2,12 @@ [[delete-inference-api]] === Delete {infer} API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Deletes an {infer} endpoint. IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. diff --git a/docs/reference/inference/get-inference.asciidoc b/docs/reference/inference/get-inference.asciidoc index 16e38d2aa148b..74220514eeb98 100644 --- a/docs/reference/inference/get-inference.asciidoc +++ b/docs/reference/inference/get-inference.asciidoc @@ -2,6 +2,12 @@ [[get-inference-api]] === Get {infer} API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Retrieves {infer} endpoint information. IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. @@ -78,12 +84,17 @@ The API returns the following response: { "inference_id": "my-elser-model", "task_type": "sparse_embedding", - "service": "elser", + "service": "elasticsearch", "service_settings": { "num_allocations": 1, - "num_threads": 1 + "num_threads": 1, + "model_id": ".elser_model_2" }, - "task_settings": {} + "chunking_settings": { + "strategy": "sentence", + "max_chunk_size": 250, + "sentence_overlap": 1 + } } ------------------------------------------------------------ // NOTCONSOLE diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc index 8d5ee1b7d6ba5..4f27409973ca2 100644 --- a/docs/reference/inference/inference-apis.asciidoc +++ b/docs/reference/inference/inference-apis.asciidoc @@ -10,6 +10,12 @@ trained models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + The {infer} APIs enable you to create {infer} endpoints and use {ml} models of different providers - such as Amazon Bedrock, Anthropic, Azure AI Studio, Cohere, Google AI, Mistral, OpenAI, or HuggingFace - as a service. 
Use @@ -20,6 +26,7 @@ the following APIs to manage {infer} models and perform {infer}: * <> * <> * <> +* <> * <> [[inference-landscape]] @@ -28,9 +35,9 @@ image::images/inference-landscape.jpg[A representation of the Elastic inference An {infer} endpoint enables you to use the corresponding {ml} model without manual deployment and apply it to your data at ingestion time through -<>. +<>. -Choose a model from your provider or use ELSER – a retrieval model trained by +Choose a model from your provider or use ELSER – a retrieval model trained by Elastic –, then create an {infer} endpoint by the <>. Now use <> to perform <> on your data. @@ -61,7 +68,7 @@ The following list contains the default {infer} endpoints listed by `inference_i Use the `inference_id` of the endpoint in a <> field definition or when creating an <>. The API call will automatically download and deploy the model which might take a couple of minutes. Default {infer} enpoints have {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[adaptive allocations] enabled. -For these models, the minimum number of allocations is `0`. +For these models, the minimum number of allocations is `0`. If there is no {infer} activity that uses the endpoint, the number of allocations will scale down to `0` automatically after 15 minutes. @@ -78,7 +85,7 @@ Returning a long document in search results is less useful than providing the mo Each chunk will include the text subpassage and the corresponding embedding generated from it. By default, documents are split into sentences and grouped in sections up to 250 words with 1 sentence overlap so that each chunk shares a sentence with the previous chunk. -Overlapping ensures continuity and prevents vital contextual information in the input text from being lost by a hard break. +Overlapping ensures continuity and prevents vital contextual information in the input text from being lost by a hard break. {es} uses the https://unicode-org.github.io/icu-docs/[ICU4J] library to detect word and sentence boundaries for chunking. https://unicode-org.github.io/icu/userguide/boundaryanalysis/#word-boundary[Word boundaries] are identified by following a series of rules, not just the presence of a whitespace character. @@ -129,6 +136,7 @@ PUT _inference/sparse_embedding/small_chunk_size include::delete-inference.asciidoc[] include::get-inference.asciidoc[] include::post-inference.asciidoc[] +include::chat-completion-inference.asciidoc[] include::put-inference.asciidoc[] include::stream-inference.asciidoc[] include::update-inference.asciidoc[] @@ -143,6 +151,7 @@ include::service-elser.asciidoc[] include::service-google-ai-studio.asciidoc[] include::service-google-vertex-ai.asciidoc[] include::service-hugging-face.asciidoc[] +include::service-jinaai.asciidoc[] include::service-mistral.asciidoc[] include::service-openai.asciidoc[] include::service-watsonx-ai.asciidoc[] diff --git a/docs/reference/inference/inference-shared.asciidoc b/docs/reference/inference/inference-shared.asciidoc index da497c6581e5d..b133c54082810 100644 --- a/docs/reference/inference/inference-shared.asciidoc +++ b/docs/reference/inference/inference-shared.asciidoc @@ -41,7 +41,7 @@ end::chunking-settings[] tag::chunking-settings-max-chunking-size[] Specifies the maximum size of a chunk in words. Defaults to `250`. -This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). 
+This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). end::chunking-settings-max-chunking-size[] tag::chunking-settings-overlap[] @@ -63,4 +63,48 @@ Specifies the chunking strategy. It could be either `sentence` or `word`. end::chunking-settings-strategy[] +tag::chat-completion-schema-content-with-examples[] +.Examples +[%collapsible%closed] +====== +String example +[source,js] +------------------------------------------------------------ +{ + "content": "Some string" +} +------------------------------------------------------------ +// NOTCONSOLE + +Object example +[source,js] +------------------------------------------------------------ +{ + "content": [ + { + "text": "Some text", + "type": "text" + } + ] +} +------------------------------------------------------------ +// NOTCONSOLE +====== + +String representation::: +(Required, string) +The text content. ++ +Object representation::: +`text`:::: +(Required, string) +The text content. ++ +`type`:::: +(Required, string) +This must be set to the value `text`. +end::chat-completion-schema-content-with-examples[] +tag::chat-completion-docs[] +For more information on how to use the `chat_completion` task type, please refer to the <>. +end::chat-completion-docs[] diff --git a/docs/reference/inference/post-inference.asciidoc b/docs/reference/inference/post-inference.asciidoc index 4edefcc911e2e..07e557aa355bd 100644 --- a/docs/reference/inference/post-inference.asciidoc +++ b/docs/reference/inference/post-inference.asciidoc @@ -2,6 +2,12 @@ [[post-inference-api]] === Perform inference API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Performs an inference task on an input text by using an {infer} endpoint. IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index 4f82889f562d8..da07d1d3e7d84 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -2,6 +2,12 @@ [[put-inference-api]] === Create {infer} API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task. [IMPORTANT] @@ -36,7 +42,7 @@ include::inference-shared.asciidoc[tag=inference-id] include::inference-shared.asciidoc[tag=task-type] + -- -Refer to the service list in the <> for the available task types. +Refer to the service list in the <> for the available task types. -- @@ -55,7 +61,7 @@ The create {infer} API enables you to create an {infer} endpoint and configure a The following services are available through the {infer} API. -You can find the available task types next to the service name. +You can find the available task types next to the service name. 
Click the links to review the configuration details of the services: * <> (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) @@ -67,11 +73,12 @@ Click the links to review the configuration details of the services: * <> (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland) * <> (`sparse_embedding`) * <> (`completion`, `text_embedding`) -* <> (`rerank`, `text_embedding`) +* <> (`rerank`, `text_embedding`) * <> (`text_embedding`) * <> (`text_embedding`) -* <> (`completion`, `text_embedding`) +* <> (`chat_completion`, `completion`, `text_embedding`) * <> (`text_embedding`) +* <> (`text_embedding`, `rerank`) The {es} and ELSER services run on a {ml} node in your {es} cluster. The rest of the services connect to external providers. @@ -87,4 +94,4 @@ When adaptive allocations are enabled: - The number of allocations scales up automatically when the load increases. - Allocations scale down to a minimum of 0 when the load decreases, saving resources. -For more information about adaptive allocations and resources, refer to the {ml-docs}/ml-nlp-auto-scale.html[trained model autoscaling] documentation. \ No newline at end of file +For more information about adaptive allocations and resources, refer to the {ml-docs}/ml-nlp-auto-scale.html[trained model autoscaling] documentation. diff --git a/docs/reference/inference/service-alibabacloud-ai-search.asciidoc b/docs/reference/inference/service-alibabacloud-ai-search.asciidoc index c3ff40a39cd86..5a11190549ce6 100644 --- a/docs/reference/inference/service-alibabacloud-ai-search.asciidoc +++ b/docs/reference/inference/service-alibabacloud-ai-search.asciidoc @@ -1,6 +1,12 @@ [[infer-service-alibabacloud-ai-search]] === AlibabaCloud AI Search {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `alibabacloud-ai-search` service. [discrete] diff --git a/docs/reference/inference/service-amazon-bedrock.asciidoc b/docs/reference/inference/service-amazon-bedrock.asciidoc index 761777e32f8e0..ed25ce0d515b5 100644 --- a/docs/reference/inference/service-amazon-bedrock.asciidoc +++ b/docs/reference/inference/service-amazon-bedrock.asciidoc @@ -1,6 +1,12 @@ [[infer-service-amazon-bedrock]] === Amazon Bedrock {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `amazonbedrock` service. [discrete] diff --git a/docs/reference/inference/service-anthropic.asciidoc b/docs/reference/inference/service-anthropic.asciidoc index 7fb3d1d5bea34..4ce76dc1d57bd 100644 --- a/docs/reference/inference/service-anthropic.asciidoc +++ b/docs/reference/inference/service-anthropic.asciidoc @@ -1,6 +1,12 @@ [[infer-service-anthropic]] === Anthropic {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `anthropic` service. 
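For orientation, creating an endpoint for one of these services is a single `PUT` request. The following is a minimal sketch for the `anthropic` service; the endpoint name and the Claude `model_id` shown are illustrative assumptions, not fixed values:

[source,console]
------------------------------------------------------------
PUT _inference/completion/anthropic-completion <1>
{
  "service": "anthropic",
  "service_settings": {
    "api_key": "<api_key>",
    "model_id": "claude-3-5-sonnet-20240620"
  },
  "task_settings": {
    "max_tokens": 1024
  }
}
------------------------------------------------------------
// TEST[skip:TBD]

<1> `anthropic-completion` and the `model_id` are hypothetical placeholders; substitute your own endpoint name and a model you have access to.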
diff --git a/docs/reference/inference/service-azure-ai-studio.asciidoc b/docs/reference/inference/service-azure-ai-studio.asciidoc
index dd13a3e59aae5..7ada8df1ecdaa 100644
--- a/docs/reference/inference/service-azure-ai-studio.asciidoc
+++ b/docs/reference/inference/service-azure-ai-studio.asciidoc
@@ -1,6 +1,12 @@
[[infer-service-azure-ai-studio]]
=== Azure AI studio {infer} service
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs].
+--
+
Creates an {infer} endpoint to perform an {infer} task with the `azureaistudio` service.
diff --git a/docs/reference/inference/service-azure-openai.asciidoc b/docs/reference/inference/service-azure-openai.asciidoc
index b134e2b687f6c..170c0939166f7 100644
--- a/docs/reference/inference/service-azure-openai.asciidoc
+++ b/docs/reference/inference/service-azure-openai.asciidoc
@@ -1,6 +1,12 @@
[[infer-service-azure-openai]]
=== Azure OpenAI {infer} service
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs].
+--
+
Creates an {infer} endpoint to perform an {infer} task with the `azureopenai` service.
diff --git a/docs/reference/inference/service-cohere.asciidoc b/docs/reference/inference/service-cohere.asciidoc
index 1a815e3c45f36..70e311c810cdd 100644
--- a/docs/reference/inference/service-cohere.asciidoc
+++ b/docs/reference/inference/service-cohere.asciidoc
@@ -1,6 +1,12 @@
[[infer-service-cohere]]
=== Cohere {infer} service
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs].
+--
+
Creates an {infer} endpoint to perform an {infer} task with the `cohere` service.
diff --git a/docs/reference/inference/service-elasticsearch.asciidoc b/docs/reference/inference/service-elasticsearch.asciidoc
index bf7e2976bbe63..8870fbed357a6 100644
--- a/docs/reference/inference/service-elasticsearch.asciidoc
+++ b/docs/reference/inference/service-elasticsearch.asciidoc
@@ -1,10 +1,19 @@
[[infer-service-elasticsearch]]
=== Elasticsearch {infer} service
-Creates an {infer} endpoint to perform an {infer} task with the `elasticsearch` service.
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs].
+--
-NOTE: If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet.
+Creates an {infer} endpoint to perform an {infer} task with the `elasticsearch` service.
+[NOTE]
+====
+* Your {es} deployment contains <>; you only need to create endpoints using the API if you want to customize the settings.
+* If you use the ELSER or the E5 model through the `elasticsearch` service, the API request will automatically download and deploy the model if it isn't downloaded yet.
+====
[discrete]
[[infer-service-elasticsearch-api-request]]
diff --git a/docs/reference/inference/service-elser.asciidoc b/docs/reference/inference/service-elser.asciidoc
index c1cc23c8c9adb..47aaa58814602 100644
--- a/docs/reference/inference/service-elser.asciidoc
+++ b/docs/reference/inference/service-elser.asciidoc
@@ -1,17 +1,26 @@
[[infer-service-elser]]
=== ELSER {infer} service
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs].
+--
+
Creates an {infer} endpoint to perform an {infer} task with the `elser` service.
You can also deploy ELSER by using the <>.
-NOTE: The API request will automatically download and deploy the ELSER model if
-it isn't already downloaded.
+[NOTE]
+====
+* Your {es} deployment contains <>; you only need to create the endpoint using the API if you want to customize the settings.
+* The API request will automatically download and deploy the ELSER model if it isn't already downloaded.
+====
[WARNING]
.Deprecated in 8.16
====
-The elser service is deprecated and will be removed in a future release.
-Use the <> instead, with model_id included in the service_settings.
+The `elser` service is deprecated and will be removed in a future release.
+Use the <> instead, with `model_id` included in the `service_settings`.
====
[discrete]
diff --git a/docs/reference/inference/service-google-ai-studio.asciidoc b/docs/reference/inference/service-google-ai-studio.asciidoc
index 738fce3d53e9b..5b30292fb9beb 100644
--- a/docs/reference/inference/service-google-ai-studio.asciidoc
+++ b/docs/reference/inference/service-google-ai-studio.asciidoc
@@ -1,6 +1,12 @@
[[infer-service-google-ai-studio]]
=== Google AI Studio {infer} service
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs].
+--
+
Creates an {infer} endpoint to perform an {infer} task with the `googleaistudio` service.
diff --git a/docs/reference/inference/service-google-vertex-ai.asciidoc b/docs/reference/inference/service-google-vertex-ai.asciidoc
index 34e14e05e072a..28fa65b6e5fcc 100644
--- a/docs/reference/inference/service-google-vertex-ai.asciidoc
+++ b/docs/reference/inference/service-google-vertex-ai.asciidoc
@@ -1,6 +1,12 @@
[[infer-service-google-vertex-ai]]
=== Google Vertex AI {infer} service
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs].
+--
+
Creates an {infer} endpoint to perform an {infer} task with the `googlevertexai` service.
diff --git a/docs/reference/inference/service-hugging-face.asciidoc b/docs/reference/inference/service-hugging-face.asciidoc
index 6d8667351a6b4..862914c141740 100644
--- a/docs/reference/inference/service-hugging-face.asciidoc
+++ b/docs/reference/inference/service-hugging-face.asciidoc
@@ -1,6 +1,12 @@
[[infer-service-hugging-face]]
=== HuggingFace {infer} service
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs].
+--
+
Creates an {infer} endpoint to perform an {infer} task with the `hugging_face` service.
diff --git a/docs/reference/inference/service-jinaai.asciidoc b/docs/reference/inference/service-jinaai.asciidoc
new file mode 100644
index 0000000000000..7c5aebe5bcf8e
--- /dev/null
+++ b/docs/reference/inference/service-jinaai.asciidoc
@@ -0,0 +1,255 @@
+[[infer-service-jinaai]]
+=== JinaAI {infer} service
+
+Creates an {infer} endpoint to perform an {infer} task with the `jinaai` service.
+
+
+[discrete]
+[[infer-service-jinaai-api-request]]
+==== {api-request-title}
+
+`PUT /_inference//`
+
+[discrete]
+[[infer-service-jinaai-api-path-params]]
+==== {api-path-parms-title}
+
+``::
+(Required, string)
+include::inference-shared.asciidoc[tag=inference-id]
+
+``::
+(Required, string)
+include::inference-shared.asciidoc[tag=task-type]
++
+--
+Available task types:
+
+* `text_embedding`,
+* `rerank`.
+--
+
+[discrete]
+[[infer-service-jinaai-api-request-body]]
+==== {api-request-body-title}
+
+`chunking_settings`::
+(Optional, object)
+include::inference-shared.asciidoc[tag=chunking-settings]
+
+`max_chunk_size`:::
+(Optional, integer)
+include::inference-shared.asciidoc[tag=chunking-settings-max-chunking-size]
+
+`overlap`:::
+(Optional, integer)
+include::inference-shared.asciidoc[tag=chunking-settings-overlap]
+
+`sentence_overlap`:::
+(Optional, integer)
+include::inference-shared.asciidoc[tag=chunking-settings-sentence-overlap]
+
+`strategy`:::
+(Optional, string)
+include::inference-shared.asciidoc[tag=chunking-settings-strategy]
+
+`service`::
+(Required, string)
+The type of service supported for the specified task type. In this case,
+`jinaai`.
+
+`service_settings`::
+(Required, object)
+include::inference-shared.asciidoc[tag=service-settings]
++
+--
+These settings are specific to the `jinaai` service.
+--
+
+`api_key`:::
+(Required, string)
+A valid API key for your JinaAI account.
+You can find it at https://jina.ai/embeddings/.
++
+--
+include::inference-shared.asciidoc[tag=api-key-admonition]
+--
+
+`rate_limit`:::
+(Optional, object)
+The default rate limit for the `jinaai` service is 2000 requests per minute for all task types.
+You can modify this using the `requests_per_minute` setting in your service settings:
++
+--
+include::inference-shared.asciidoc[tag=request-per-minute-example]
+
+More information about JinaAI's rate limits can be found at https://jina.ai/contact-sales/#rate-limit.
+--
++
+.`service_settings` for the `rerank` task type
+[%collapsible%closed]
+=====
+`model_id`:::
+(Required, string)
+The name of the model to use for the {infer} task.
+To review the available `rerank`-compatible models, refer to https://jina.ai/reranker.
+=====
++
+.`service_settings` for the `text_embedding` task type
+[%collapsible%closed]
+=====
+`model_id`:::
+(Optional, string)
+The name of the model to use for the {infer} task.
+To review the available `text_embedding` models, refer to
+https://jina.ai/embeddings/.
+
+`similarity`:::
+(Optional, string)
+Similarity measure. One of `cosine`, `dot_product`, `l2_norm`.
+Defaults based on the `embedding_type` (`float` -> `dot_product`, `int8/byte` -> `cosine`).
+=====
+
+
+
+`task_settings`::
+(Optional, object)
+include::inference-shared.asciidoc[tag=task-settings]
++
+.`task_settings` for the `rerank` task type
+[%collapsible%closed]
+=====
+`return_documents`:::
+(Optional, boolean)
+Specify whether to return doc text within the results.
+
+`top_n`:::
+(Optional, integer)
+The number of most relevant documents to return. Defaults to the number of documents.
+If this {infer} endpoint is used in a `text_similarity_reranker` retriever query and `top_n` is set, it must be greater than or equal to `rank_window_size` in the query.
+=====
++
+.`task_settings` for the `text_embedding` task type
+[%collapsible%closed]
+=====
+`task`:::
+(Optional, string)
+Specifies the task passed to the model.
+Valid values are:
+* `classification`: use it for embeddings passed through a text classifier.
+* `clustering`: use it for embeddings run through a clustering algorithm.
+* `ingest`: use it for storing document embeddings in a vector database.
+* `search`: use it for storing embeddings of search queries run against a vector database to find relevant documents.
+=====
+
+
+[discrete]
+[[inference-example-jinaai]]
+==== JinaAI service examples
+
+The following examples demonstrate how to create {infer} endpoints for `text_embedding` and `rerank` tasks using the JinaAI service and use them in search requests.
+
+First, we create the `text_embedding` endpoint:
+
+[source,console]
+------------------------------------------------------------
+PUT _inference/text_embedding/jinaai-embeddings
+{
+  "service": "jinaai",
+  "service_settings": {
+    "model_id": "jina-embeddings-v3",
+    "api_key": ""
+  }
+}
+------------------------------------------------------------
+// TEST[skip:uses ML]
+
+Then, we create the `rerank` endpoint:
+[source,console]
+------------------------------------------------------------
+PUT _inference/rerank/jinaai-rerank
+{
+  "service": "jinaai",
+  "service_settings": {
+    "api_key": "",
+    "model_id": "jina-reranker-v2-base-multilingual"
+  },
+  "task_settings": {
+    "top_n": 10,
+    "return_documents": true
+  }
+}
+------------------------------------------------------------
+// TEST[skip:uses ML]
+
+Now we can create an index that will use the `jinaai-embeddings` endpoint to index the documents.
+
+[source,console]
+------------------------------------------------------------
+PUT jinaai-index
+{
+  "mappings": {
+    "properties": {
+      "content": {
+        "type": "semantic_text",
+        "inference_id": "jinaai-embeddings"
+      }
+    }
+  }
+}
+------------------------------------------------------------
+// TEST[skip:uses ML]
+
+[source,console]
+------------------------------------------------------------
+PUT jinaai-index/_bulk
+{ "index" : { "_index" : "jinaai-index", "_id" : "1" } }
+{"content": "Sarah Johnson is a talented marine biologist working at the Oceanographic Institute. Her groundbreaking research on coral reef ecosystems has garnered international attention and numerous accolades."}
+{ "index" : { "_index" : "jinaai-index", "_id" : "2" } }
+{"content": "She spends months at a time diving in remote locations, meticulously documenting the intricate relationships between various marine species. "}
+{ "index" : { "_index" : "jinaai-index", "_id" : "3" } }
+{"content": "Her dedication to preserving these delicate underwater environments has inspired a new generation of conservationists."}
+------------------------------------------------------------
+// TEST[skip:uses ML]
+
+Now, with the index created, we can search with and without the reranker service.
+
+[source,console]
+------------------------------------------------------------
+GET jinaai-index/_search
+{
+  "query": {
+    "semantic": {
+      "field": "content",
+      "query": "who inspired taking care of the sea?"
+    }
+  }
+}
+------------------------------------------------------------
+// TEST[skip:uses ML]
+
+[source,console]
+------------------------------------------------------------
+POST jinaai-index/_search
+{
+  "retriever": {
+    "text_similarity_reranker": {
+      "retriever": {
+        "standard": {
+          "query": {
+            "semantic": {
+              "field": "content",
+              "query": "who inspired taking care of the sea?"
+            }
+          }
+        }
+      },
+      "field": "content",
+      "rank_window_size": 100,
+      "inference_id": "jinaai-rerank",
+      "inference_text": "who inspired taking care of the sea?"
+ } + } +} +------------------------------------------------------------ +// TEST[skip:uses ML] \ No newline at end of file diff --git a/docs/reference/inference/service-mistral.asciidoc b/docs/reference/inference/service-mistral.asciidoc index 244381d107161..326e8458be767 100644 --- a/docs/reference/inference/service-mistral.asciidoc +++ b/docs/reference/inference/service-mistral.asciidoc @@ -1,6 +1,12 @@ [[infer-service-mistral]] === Mistral {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `mistral` service. diff --git a/docs/reference/inference/service-openai.asciidoc b/docs/reference/inference/service-openai.asciidoc index 9211e2d08e88b..590f280b1c494 100644 --- a/docs/reference/inference/service-openai.asciidoc +++ b/docs/reference/inference/service-openai.asciidoc @@ -1,6 +1,12 @@ [[infer-service-openai]] === OpenAI {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `openai` service. @@ -25,10 +31,18 @@ include::inference-shared.asciidoc[tag=task-type] -- Available task types: +* `chat_completion`, * `completion`, * `text_embedding`. -- +[NOTE] +==== +The `chat_completion` task type only supports streaming and only through the `_unified` API. + +include::inference-shared.asciidoc[tag=chat-completion-docs] +==== + [discrete] [[infer-service-openai-api-request-body]] ==== {api-request-body-title} @@ -55,7 +69,7 @@ include::inference-shared.asciidoc[tag=chunking-settings-strategy] `service`:: (Required, string) -The type of service supported for the specified task type. In this case, +The type of service supported for the specified task type. In this case, `openai`. `service_settings`:: @@ -170,4 +184,4 @@ PUT _inference/completion/openai-completion } } ------------------------------------------------------------ -// TEST[skip:TBD] \ No newline at end of file +// TEST[skip:TBD] diff --git a/docs/reference/inference/service-watsonx-ai.asciidoc b/docs/reference/inference/service-watsonx-ai.asciidoc index 597afc27fd0cf..e7bba7b4e9a97 100644 --- a/docs/reference/inference/service-watsonx-ai.asciidoc +++ b/docs/reference/inference/service-watsonx-ai.asciidoc @@ -1,6 +1,12 @@ [[infer-service-watsonx-ai]] === Watsonx {infer} service +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Creates an {infer} endpoint to perform an {infer} task with the `watsonxai` service. You need an https://cloud.ibm.com/docs/databases-for-elasticsearch?topic=databases-for-elasticsearch-provisioning&interface=api[IBM Cloud® Databases for Elasticsearch deployment] to use the `watsonxai` {infer} service. diff --git a/docs/reference/inference/stream-inference.asciidoc b/docs/reference/inference/stream-inference.asciidoc index e66acd630cb3e..4a3ce31909712 100644 --- a/docs/reference/inference/stream-inference.asciidoc +++ b/docs/reference/inference/stream-inference.asciidoc @@ -2,6 +2,12 @@ [[stream-inference-api]] === Stream inference API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs]. +-- + Streams a chat completion response. 
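For example, a streaming request might look like the following minimal sketch, assuming an existing `completion` endpoint named `openai-completion`:

[source,console]
------------------------------------------------------------
POST _inference/completion/openai-completion/_stream
{
  "input": "What is Elastic?"
}
------------------------------------------------------------
// TEST[skip:TBD]

The response is then returned incrementally as server-sent events rather than as a single body.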
IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
@@ -32,8 +38,12 @@ However, if you do not plan to use the {infer} APIs to use these models or if yo
==== {api-description-title}
The stream {infer} API enables real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation.
-It only works with the `completion` task type.
+It only works with the `completion` and `chat_completion` task types.
+[NOTE]
+====
+include::inference-shared.asciidoc[tag=chat-completion-docs]
+====
[discrete]
[[stream-inference-api-path-params]]
diff --git a/docs/reference/inference/update-inference.asciidoc b/docs/reference/inference/update-inference.asciidoc
index efd29231ac12e..d3a90f5d84e65 100644
--- a/docs/reference/inference/update-inference.asciidoc
+++ b/docs/reference/inference/update-inference.asciidoc
@@ -2,6 +2,12 @@
[[update-inference-api]]
=== Update inference API
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-inference[{infer-cap} APIs].
+--
+
Updates an {infer} endpoint.
IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
diff --git a/docs/reference/ingest/apis/delete-ip-location-database.asciidoc b/docs/reference/ingest/apis/delete-ip-location-database.asciidoc
index c3a10a914d2f4..23f1d15bfa7ee 100644
--- a/docs/reference/ingest/apis/delete-ip-location-database.asciidoc
+++ b/docs/reference/ingest/apis/delete-ip-location-database.asciidoc
@@ -4,6 +4,12 @@
Delete IP geolocation database configuration
++++
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs].
+--
+
Deletes an IP geolocation database configuration.
[source,console]
diff --git a/docs/reference/ingest/apis/delete-pipeline.asciidoc b/docs/reference/ingest/apis/delete-pipeline.asciidoc
index 94ac87c61b56b..cd4cae1b636ca 100644
--- a/docs/reference/ingest/apis/delete-pipeline.asciidoc
+++ b/docs/reference/ingest/apis/delete-pipeline.asciidoc
@@ -4,6 +4,12 @@
Delete pipeline
++++
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs].
+--
+
Deletes one or more existing ingest pipelines.
////
diff --git a/docs/reference/ingest/apis/enrich/delete-enrich-policy.asciidoc b/docs/reference/ingest/apis/enrich/delete-enrich-policy.asciidoc
index 1d4b7e770325a..cd68bd1fcc67b 100644
--- a/docs/reference/ingest/apis/enrich/delete-enrich-policy.asciidoc
+++ b/docs/reference/ingest/apis/enrich/delete-enrich-policy.asciidoc
@@ -5,6 +5,12 @@
Delete enrich policy
++++
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-enrich[Enrich APIs].
+--
+
Deletes an existing <> and its <>.
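For example, deleting a policy is a single request; a minimal sketch with a hypothetical policy name `my-policy`:

[source,console]
------------------------------------------------------------
DELETE /_enrich/policy/my-policy
------------------------------------------------------------
// TEST[skip:TBD]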
diff --git a/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc b/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc
index ebad9f09250d3..8e72a51514a59 100644
--- a/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc
+++ b/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc
@@ -5,6 +5,12 @@
Execute enrich policy
++++
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-enrich[Enrich APIs].
+--
+
Executes an existing <>.
////
diff --git a/docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc b/docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc
index 88cf2cfa7cf60..2f9e069d5e43d 100644
--- a/docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc
+++ b/docs/reference/ingest/apis/enrich/get-enrich-policy.asciidoc
@@ -5,6 +5,12 @@
Get enrich policy
++++
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-enrich[Enrich APIs].
+--
+
Returns information about an <>.
////
diff --git a/docs/reference/ingest/apis/enrich/index.asciidoc b/docs/reference/ingest/apis/enrich/index.asciidoc
index a17c8179af1b1..7566b8812ec3c 100644
--- a/docs/reference/ingest/apis/enrich/index.asciidoc
+++ b/docs/reference/ingest/apis/enrich/index.asciidoc
@@ -2,6 +2,12 @@
[[enrich-apis]]
== Enrich APIs
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-enrich[Enrich APIs].
+--
+
The following enrich APIs are available for managing <>:
diff --git a/docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc b/docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc
index ee33b0b320905..80223cb0ec711 100644
--- a/docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc
+++ b/docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc
@@ -5,6 +5,12 @@
Create enrich policy
++++
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-enrich[Enrich APIs].
+--
+
Creates an enrich policy.
////
diff --git a/docs/reference/ingest/apis/geoip-stats.asciidoc b/docs/reference/ingest/apis/geoip-stats.asciidoc
index 6135b7821f2a5..423fee1c5bceb 100644
--- a/docs/reference/ingest/apis/geoip-stats.asciidoc
+++ b/docs/reference/ingest/apis/geoip-stats.asciidoc
@@ -4,6 +4,12 @@
GeoIP stats
++++
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs].
+--
+
Gets statistics about the <>, including download statistics for GeoIP2 databases used with it.
diff --git a/docs/reference/ingest/apis/get-ip-location-database.asciidoc b/docs/reference/ingest/apis/get-ip-location-database.asciidoc
index 26e9ddc1eee50..25f1c7ffdf695 100644
--- a/docs/reference/ingest/apis/get-ip-location-database.asciidoc
+++ b/docs/reference/ingest/apis/get-ip-location-database.asciidoc
@@ -4,6 +4,12 @@
Get IP geolocation database configuration
++++
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs].
+--
+
Returns information about one or more IP geolocation database configurations.
[source,console] diff --git a/docs/reference/ingest/apis/get-pipeline.asciidoc b/docs/reference/ingest/apis/get-pipeline.asciidoc index f2a1155bca12b..88fc22590004b 100644 --- a/docs/reference/ingest/apis/get-pipeline.asciidoc +++ b/docs/reference/ingest/apis/get-pipeline.asciidoc @@ -4,6 +4,12 @@ Get pipeline ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs]. +-- + Returns information about one or more ingest pipelines. This API returns a local reference of the pipeline. diff --git a/docs/reference/ingest/apis/index.asciidoc b/docs/reference/ingest/apis/index.asciidoc index 35adc47821978..e83131b554452 100644 --- a/docs/reference/ingest/apis/index.asciidoc +++ b/docs/reference/ingest/apis/index.asciidoc @@ -1,6 +1,12 @@ [[ingest-apis]] == Ingest APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs]. +-- + Use ingest APIs to manage tasks and resources related to <> and processors. diff --git a/docs/reference/ingest/apis/put-ip-location-database.asciidoc b/docs/reference/ingest/apis/put-ip-location-database.asciidoc index e42d84752694c..1cdd789008881 100644 --- a/docs/reference/ingest/apis/put-ip-location-database.asciidoc +++ b/docs/reference/ingest/apis/put-ip-location-database.asciidoc @@ -4,6 +4,12 @@ Create or update IP geolocation database configuration ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs]. +-- + Creates or updates an IP geolocation database configuration. IMPORTANT: The Maxmind `account_id` shown below requires a license key. Because the license key is sensitive information, diff --git a/docs/reference/ingest/apis/put-pipeline.asciidoc b/docs/reference/ingest/apis/put-pipeline.asciidoc index 5b532dedf8e82..7d9854a55758d 100644 --- a/docs/reference/ingest/apis/put-pipeline.asciidoc +++ b/docs/reference/ingest/apis/put-pipeline.asciidoc @@ -4,6 +4,12 @@ Create or update pipeline ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs]. +-- + Creates or updates an <>. Changes made using this API take effect immediately. diff --git a/docs/reference/ingest/apis/simulate-ingest.asciidoc b/docs/reference/ingest/apis/simulate-ingest.asciidoc index 52ed09b1d32c2..b48faff876641 100644 --- a/docs/reference/ingest/apis/simulate-ingest.asciidoc +++ b/docs/reference/ingest/apis/simulate-ingest.asciidoc @@ -5,6 +5,12 @@ Simulate ingest ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs]. +-- + Executes ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions. This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any diff --git a/docs/reference/ingest/apis/simulate-pipeline.asciidoc b/docs/reference/ingest/apis/simulate-pipeline.asciidoc index bb7c343383132..fe7e5f2e91c6a 100644 --- a/docs/reference/ingest/apis/simulate-pipeline.asciidoc +++ b/docs/reference/ingest/apis/simulate-pipeline.asciidoc @@ -5,6 +5,12 @@ Simulate pipeline ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ingest[Ingest APIs]. +-- + Executes an ingest pipeline against a set of provided documents. 
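For a quick illustration, a simulation request can supply both an inline pipeline definition and the sample documents to run it against; a minimal sketch with a hypothetical `set` processor and document:

[source,console]
------------------------------------------------------------
POST /_ingest/pipeline/_simulate
{
  "pipeline": {
    "description": "hypothetical pipeline for illustration",
    "processors": [
      {
        "set": {
          "field": "status",
          "value": "simulated"
        }
      }
    ]
  },
  "docs": [
    {
      "_source": {
        "message": "hello world"
      }
    }
  ]
}
------------------------------------------------------------
// TEST[skip:TBD]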
diff --git a/docs/reference/ingest/enrich.asciidoc b/docs/reference/ingest/enrich.asciidoc index 4bd50641149c0..3c08384eb23db 100644 --- a/docs/reference/ingest/enrich.asciidoc +++ b/docs/reference/ingest/enrich.asciidoc @@ -98,7 +98,8 @@ and <>. [IMPORTANT] ==== The enrich processor performs several operations and may impact the speed of -your ingest pipeline. +your ingest pipeline. We recommend <> co-locating +ingest and data roles to minimize remote search operations. We strongly recommend testing and benchmarking your enrich processors before deploying them in production. diff --git a/docs/reference/ingest/search-inference-processing.asciidoc b/docs/reference/ingest/search-inference-processing.asciidoc index 006cc96294477..73642b3bb3447 100644 --- a/docs/reference/ingest/search-inference-processing.asciidoc +++ b/docs/reference/ingest/search-inference-processing.asciidoc @@ -88,7 +88,7 @@ The `monitor_ml` <> is req To create the index-specific ML inference pipeline, go to *Search -> Content -> Indices -> -> Pipelines* in the Kibana UI. -If you only see the `ent-search-generic-ingestion` pipeline, you will need to click *Copy and customize* to create index-specific pipelines. +If you only see the `search-default-ingestion` pipeline, you will need to click *Copy and customize* to create index-specific pipelines. This will create the `{index_name}@ml-inference` pipeline. Once your index-specific ML inference pipeline is ready, you can add inference processors that use your ML trained models. diff --git a/docs/reference/ingest/search-ingest-pipelines.asciidoc b/docs/reference/ingest/search-ingest-pipelines.asciidoc index e414dacaab964..272c6ba2884b9 100644 --- a/docs/reference/ingest/search-ingest-pipelines.asciidoc +++ b/docs/reference/ingest/search-ingest-pipelines.asciidoc @@ -40,7 +40,7 @@ Considerations such as error handling, conditional execution, sequencing, versio To this end, when you create indices for search use cases, (including {enterprise-search-ref}/crawler.html[Elastic web crawler], <>. , and API indices), each index already has a pipeline set up with several processors that optimize your content for search. -This pipeline is called `ent-search-generic-ingestion`. +This pipeline is called `search-default-ingestion`. While it is a "managed" pipeline (meaning it should not be tampered with), you can view its details via the Kibana UI or the Elasticsearch API. You can also <>. @@ -56,14 +56,14 @@ This will not effect existing indices. Each index also provides the capability to easily create index-specific ingest pipelines with customizable processing. If you need that extra flexibility, you can create a custom pipeline by going to your pipeline settings and choosing to "copy and customize". -This will replace the index's use of `ent-search-generic-ingestion` with 3 newly generated pipelines: +This will replace the index's use of `search-default-ingestion` with 3 newly generated pipelines: 1. `` 2. `@custom` 3. `@ml-inference` -Like `ent-search-generic-ingestion`, the first of these is "managed", but the other two can and should be modified to fit your needs. -You can view these pipelines using the platform tools (Kibana UI, Elasticsearch API), and can also +Like `search-default-ingestion`, the first of these is "managed", but the other two can and should be modified to fit your needs. +You can view these pipelines using the platform tools (Kibana UI, Elasticsearch API), and can also <>. 
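For example, the managed pipeline can be inspected with the get pipeline API; a minimal sketch using the `search-default-ingestion` name discussed above:

[source,console]
------------------------------------------------------------
GET _ingest/pipeline/search-default-ingestion
------------------------------------------------------------
// TEST[skip:TBD]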
[discrete#ingest-pipeline-search-pipeline-settings] @@ -123,7 +123,7 @@ If the pipeline is not specified, the underscore-prefixed fields will actually b === Details [discrete#ingest-pipeline-search-details-generic-reference] -==== `ent-search-generic-ingestion` Reference +==== `search-default-ingestion` Reference You can access this pipeline with the <> or via Kibana's < Ingest Pipelines>> UI. @@ -149,7 +149,7 @@ If you want to make customizations, we recommend you utilize index-specific pipe [discrete#ingest-pipeline-search-details-generic-reference-params] ===== Control flow parameters -The `ent-search-generic-ingestion` pipeline does not always run all processors. +The `search-default-ingestion` pipeline does not always run all processors. It utilizes a feature of ingest pipelines to <> based on the contents of each individual document. * `_extract_binary_content` - if this field is present and has a value of `true` on a source document, the pipeline will attempt to run the `attachment`, `set_body`, and `remove_replacement_chars` processors. @@ -167,8 +167,8 @@ See <>. ==== Index-specific ingest pipelines In the Kibana UI for your index, by clicking on the Pipelines tab, then *Settings > Copy and customize*, you can quickly generate 3 pipelines which are specific to your index. -These 3 pipelines replace `ent-search-generic-ingestion` for the index. -There is nothing lost in this action, as the `` pipeline is a superset of functionality over the `ent-search-generic-ingestion` pipeline. +These 3 pipelines replace `search-default-ingestion` for the index. +There is nothing lost in this action, as the `` pipeline is a superset of functionality over the `search-default-ingestion` pipeline. [IMPORTANT] ==== @@ -179,7 +179,7 @@ Refer to the Elastic subscriptions pages for https://www.elastic.co/subscription [discrete#ingest-pipeline-search-details-specific-reference] ===== `` Reference -This pipeline looks and behaves a lot like the <>, but with <>. +This pipeline looks and behaves a lot like the <>, but with <>. [WARNING] ========================= @@ -197,7 +197,7 @@ If you want to make customizations, we recommend you utilize <>, the index-specific pipeline also defines: +In addition to the processors inherited from the <>, the index-specific pipeline also defines: * `index_ml_inference_pipeline` - this uses the <> processor to run the `@ml-inference` pipeline. This processor will only be run if the source document includes a `_run_ml_inference` field with the value `true`. @@ -206,7 +206,7 @@ In addition to the processors inherited from the <` pipeline does not always run all processors. +Like the `search-default-ingestion` pipeline, the `` pipeline does not always run all processors. In addition to the `_extract_binary_content` and `_reduce_whitespace` control flow parameters, the `` pipeline also supports: * `_run_ml_inference` - if this field is present and has a value of `true` on a source document, the pipeline will attempt to run the `index_ml_inference_pipeline` processor. @@ -220,7 +220,7 @@ See <>. ===== `@ml-inference` Reference This pipeline is empty to start (no processors), but can be added to via the Kibana UI either through the Pipelines tab of your index, or from the *Stack Management > Ingest Pipelines* page. -Unlike the `ent-search-generic-ingestion` pipeline and the `` pipeline, this pipeline is NOT "managed". +Unlike the `search-default-ingestion` pipeline and the `` pipeline, this pipeline is NOT "managed". 
It's possible to add one or more ML inference pipelines to an index in the *Content* UI. This pipeline will serve as a container for all of the ML inference pipelines configured for the index. @@ -241,7 +241,7 @@ The `monitor_ml` Elasticsearch cluster permission is required in order to manage This pipeline is empty to start (no processors), but can be added to via the Kibana UI either through the Pipelines tab of your index, or from the *Stack Management > Ingest Pipelines* page. -Unlike the `ent-search-generic-ingestion` pipeline and the `` pipeline, this pipeline is NOT "managed". +Unlike the `search-default-ingestion` pipeline and the `` pipeline, this pipeline is NOT "managed". You are encouraged to make additions and edits to this pipeline, provided its name remains the same. This provides a convenient hook from which to add custom processing and transformations for your data. @@ -272,9 +272,12 @@ extraction. These changes should be re-applied to each index's `@custom` pipeline in order to ensure a consistent data processing experience. In 8.5+, the <> is required *in addition* to the configurations mentioned in the {enterprise-search-ref}/crawler-managing.html#crawler-managing-binary-content[Elastic web crawler Guide]. -* `ent-search-generic-ingestion` - Since 8.5, Native Connectors, Connector Clients, and new (>8.4) Elastic web crawler indices will all make use of this pipeline by default. +* `ent-search-generic-ingestion` - Since 8.5, Native Connectors, Connector Clients, and new (>8.4) Elastic web crawler indices all made use of this pipeline by default. + This pipeline evolved into the `search-default-ingestion` pipeline. + +* `search-default-ingestion` - Since 9.0, Connectors have made use of this pipeline by default. You can <> above. - As this pipeline is "managed", any modifications that were made to `app_search_crawler` and/or `ent_search_crawler` should NOT be made to `ent-search-generic-ingestion`. + As this pipeline is "managed", any modifications that were made to `app_search_crawler` and/or `ent_search_crawler` should NOT be made to `search-default-ingestion`. Instead, if such customizations are desired, you should utilize <>, placing all modifications in the `@custom` pipeline(s). ============= diff --git a/docs/reference/ingest/search-nlp-tutorial.asciidoc b/docs/reference/ingest/search-nlp-tutorial.asciidoc index afdceeeb8bac2..b23a15c96b1a2 100644 --- a/docs/reference/ingest/search-nlp-tutorial.asciidoc +++ b/docs/reference/ingest/search-nlp-tutorial.asciidoc @@ -164,8 +164,8 @@ Now it's time to create an inference pipeline. 1. From the overview page for your `search-photo-comments` index in "Search", click the *Pipelines* tab. By default, Elasticsearch does not create any index-specific ingest pipelines. -2. Because we want to customize these pipelines, we need to *Copy and customize* the `ent-search-generic-ingestion` ingest pipeline. -Find this option above the settings for the `ent-search-generic-ingestion` ingest pipeline. +2. Because we want to customize these pipelines, we need to *Copy and customize* the `search-default-ingestion` ingest pipeline. +Find this option above the settings for the `search-default-ingestion` ingest pipeline. This will create two new index-specific ingest pipelines. Next, we'll add an inference pipeline. 
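To confirm the result, the generated pipelines can be listed by name pattern with the get pipeline API; a minimal sketch assuming the tutorial's `search-photo-comments` index name:

[source,console]
------------------------------------------------------------
GET _ingest/pipeline/search-photo-comments*
------------------------------------------------------------
// TEST[skip:TBD]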
diff --git a/docs/reference/intro.asciidoc b/docs/reference/intro.asciidoc index e0100b1c5640b..2fd0722bcd660 100644 --- a/docs/reference/intro.asciidoc +++ b/docs/reference/intro.asciidoc @@ -260,7 +260,7 @@ Refer to <> for a hands-on examp *{esql}* is a new piped query language and compute engine which was first added in version *8.11*. -{esql} does not yet support all the features of Query DSL, like full-text search and semantic search. +{esql} does not yet support all the features of Query DSL. Look forward to new {esql} features and functionalities in each release. Refer to <> for a full overview of the query languages available in {es}. @@ -280,7 +280,7 @@ The <> accepts queries written in Query DS Query DSL support a wide range of search techniques, including the following: -* <>: Search text that has been analyzed and indexed to support phrase or proximity queries, fuzzy matches, and more. +* <>: Search text that has been analyzed and indexed to support phrase or proximity queries, fuzzy matches, and more. * <>: Search for exact matches using `keyword` fields. * <>: Search `semantic_text` fields using dense or sparse vector search on embeddings generated in your {es} cluster. * <>: Search for similar dense vectors using the kNN algorithm for embeddings generated outside of {es}. @@ -328,8 +328,7 @@ directly executed within {es} itself. The <> accepts queries written in {esql} syntax. -Today, it supports a subset of the features available in Query DSL, like aggregations, filters, and transformations. -It does not yet support full-text search or semantic search. +Today, it supports a subset of the features available in Query DSL, but it is rapidly evolving. It comes with a comprehensive set of <> for working with data and has robust integration with {kib}'s Discover, dashboards and visualizations. @@ -397,51 +396,18 @@ geographic location of your users and your resources. [[use-multiple-nodes-shards]] ==== Use multiple nodes and shards -[NOTE] -==== -Nodes and shards are what make {es} distributed and scalable. +When you move to production, you need to introduce multiple nodes and shards to your cluster. Nodes and shards are what make {es} distributed and scalable. The size and number of these nodes and shards depends on your data, your use case, and your budget. -These concepts aren’t essential if you’re just getting started. How you <> in production determines what you need to know: +These concepts aren't essential if you're just getting started. How you <> in production determines what you need to know: * *Self-managed {es}*: You are responsible for setting up and managing nodes, clusters, shards, and replicas. This includes managing the underlying infrastructure, scaling, and ensuring high availability through failover and backup strategies. * *Elastic Cloud*: Elastic can autoscale resources in response to workload changes. Choose from different deployment types to apply sensible defaults for your use case. A basic understanding of nodes, shards, and replicas is still important. -* *Elastic Cloud Serverless*: You don’t need to worry about nodes, shards, or replicas. These resources are 100% automated +* *Elastic Cloud Serverless*: You don't need to worry about nodes, shards, or replicas. These resources are 100% automated on the serverless platform, which is designed to scale with your workload. -==== - -You can add servers (_nodes_) to a cluster to increase capacity, and {es} automatically distributes your data and query load -across all of the available nodes. 
- -Elastic is able to distribute your data across nodes by subdividing an index into _shards_. Each index in {es} is a grouping -of one or more physical shards, where each shard is a self-contained Lucene index containing a subset of the documents in -the index. By distributing the documents in an index across multiple shards, and distributing those shards across multiple -nodes, {es} increases indexing and query capacity. - -There are two types of shards: _primaries_ and _replicas_. Each document in an index belongs to one primary shard. A replica -shard is a copy of a primary shard. Replicas maintain redundant copies of your data across the nodes in your cluster. -This protects against hardware failure and increases capacity to serve read requests like searching or retrieving a document. - -[TIP] -==== -The number of primary shards in an index is fixed at the time that an index is created, but the number of replica shards can -be changed at any time, without interrupting indexing or query operations. -==== - -Shard copies in your cluster are automatically balanced across nodes to provide scale and high availability. All nodes are -aware of all the other nodes in the cluster and can forward client requests to the appropriate node. This allows {es} -to distribute indexing and query load across the cluster. - -If you’re exploring {es} for the first time or working in a development environment, then you can use a cluster with a single node and create indices -with only one shard. However, in a production environment, you should build a cluster with multiple nodes and indices -with multiple shards to increase performance and resilience. - -// TODO - diagram -To learn about optimizing the number and size of shards in your cluster, refer to <>. -To learn about how read and write operations are replicated across shards and shard copies, refer to <>. -To adjust how shards are allocated and balanced across nodes, refer to <>. +Learn more about <>. [discrete] [[ccr-disaster-recovery-geo-proximity]] diff --git a/docs/reference/licensing/delete-license.asciidoc b/docs/reference/licensing/delete-license.asciidoc index 0f0aac416ccf8..fd9236664e1c0 100644 --- a/docs/reference/licensing/delete-license.asciidoc +++ b/docs/reference/licensing/delete-license.asciidoc @@ -5,6 +5,12 @@ Delete license ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-license[Licensing APIs]. +-- + This API enables you to delete licensing information. [discrete] diff --git a/docs/reference/licensing/get-basic-status.asciidoc b/docs/reference/licensing/get-basic-status.asciidoc index 44bc246e72bc3..0e354e0dc07cd 100644 --- a/docs/reference/licensing/get-basic-status.asciidoc +++ b/docs/reference/licensing/get-basic-status.asciidoc @@ -5,6 +5,12 @@ Get basic status ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-license[Licensing APIs]. +-- + This API enables you to check the status of your basic license. [discrete] diff --git a/docs/reference/licensing/get-license.asciidoc b/docs/reference/licensing/get-license.asciidoc index 816bdd36ff4bd..1b50a5be8fa20 100644 --- a/docs/reference/licensing/get-license.asciidoc +++ b/docs/reference/licensing/get-license.asciidoc @@ -5,6 +5,12 @@ Get license ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-license[Licensing APIs]. +-- + This API enables you to retrieve licensing information. 
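In its simplest form, the request takes no parameters:

[source,console]
------------------------------------------------------------
GET /_license
------------------------------------------------------------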
[discrete]
diff --git a/docs/reference/licensing/get-trial-status.asciidoc b/docs/reference/licensing/get-trial-status.asciidoc
index 912d90bde850f..b0ae110faa8cc 100644
--- a/docs/reference/licensing/get-trial-status.asciidoc
+++ b/docs/reference/licensing/get-trial-status.asciidoc
@@ -5,6 +5,12 @@
Get trial status
++++
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-license[Licensing APIs].
+--
+
Enables you to check the status of your trial.
[discrete]
diff --git a/docs/reference/licensing/index.asciidoc b/docs/reference/licensing/index.asciidoc
index a1dfd398acfe7..12df7b606c5bd 100644
--- a/docs/reference/licensing/index.asciidoc
+++ b/docs/reference/licensing/index.asciidoc
@@ -2,6 +2,12 @@
[[licensing-apis]]
== Licensing APIs
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-license[Licensing APIs].
+--
+
You can use the following APIs to manage your licenses:
* <>
diff --git a/docs/reference/licensing/start-basic.asciidoc b/docs/reference/licensing/start-basic.asciidoc
index 8ae25e1940a62..291a0ec452caf 100644
--- a/docs/reference/licensing/start-basic.asciidoc
+++ b/docs/reference/licensing/start-basic.asciidoc
@@ -5,6 +5,12 @@
Start basic
++++
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-license[Licensing APIs].
+--
+
This API starts an indefinite basic license.
[discrete]
diff --git a/docs/reference/licensing/update-license.asciidoc b/docs/reference/licensing/update-license.asciidoc
index 4207dbb092fb0..03b6c3ce40edd 100644
--- a/docs/reference/licensing/update-license.asciidoc
+++ b/docs/reference/licensing/update-license.asciidoc
@@ -5,6 +5,12 @@
Update license
++++
+.New API reference
+[sidebar]
+--
+For the most up-to-date API details, refer to {api-es}/group/endpoint-license[Licensing APIs].
+--
+
Updates the license for your {es} cluster.
[[update-license-api-request]]
diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc
index babe4f508b5f0..e5155b7d4ce5b 100644
--- a/docs/reference/mapping/types.asciidoc
+++ b/docs/reference/mapping/types.asciidoc
@@ -180,6 +180,8 @@ include::types/rank-feature.asciidoc[]
include::types/rank-features.asciidoc[]
+include::types/rank-vectors.asciidoc[]
+
include::types/search-as-you-type.asciidoc[]
include::types/semantic-text.asciidoc[]
diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc
index 199a59a5b143c..c16b979043a57 100644
--- a/docs/reference/mapping/types/dense-vector.asciidoc
+++ b/docs/reference/mapping/types/dense-vector.asciidoc
@@ -1,4 +1,3 @@
-[role="xpack"]
[[dense-vector]]
=== Dense vector field type
++++
diff --git a/docs/reference/mapping/types/rank-vectors.asciidoc b/docs/reference/mapping/types/rank-vectors.asciidoc
new file mode 100644
index 0000000000000..a718a5e47ec85
--- /dev/null
+++ b/docs/reference/mapping/types/rank-vectors.asciidoc
@@ -0,0 +1,201 @@
+[role="xpack"]
+[[rank-vectors]]
+=== Rank Vectors
+++++
+ Rank Vectors
+++++
+experimental::[]
+
+The `rank_vectors` field type enables late-interaction dense vector scoring in Elasticsearch. The number of vectors
+per field can vary, but they must all share the same number of dimensions and element type.
+
+The purpose of the vectors stored in this field is second-order ranking of documents with max-sim similarity.
+
+Here is a simple example of using this field with `float` elements.
+
+[source,console]
+--------------------------------------------------
+PUT my-rank-vectors-float
+{
+  "mappings": {
+    "properties": {
+      "my_vector": {
+        "type": "rank_vectors"
+      }
+    }
+  }
+}
+
+PUT my-rank-vectors-float/_doc/1
+{
+  "my_vector" : [[0.5, 10, 6], [-0.5, 10, 10]]
+}
+
+--------------------------------------------------
+// TESTSETUP
+
+In addition to the `float` element type, `byte` and `bit` element types are also supported.
+
+Here is an example of using this field with `byte` elements.
+
+[source,console]
+--------------------------------------------------
+PUT my-rank-vectors-byte
+{
+  "mappings": {
+    "properties": {
+      "my_vector": {
+        "type": "rank_vectors",
+        "element_type": "byte"
+      }
+    }
+  }
+}
+
+PUT my-rank-vectors-byte/_doc/1
+{
+  "my_vector" : [[1, 2, 3], [4, 5, 6]]
+}
+--------------------------------------------------
+
+Here is an example of using this field with `bit` elements.
+
+[source,console]
+--------------------------------------------------
+PUT my-rank-vectors-bit
+{
+  "mappings": {
+    "properties": {
+      "my_vector": {
+        "type": "rank_vectors",
+        "element_type": "bit"
+      }
+    }
+  }
+}
+
+POST /my-rank-vectors-bit/_bulk?refresh
+{"index": {"_id" : "1"}}
+{"my_vector": [127, -127, 0, 1, 42]}
+{"index": {"_id" : "2"}}
+{"my_vector": "8100012a7f"}
+--------------------------------------------------
+
+[role="child_attributes"]
+[[rank-vectors-params]]
+==== Parameters for rank vectors fields
+
+The `rank_vectors` field type supports the following parameters:
+
+[[rank-vectors-element-type]]
+`element_type`::
+(Optional, string)
+The data type used to encode vectors. The supported data types are
+`float` (default), `byte`, and `bit`.
+
+.Valid values for `element_type`
+[%collapsible%open]
+====
+`float`:::
+indexes a 4-byte floating-point
+value per dimension. This is the default value.
+
+`byte`:::
+indexes a 1-byte integer value per dimension.
+
+`bit`:::
+indexes a single bit per dimension. Useful for very high-dimensional vectors or models that specifically support bit vectors.
+NOTE: When using `bit`, the number of dimensions must be a multiple of 8 and must represent the number of bits.
+
+====
+
+`dims`::
+(Optional, integer)
+Number of vector dimensions. Can't exceed `4096`. If `dims` is not specified,
+it will be set to the length of the first vector added to the field.
+
+[[rank-vectors-synthetic-source]]
+==== Synthetic `_source`
+
+IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices
+(indices that have `index.mode` set to `time_series`). For other indices
+synthetic `_source` is in technical preview. Features in technical preview may
+be changed or removed in a future release. Elastic will work to fix
+any issues, but features in technical preview are not subject to the support SLA
+of official GA features.
+
+`rank_vectors` fields support <>.
+
+[[rank-vectors-scoring]]
+==== Scoring with rank vectors
+
+Rank vectors can be accessed and used in <>.
+ +For example, the following query scores documents based on the maxSim similarity between the query vector and the vectors stored in the `my_vector` field: + +[source,console] +-------------------------------------------------- +GET my-rank-vectors-float/_search +{ + "query": { + "script_score": { + "query": { + "match_all": {} + }, + "script": { + "source": "maxSimDotProduct(params.query_vector, 'my_vector')", + "params": { + "query_vector": [[0.5, 10, 6], [-0.5, 10, 10]] + } + } + } + } +} +-------------------------------------------------- + +Additionally, asymmetric similarity functions can be used to score against `bit` vectors. For example, the following query scores documents based on the maxSimDotProduct similarity between a floating-point query vector and bit vectors stored in the `my_vector` field: + +[source,console] +-------------------------------------------------- +PUT my-rank-vectors-bit +{ + "mappings": { + "properties": { + "my_vector": { + "type": "rank_vectors", + "element_type": "bit" + } + } + } +} + +POST /my-rank-vectors-bit/_bulk?refresh +{"index": {"_id" : "1"}} +{"my_vector": [127, -127, 0, 1, 42]} +{"index": {"_id" : "2"}} +{"my_vector": "8100012a7f"} + +GET my-rank-vectors-bit/_search +{ + "query": { + "script_score": { + "query": { + "match_all": {} + }, + "script": { + "source": "maxSimDotProduct(params.query_vector, 'my_vector')", + "params": { + "query_vector": [ + [0.35, 0.77, 0.95, 0.15, 0.11, 0.08, 0.58, 0.06, 0.44, 0.52, 0.21, + 0.62, 0.65, 0.16, 0.64, 0.39, 0.93, 0.06, 0.93, 0.31, 0.92, 0.0, + 0.66, 0.86, 0.92, 0.03, 0.81, 0.31, 0.2 , 0.92, 0.95, 0.64, 0.19, + 0.26, 0.77, 0.64, 0.78, 0.32, 0.97, 0.84] + ] <1> + } + } + } + } +} +-------------------------------------------------- +<1> Note that the query vector has 40 elements, matching the number of bits in the bit vectors. + diff --git a/docs/reference/mapping/types/semantic-text.asciidoc b/docs/reference/mapping/types/semantic-text.asciidoc index 96dc402e10c60..4514c8b6756a8 100644 --- a/docs/reference/mapping/types/semantic-text.asciidoc +++ b/docs/reference/mapping/types/semantic-text.asciidoc @@ -13,6 +13,7 @@ Long passages are <> to smaller secti The `semantic_text` field type specifies an inference endpoint identifier that will be used to generate embeddings. You can create the inference endpoint by using the <>. This field type and the <> type make it simpler to perform semantic search on your data. +The `semantic_text` field type may also be queried with <>, <>, or <> queries. If you don’t specify an inference endpoint, the `inference_id` field defaults to `.elser-2-elasticsearch`, a preconfigured endpoint for the elasticsearch service. diff --git a/docs/reference/migration/apis/deprecation.asciidoc b/docs/reference/migration/apis/deprecation.asciidoc index 67b4c113af2bc..71453a001eeeb 100644 --- a/docs/reference/migration/apis/deprecation.asciidoc +++ b/docs/reference/migration/apis/deprecation.asciidoc @@ -5,6 +5,12 @@ Deprecation info ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-migration[Migration APIs]. 
+-- + include::{es-ref-dir}/migration/apis/shared-migration-apis-tip.asciidoc[] The deprecation API is to be used to retrieve information about different diff --git a/docs/reference/migration/apis/feature-migration.asciidoc b/docs/reference/migration/apis/feature-migration.asciidoc index e38639ac44531..717a46442bd92 100644 --- a/docs/reference/migration/apis/feature-migration.asciidoc +++ b/docs/reference/migration/apis/feature-migration.asciidoc @@ -5,6 +5,12 @@ Feature migration ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-migration[Migration APIs]. +-- + include::{es-ref-dir}/migration/apis/shared-migration-apis-tip.asciidoc[] Version upgrades sometimes require changes to how features store configuration diff --git a/docs/reference/migration/migration.asciidoc b/docs/reference/migration/migration.asciidoc index ffb2ca7a7859d..57b6c88aefea4 100644 --- a/docs/reference/migration/migration.asciidoc +++ b/docs/reference/migration/migration.asciidoc @@ -2,6 +2,12 @@ [[migration-api]] == Migration APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-migration[Migration APIs]. +-- + The migration APIs power {kib}'s **Upgrade Assistant** feature. include::apis/shared-migration-apis-tip.asciidoc[] diff --git a/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc index f0cb968e082c5..865213dad3f50 100644 --- a/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc @@ -5,6 +5,12 @@ Close jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Closes one or more {anomaly-jobs}. [[ml-close-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-calendar-event.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-calendar-event.asciidoc index b80a248038aea..d67f7cce9d1f6 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-calendar-event.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-calendar-event.asciidoc @@ -5,6 +5,12 @@ Delete events from calendar ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes scheduled events from a calendar. [[ml-delete-calendar-event-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc index 6720e236fd635..17a581f964f54 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-calendar-job.asciidoc @@ -5,6 +5,12 @@ Delete jobs from calendar ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes {anomaly-jobs} from a calendar. 
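For orientation, a minimal request for this endpoint might look like the following sketch; the calendar and job names are illustrative and not part of this change:

[source,console]
--------------------------------------------------
// Remove the hypothetical job "total-requests" from the
// hypothetical calendar "planned-outages"
DELETE _ml/calendars/planned-outages/jobs/total-requests
--------------------------------------------------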
[[ml-delete-calendar-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-calendar.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-calendar.asciidoc index 6684366c6f336..23ce74c8fb310 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-calendar.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-calendar.asciidoc @@ -5,6 +5,12 @@ Delete calendars ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes a calendar. [[ml-delete-calendar-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-datafeed.asciidoc index 64a1e4c336fe6..61d3624ef50dc 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-datafeed.asciidoc @@ -7,6 +7,12 @@ Delete {dfeeds} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes an existing {dfeed}. [[ml-delete-datafeed-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-expired-data.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-expired-data.asciidoc index a8c1279ac1b47..bee453f8d2082 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-expired-data.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-expired-data.asciidoc @@ -5,6 +5,12 @@ Delete expired data ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes expired and unused machine learning data. [[ml-delete-expired-data-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc index 4b41347543e8e..84346ae012300 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-filter.asciidoc @@ -5,6 +5,12 @@ Delete filters ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes a filter. [[ml-delete-filter-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc index 74e6ce27084ad..ee3167036f66d 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-forecast.asciidoc @@ -5,6 +5,12 @@ Delete forecasts ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes forecasts from a {ml} job. [[ml-delete-forecast-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc index 1bbe07fd44f49..595c5acc041d6 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc @@ -5,6 +5,12 @@ Delete jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes an existing {anomaly-job}. 
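As a quick sketch, deleting a job is a single call; the job ID below is hypothetical:

[source,console]
--------------------------------------------------
// Delete the anomaly detection job "total-requests" (illustrative ID).
// Append ?force=true to delete a job that has not been closed first.
DELETE _ml/anomaly_detectors/total-requests
--------------------------------------------------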
[[ml-delete-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/delete-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-snapshot.asciidoc index ad10de7a2ba0f..345b933a19622 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-snapshot.asciidoc @@ -5,6 +5,12 @@ Delete model snapshots ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Deletes an existing model snapshot. [[ml-delete-snapshot-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/estimate-model-memory.asciidoc b/docs/reference/ml/anomaly-detection/apis/estimate-model-memory.asciidoc index 1cc4df42f083d..8be8e8df72d4e 100644 --- a/docs/reference/ml/anomaly-detection/apis/estimate-model-memory.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/estimate-model-memory.asciidoc @@ -5,6 +5,12 @@ Estimate model memory ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Makes an estimation of the memory usage for an {anomaly-job} model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references. diff --git a/docs/reference/ml/anomaly-detection/apis/flush-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/flush-job.asciidoc index 68ff601749b4b..dcf4d7a6b1d60 100644 --- a/docs/reference/ml/anomaly-detection/apis/flush-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/flush-job.asciidoc @@ -5,6 +5,12 @@ Flush jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Forces any buffered data to be processed by the job. [[ml-flush-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc b/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc index 3e6067ab05857..ac4e3eb4ed29a 100644 --- a/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/forecast.asciidoc @@ -5,6 +5,12 @@ Forecast jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Predicts the future behavior of a time series by using its historical behavior. [[ml-forecast-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc index bca839d1db318..d33d6f64021a3 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc @@ -5,6 +5,12 @@ Get buckets ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves {anomaly-job} results for one or more buckets. 
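A hedged sketch of a typical call, filtering to high-scoring buckets for a hypothetical job:

[source,console]
--------------------------------------------------
// Return only buckets with an anomaly score of at least 80 for the
// illustrative job "low_request_rate", starting from a given epoch ms
GET _ml/anomaly_detectors/low_request_rate/results/buckets
{
  "anomaly_score": 80,
  "start": "1454530200001"
}
--------------------------------------------------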
[[ml-get-bucket-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc index fc06e286bf46c..565a553e18974 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc @@ -5,6 +5,12 @@ Get scheduled events ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves information about the scheduled events in calendars. [[ml-get-calendar-event-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc index b2c46bbe16c0e..bf00458299633 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc @@ -5,6 +5,12 @@ Get calendars ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves configuration information for calendars. [[ml-get-calendar-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc index 33de5e0f71a08..0f5fa1504c9fe 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc @@ -5,6 +5,12 @@ Get categories ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves {anomaly-job} results for one or more categories. [[ml-get-category-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc index 0a8f1e802715e..f9130bb78f5e7 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc @@ -7,6 +7,12 @@ Get {dfeed} statistics ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves usage information for {ml-docs}/ml-ad-run-jobs.html#ml-ad-datafeeds[{dfeeds}]. [[ml-get-datafeed-stats-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc index a986e2220f928..3b86c5cc22a87 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc @@ -7,6 +7,12 @@ Get {dfeeds} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves configuration information for {dfeeds}. 
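For orientation, a minimal request; the {dfeed} ID is illustrative:

[source,console]
--------------------------------------------------
// Fetch one datafeed by ID; GET _ml/datafeeds retrieves all of them
GET _ml/datafeeds/datafeed-total-requests
--------------------------------------------------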
[[ml-get-datafeed-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc index f73dcd236f1af..dc167238701cd 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-filter.asciidoc @@ -5,6 +5,12 @@ Get filters ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves filters. [[ml-get-filter-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc index 31489e361a848..f2e26344207a8 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc @@ -5,6 +5,12 @@ Get influencers ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves {anomaly-job} results for one or more influencers. [[ml-get-influencer-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-job-model-snapshot-upgrade-stats.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-job-model-snapshot-upgrade-stats.asciidoc index 0939282a75916..32a9b67258b2d 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-job-model-snapshot-upgrade-stats.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-job-model-snapshot-upgrade-stats.asciidoc @@ -7,6 +7,12 @@ Get model snapshot upgrade statistics ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves usage information for {anomaly-job} model snapshot upgrades. [[ml-get-job-model-snapshot-upgrade-stats-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc index 627109d11e11e..1df41abdda27b 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc @@ -5,6 +5,12 @@ Get job statistics ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves usage information for {ml-docs}/ml-ad-overview.html[{anomaly-jobs}]. [[ml-get-job-stats-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc index 33692fd182fa7..74fd1c7ecb54a 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc @@ -5,6 +5,12 @@ Get jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves configuration information for {anomaly-jobs}. 
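A minimal sketch with a hypothetical job ID; omitting the ID (or using `_all`) returns all jobs:

[source,console]
--------------------------------------------------
// Fetch the configuration of a single, illustrative job
GET _ml/anomaly_detectors/high_sum_total_sales
--------------------------------------------------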
[[ml-get-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc index b581b5c3a2eb2..45ae61647bfda 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc @@ -5,6 +5,12 @@ Get overall buckets ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves overall bucket results that summarize the bucket results of multiple {anomaly-jobs}. diff --git a/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc index e74ab3ecb4b12..266ab64ba5e19 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc @@ -5,6 +5,12 @@ Get records ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves anomaly records for an {anomaly-job}. [[ml-get-record-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc index d94bd4060854a..47fdc3db46b60 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc @@ -5,6 +5,12 @@ Get model snapshots ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Retrieves information about model snapshots. [[ml-get-snapshot-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/ml-ad-apis.asciidoc b/docs/reference/ml/anomaly-detection/apis/ml-ad-apis.asciidoc index 856232c933432..951f9522db74b 100644 --- a/docs/reference/ml/anomaly-detection/apis/ml-ad-apis.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/ml-ad-apis.asciidoc @@ -2,6 +2,12 @@ [[ml-ad-apis]] = {ml-cap} {anomaly-detect} APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + You can use the following APIs to perform {ml} {anomaly-detect} activities. See also <>, <>, <>. diff --git a/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc index 385f672f467f6..9a5153952b5a2 100644 --- a/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc @@ -5,6 +5,12 @@ Open jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Opens one or more {anomaly-jobs}. 
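As a sketch, opening a job with an optional timeout; the job ID is hypothetical:

[source,console]
--------------------------------------------------
// Open the illustrative job "low_request_rate" and wait up to 35 minutes
POST _ml/anomaly_detectors/low_request_rate/_open
{
  "timeout": "35m"
}
--------------------------------------------------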
[[ml-open-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc b/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc index 46ffeab694fa3..429dca427a9d9 100644 --- a/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/post-calendar-event.asciidoc @@ -5,6 +5,12 @@ Add events to calendar ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Posts scheduled events in a calendar. [[ml-post-calendar-event-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/post-data.asciidoc b/docs/reference/ml/anomaly-detection/apis/post-data.asciidoc index 931efcf8c2a52..83e5dd77e814a 100644 --- a/docs/reference/ml/anomaly-detection/apis/post-data.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/post-data.asciidoc @@ -7,6 +7,12 @@ deprecated::[7.11.0, "Posting data directly to anomaly detection jobs is deprecated, in a future major version a <> will be required."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Sends data to an anomaly detection job for analysis. [[ml-post-data-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/preview-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/preview-datafeed.asciidoc index 243cd2a5f32a3..c57965401e074 100644 --- a/docs/reference/ml/anomaly-detection/apis/preview-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/preview-datafeed.asciidoc @@ -7,6 +7,12 @@ Preview {dfeeds} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Previews a {dfeed}. [[ml-preview-datafeed-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc index 0c19a08cbd74b..612f472bba37d 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-calendar-job.asciidoc @@ -5,6 +5,12 @@ Add jobs to calendar ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Adds an {anomaly-job} to a calendar. [[ml-put-calendar-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc index fd2b58a31737a..69a4498cc29da 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-calendar.asciidoc @@ -5,6 +5,12 @@ Create calendars ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Instantiates a calendar. 
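For orientation, creating a calendar is a single call; the calendar ID below is illustrative:

[source,console]
--------------------------------------------------
// Create an empty calendar named "planned-outages" (hypothetical ID)
PUT _ml/calendars/planned-outages
--------------------------------------------------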
[[ml-put-calendar-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc index 47e3059666d76..18260d3538e4b 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc @@ -7,6 +7,12 @@ Create {dfeeds} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Instantiates a {dfeed}. [[ml-put-datafeed-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc index b50ba8cb1e23b..3f3157f008e9f 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-filter.asciidoc @@ -5,6 +5,12 @@ Create filters ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Instantiates a filter. [[ml-put-filter-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc index 7bf02e7a0dd6e..30a1039f93db0 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc @@ -5,6 +5,12 @@ Create jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Instantiates an {anomaly-job}. [[ml-put-job-request]] @@ -551,4 +557,3 @@ The API returns the following results: // TESTRESPONSE[s/"job_version" : "8.4.0"/"job_version" : $body.job_version/] // TESTRESPONSE[s/1656087283340/$body.$_path/] // TESTRESPONSE[s/"superuser"/"_es_test_root"/] -// TESTRESPONSE[s/"ignore_throttled" : true/"ignore_throttled" : true,"failure_store":"exclude"/] diff --git a/docs/reference/ml/anomaly-detection/apis/reset-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/reset-job.asciidoc index 9009d634a2e9c..2d9c4696cab4b 100644 --- a/docs/reference/ml/anomaly-detection/apis/reset-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/reset-job.asciidoc @@ -5,6 +5,12 @@ Reset jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Resets an existing {anomaly-job}. [[ml-reset-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc index c8d7a27ee2047..792a102ff2953 100644 --- a/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc @@ -5,6 +5,12 @@ Revert model snapshots ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Reverts to a specific snapshot. 
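A hedged sketch of a revert request; the job ID and snapshot ID are illustrative:

[source,console]
--------------------------------------------------
// Revert the illustrative job to snapshot 1575402237 and delete any
// results created after the snapshot's timestamp
POST _ml/anomaly_detectors/high_sum_total_sales/model_snapshots/1575402237/_revert
{
  "delete_intervening_results": true
}
--------------------------------------------------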
[[ml-revert-snapshot-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc index b54c80133d7dc..4281b3a02b826 100644 --- a/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/start-datafeed.asciidoc @@ -7,6 +7,12 @@ Start {dfeeds} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Starts one or more {dfeeds}. [[ml-start-datafeed-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc index bc15a1de8a056..91e5087715fb7 100644 --- a/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc @@ -7,6 +7,12 @@ Stop {dfeeds} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Stops one or more {dfeeds}. [[ml-stop-datafeed-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc index b3920d9d4f809..c03c921e7875a 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc @@ -7,6 +7,12 @@ Update {dfeeds} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Updates certain properties of a {dfeed}. diff --git a/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc index a4221c37a438e..8fb5376f4acbc 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc @@ -5,6 +5,12 @@ Update filters ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Updates the description of a filter, adds items, or removes items. [[ml-update-filter-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc index ee13247fc8838..bed013c113c01 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc @@ -5,6 +5,12 @@ Update jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Updates certain properties of an {anomaly-job}. [[ml-update-job-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc index f8c0384860029..54276f62ff094 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc @@ -5,6 +5,12 @@ Update model snapshots ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Updates certain properties of a snapshot. 
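As a sketch, assuming hypothetical job and snapshot IDs, an update might set a description and keep the snapshot past the retention period:

[source,console]
--------------------------------------------------
// Both path parameters below are illustrative
POST _ml/anomaly_detectors/it_ops_new_logs/model_snapshots/1491852978/_update
{
  "description": "Snapshot taken before the holiday traffic spike",
  "retain": true
}
--------------------------------------------------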
[[ml-update-snapshot-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/upgrade-job-model-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/upgrade-job-model-snapshot.asciidoc index 3a74e3b2296df..2538b4958ada1 100644 --- a/docs/reference/ml/anomaly-detection/apis/upgrade-job-model-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/upgrade-job-model-snapshot.asciidoc @@ -5,6 +5,12 @@ Upgrade model snapshots ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Upgrades an {anomaly-detect} model snapshot to the latest major version. NOTE: From {es} 8.10.0, a new version number is used to diff --git a/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc b/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc index c71673be7dc00..820845d0fd233 100644 --- a/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc @@ -5,6 +5,12 @@ Validate detectors ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Validates detector configuration information. [[ml-valid-detector-request]] diff --git a/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc index c77623566d4c8..382199e315695 100644 --- a/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc @@ -5,6 +5,12 @@ Validate jobs ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-anomaly[{ml-cap} {anomaly-detect} APIs]. +-- + Validates {anomaly-job} configuration information. [[ml-valid-job-request]] diff --git a/docs/reference/ml/common/apis/get-ml-info.asciidoc b/docs/reference/ml/common/apis/get-ml-info.asciidoc index 104375bd641c8..62fae2a9a9184 100644 --- a/docs/reference/ml/common/apis/get-ml-info.asciidoc +++ b/docs/reference/ml/common/apis/get-ml-info.asciidoc @@ -7,6 +7,12 @@ Get {ml} info ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml[{ml-cap} APIs]. +-- + Returns defaults and limits used by machine learning. [[get-ml-info-request]] diff --git a/docs/reference/ml/common/apis/get-ml-memory.asciidoc b/docs/reference/ml/common/apis/get-ml-memory.asciidoc index 81e0f59a97e58..8c1cf620b70eb 100644 --- a/docs/reference/ml/common/apis/get-ml-memory.asciidoc +++ b/docs/reference/ml/common/apis/get-ml-memory.asciidoc @@ -7,6 +7,12 @@ Get {ml} memory stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml[{ml-cap} APIs]. +-- + Returns information on how {ml} is using memory. [[get-ml-memory-request]] diff --git a/docs/reference/ml/common/apis/ml-apis.asciidoc b/docs/reference/ml/common/apis/ml-apis.asciidoc index c4349f3eb7366..95f102ceecfa8 100644 --- a/docs/reference/ml/common/apis/ml-apis.asciidoc +++ b/docs/reference/ml/common/apis/ml-apis.asciidoc @@ -2,6 +2,12 @@ [[ml-apis]] = {ml-cap} APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml[{ml-cap} APIs]. 
+-- + You can use the following APIs to retrieve information related to the {stack-ml-features}: diff --git a/docs/reference/ml/common/apis/set-upgrade-mode.asciidoc b/docs/reference/ml/common/apis/set-upgrade-mode.asciidoc index 2469753382f9b..52adcac9bacdc 100644 --- a/docs/reference/ml/common/apis/set-upgrade-mode.asciidoc +++ b/docs/reference/ml/common/apis/set-upgrade-mode.asciidoc @@ -5,6 +5,12 @@ Set upgrade mode ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml[{ml-cap} APIs]. +-- + Sets a cluster wide upgrade_mode setting that prepares {ml} indices for an upgrade. diff --git a/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc index b505da570244f..a6b5058eab887 100644 --- a/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc @@ -6,6 +6,12 @@ Delete {dfanalytics-jobs} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Deletes an existing {dfanalytics-job}. diff --git a/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc index 8d31fcd0107d9..fb0b6da0bc805 100644 --- a/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc @@ -7,6 +7,12 @@ Evaluate {dfanalytics} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Evaluates the {dfanalytics} for an annotated index. diff --git a/docs/reference/ml/df-analytics/apis/explain-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/explain-dfanalytics.asciidoc index 0ee7ec5634582..566cfc3ba3596 100644 --- a/docs/reference/ml/df-analytics/apis/explain-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/explain-dfanalytics.asciidoc @@ -7,6 +7,12 @@ Explain {dfanalytics} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Explains a {dataframe-analytics-config}. diff --git a/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc index 9037819c9f236..960388d8d4e33 100644 --- a/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc @@ -6,6 +6,12 @@ Get {dfanalytics-jobs} stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Retrieves usage information for {ml-docs}/ml-dfanalytics.html[{dfanalytics-jobs}]. diff --git a/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc index c2a4caa981da1..6e65bdfe83a25 100644 --- a/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc @@ -5,6 +5,12 @@ Get {dfanalytics-jobs} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. 
+-- + Retrieves configuration information for {dfanalytics-jobs}. diff --git a/docs/reference/ml/df-analytics/apis/ml-df-analytics-apis.asciidoc b/docs/reference/ml/df-analytics/apis/ml-df-analytics-apis.asciidoc index ba16f728e4b9a..a90f42b7b9757 100644 --- a/docs/reference/ml/df-analytics/apis/ml-df-analytics-apis.asciidoc +++ b/docs/reference/ml/df-analytics/apis/ml-df-analytics-apis.asciidoc @@ -2,6 +2,12 @@ [[ml-df-analytics-apis]] = {ml-cap} {dfanalytics} APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + You can use the following APIs to perform {ml} {dfanalytics} activities: * <> diff --git a/docs/reference/ml/df-analytics/apis/preview-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/preview-dfanalytics.asciidoc index 2c61c3263992c..896cf5ca8eb8a 100644 --- a/docs/reference/ml/df-analytics/apis/preview-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/preview-dfanalytics.asciidoc @@ -7,6 +7,12 @@ Preview {dfanalytics} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Previews the features used by a {dataframe-analytics-config}. diff --git a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc index 54cbe78b34452..8c2e95e0744ed 100644 --- a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc @@ -6,6 +6,12 @@ Create {dfanalytics-jobs} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Instantiates a {dfanalytics-job}. diff --git a/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc index 70e996ef8dd0a..7409c443ff802 100644 --- a/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc @@ -7,6 +7,12 @@ Start {dfanalytics-jobs} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Starts a {dfanalytics-job}. diff --git a/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc index 2fa3bc4413d7a..95d39b98e3bbd 100644 --- a/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc @@ -7,6 +7,12 @@ Stop {dfanalytics-jobs} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Stops one or more {dfanalytics-jobs}. diff --git a/docs/reference/ml/df-analytics/apis/update-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/update-dfanalytics.asciidoc index 49cca176be69b..35e871761b21a 100644 --- a/docs/reference/ml/df-analytics/apis/update-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/update-dfanalytics.asciidoc @@ -6,6 +6,12 @@ Update {dfanalytics-jobs} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-data-frame[{ml-cap} {dfanalytics} APIs]. +-- + Updates an existing {dfanalytics-job}. 
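For orientation, a sketch that raises the memory limit of a hypothetical job; `model_memory_limit` is one of the properties this API can update:

[source,console]
--------------------------------------------------
// The job ID "model-flight-delays" is illustrative
POST _ml/data_frame/analytics/model-flight-delays/_update
{
  "model_memory_limit": "200mb"
}
--------------------------------------------------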
diff --git a/docs/reference/ml/ml-shared.asciidoc b/docs/reference/ml/ml-shared.asciidoc index 4948db48664ed..af384c2c90011 100644 --- a/docs/reference/ml/ml-shared.asciidoc +++ b/docs/reference/ml/ml-shared.asciidoc @@ -1167,6 +1167,12 @@ tag::inference-config-text-embedding-size[] The number of dimensions in the embedding vector produced by the model. end::inference-config-text-embedding-size[] +tag::inference-config-text-expansion[] +The text expansion task works with sparse embedding models to transform an input sequence +into a vector of weighted tokens. These embeddings capture semantic meanings and +context and can be used in a <> field for powerful insights. +end::inference-config-text-expansion[] + tag::inference-config-text-similarity[] Text similarity takes an input sequence and compares it with another input sequence. This is commonly referred to as cross-encoding. This task is useful for ranking document text when comparing it to another provided text input. diff --git a/docs/reference/ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc b/docs/reference/ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc index f24379705fc75..1e1639ab5eee2 100644 --- a/docs/reference/ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc +++ b/docs/reference/ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc @@ -6,6 +6,12 @@ Clear trained model deployment cache ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Clears the {infer} cache on all nodes where the deployment is assigned. [[clear-trained-model-deployment-cache-request]] diff --git a/docs/reference/ml/trained-models/apis/delete-trained-models-aliases.asciidoc b/docs/reference/ml/trained-models/apis/delete-trained-models-aliases.asciidoc index 1b67a642f7b7a..22e0422df9fb7 100644 --- a/docs/reference/ml/trained-models/apis/delete-trained-models-aliases.asciidoc +++ b/docs/reference/ml/trained-models/apis/delete-trained-models-aliases.asciidoc @@ -6,6 +6,12 @@ Delete trained model aliases ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Deletes a trained model alias. diff --git a/docs/reference/ml/trained-models/apis/delete-trained-models.asciidoc b/docs/reference/ml/trained-models/apis/delete-trained-models.asciidoc index 1b54343d1f1c9..d738c6630a78f 100644 --- a/docs/reference/ml/trained-models/apis/delete-trained-models.asciidoc +++ b/docs/reference/ml/trained-models/apis/delete-trained-models.asciidoc @@ -6,6 +6,12 @@ Delete trained models ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Deletes an existing trained {infer} model. diff --git a/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc b/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc index b55f022a5d168..82263c98e9112 100644 --- a/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc +++ b/docs/reference/ml/trained-models/apis/get-trained-models-stats.asciidoc @@ -6,6 +6,12 @@ Get trained models stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Retrieves usage information for trained models. 
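A minimal sketch of the call:

[source,console]
--------------------------------------------------
// Stats for all trained models; append a model ID to scope to one model
GET _ml/trained_models/_stats
--------------------------------------------------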
diff --git a/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc b/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc index ffb46eaa1fe2e..4f583319ca383 100644 --- a/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc +++ b/docs/reference/ml/trained-models/apis/get-trained-models.asciidoc @@ -6,6 +6,12 @@ Get trained models ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Retrieves configuration information about {ml-docs}/ml-nlp-deploy-models.html[{infer} trained models]. diff --git a/docs/reference/ml/trained-models/apis/infer-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/infer-trained-model-deployment.asciidoc index 83bc56d18df63..8bb48eeb5cbf2 100644 --- a/docs/reference/ml/trained-models/apis/infer-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/infer-trained-model-deployment.asciidoc @@ -6,6 +6,12 @@ Infer trained model deployment ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Evaluates a trained model. deprecated::[8.3.0,Replaced by <>.] diff --git a/docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc b/docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc index 7acbc0bd23859..93b8aa3422abc 100644 --- a/docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc +++ b/docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc @@ -6,6 +6,12 @@ Infer trained model ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Evaluates a trained model. The model may be any supervised model either trained by {dfanalytics} or imported. diff --git a/docs/reference/ml/trained-models/apis/ml-trained-models-apis.asciidoc b/docs/reference/ml/trained-models/apis/ml-trained-models-apis.asciidoc index 83ef3c49fb5ed..5b0fc2b83afa5 100644 --- a/docs/reference/ml/trained-models/apis/ml-trained-models-apis.asciidoc +++ b/docs/reference/ml/trained-models/apis/ml-trained-models-apis.asciidoc @@ -2,6 +2,12 @@ [[ml-df-trained-models-apis]] = {ml-cap} trained model APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + You can use the following APIs to perform model management operations: * <> diff --git a/docs/reference/ml/trained-models/apis/put-trained-model-definition-part.asciidoc b/docs/reference/ml/trained-models/apis/put-trained-model-definition-part.asciidoc index d1da29abffcd3..857b86892a6b9 100644 --- a/docs/reference/ml/trained-models/apis/put-trained-model-definition-part.asciidoc +++ b/docs/reference/ml/trained-models/apis/put-trained-model-definition-part.asciidoc @@ -6,6 +6,12 @@ Create part of a trained model ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Creates part of a trained model definition. 
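As a hedged sketch, uploading the first chunk of a model definition might look like this; the model ID, part count, and total length are illustrative, and the base64 payload is elided:

[source,console]
--------------------------------------------------
// Part numbers start at 0; "definition" carries the base64-encoded chunk
PUT _ml/trained_models/my-imported-model/definition/0
{
  "definition": "...",
  "total_definition_length": 265632637,
  "total_parts": 64
}
--------------------------------------------------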
[[ml-put-trained-model-definition-part-request]] diff --git a/docs/reference/ml/trained-models/apis/put-trained-model-vocabulary.asciidoc b/docs/reference/ml/trained-models/apis/put-trained-model-vocabulary.asciidoc index 2fdf86259388f..39f93f882235a 100644 --- a/docs/reference/ml/trained-models/apis/put-trained-model-vocabulary.asciidoc +++ b/docs/reference/ml/trained-models/apis/put-trained-model-vocabulary.asciidoc @@ -6,6 +6,12 @@ Create trained model vocabulary ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Creates a trained model vocabulary. This is supported only for natural language processing (NLP) models. diff --git a/docs/reference/ml/trained-models/apis/put-trained-models-aliases.asciidoc b/docs/reference/ml/trained-models/apis/put-trained-models-aliases.asciidoc index d8c00efe28015..3fcc7f5bb2da4 100644 --- a/docs/reference/ml/trained-models/apis/put-trained-models-aliases.asciidoc +++ b/docs/reference/ml/trained-models/apis/put-trained-models-aliases.asciidoc @@ -6,6 +6,11 @@ Create or update trained model aliases ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- Creates or updates a trained model alias. diff --git a/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc b/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc index 32265af5f795b..ccd76b7095762 100644 --- a/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc +++ b/docs/reference/ml/trained-models/apis/put-trained-models.asciidoc @@ -3,11 +3,15 @@ = Create trained models API [subs="attributes"] ++++ - Create trained models - ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Creates a trained model. WARNING: Models created in version 7.8.0 are not backwards compatible @@ -391,10 +395,10 @@ the model definition is not supplied. (Required, object) The default configuration for inference. This can be: `regression`, `classification`, `fill_mask`, `ner`, `question_answering`, -`text_classification`, `text_embedding` or `zero_shot_classification`. +`text_classification`, `text_embedding`, `text_expansion` or `zero_shot_classification`. If `regression` or `classification`, it must match the `target_type` of the underlying `definition.trained_model`. If `fill_mask`, `ner`, -`question_answering`, `text_classification`, or `text_embedding`; the +`question_answering`, `text_classification`, `text_embedding` or `text_expansion`; the `model_type` must be `pytorch`. + .Properties of `inference_config` @@ -588,6 +592,25 @@ Refer to <> to review the properties of the `tokenization` object. ===== +`text_expansion`::: +(Object, optional) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-expansion] ++ +.Properties of text_expansion inference +[%collapsible%open] +===== +`results_field`:::: +(Optional, string) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] + +`tokenization`:::: +(Optional, object) +include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] ++ +Refer to <> to review the properties of the +`tokenization` object. 
+===== + `text_similarity`::: (Object, optional) include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity] diff --git a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc index 6f7e2a4d9f988..7eaa149976652 100644 --- a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc @@ -6,6 +6,12 @@ Start trained model deployment ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Starts a new trained model deployment. [[start-trained-model-deployment-request]] @@ -138,8 +144,8 @@ normal priority deployments. Controls how many inference requests are allowed in the queue at a time. Every machine learning node in the cluster where the model can be allocated has a queue of this size; when the number of requests exceeds the total value, -new requests are rejected with a 429 error. Defaults to 1024. Max allowed value -is 1000000. +new requests are rejected with a 429 error. Defaults to 10000. Max allowed value +is 100000. `threads_per_allocation`:: (Optional, integer) @@ -173,7 +179,7 @@ The API returns the following results: "model_bytes": 265632637, "threads_per_allocation" : 1, "number_of_allocations" : 1, - "queue_capacity" : 1024, + "queue_capacity" : 10000, "priority": "normal" }, "routing_table": { @@ -229,4 +235,4 @@ POST _ml/trained_models/my_model/deployment/_start?deployment_id=my_model_for_se } } -------------------------------------------------- -// TEST[skip:TBD] \ No newline at end of file +// TEST[skip:TBD] diff --git a/docs/reference/ml/trained-models/apis/stop-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/stop-trained-model-deployment.asciidoc index 622b440622cd3..2f179e2391b78 100644 --- a/docs/reference/ml/trained-models/apis/stop-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/stop-trained-model-deployment.asciidoc @@ -6,6 +6,12 @@ Stop trained model deployment ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Stops a trained model deployment. [[stop-trained-model-deployment-request]] diff --git a/docs/reference/ml/trained-models/apis/update-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/update-trained-model-deployment.asciidoc index d49ee3c6e872c..a986e412f1a8a 100644 --- a/docs/reference/ml/trained-models/apis/update-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/update-trained-model-deployment.asciidoc @@ -7,6 +7,12 @@ Update trained model deployment ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-ml-trained-model[{ml-cap} trained model APIs]. +-- + Updates certain properties of a trained model deployment. 
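For orientation, a sketch that scales a hypothetical deployment:

[source,console]
--------------------------------------------------
// Increase the illustrative deployment to 4 allocations
POST _ml/trained_models/my-deployed-model/deployment/_update
{
  "number_of_allocations": 4
}
--------------------------------------------------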
[[update-trained-model-deployment-request]] diff --git a/docs/reference/modules/cluster.asciidoc b/docs/reference/modules/cluster.asciidoc index b3eaa5b47c238..cf8e97de5e188 100644 --- a/docs/reference/modules/cluster.asciidoc +++ b/docs/reference/modules/cluster.asciidoc @@ -27,7 +27,23 @@ include::cluster/shards_allocation.asciidoc[] include::cluster/disk_allocator.asciidoc[] -include::cluster/allocation_awareness.asciidoc[] +[[shard-allocation-awareness-settings]] +==== Shard allocation awareness settings + +You can use <> as _awareness attributes_ to enable {es} +to take your physical hardware configuration into account when allocating shards. +If {es} knows which nodes are on the same physical server, in the same rack, or +in the same zone, it can distribute the primary shard and its replica shards to +minimize the risk of losing all shard copies in the event of a failure. <>. + +`cluster.routing.allocation.awareness.attributes`:: + (<>) + The node attributes that {es} should use as awareness attributes. For example, if you have a `rack_id` attribute that specifies the rack in which each node resides, you can set this setting to `rack_id` to ensure that primary and replica shards are not allocated on the same rack. You can specify multiple attributes as a comma-separated list. + +`cluster.routing.allocation.awareness.force.*`:: + (<>) + The shard allocation awareness values that must exist for shards to be reallocated in case of location failure. Learn more about <>. + include::cluster/allocation_filtering.asciidoc[] diff --git a/docs/reference/modules/cluster/allocation_awareness.asciidoc b/docs/reference/modules/cluster/allocation_awareness.asciidoc index 9c6197f9ba40d..34164cd364fc5 100644 --- a/docs/reference/modules/cluster/allocation_awareness.asciidoc +++ b/docs/reference/modules/cluster/allocation_awareness.asciidoc @@ -1,5 +1,5 @@ [[shard-allocation-awareness]] -==== Shard allocation awareness +== Shard allocation awareness You can use custom node attributes as _awareness attributes_ to enable {es} to take your physical hardware configuration into account when allocating shards. @@ -7,12 +7,7 @@ If {es} knows which nodes are on the same physical server, in the same rack, or in the same zone, it can distribute the primary shard and its replica shards to minimize the risk of losing all shard copies in the event of a failure. -When shard allocation awareness is enabled with the -<> -`cluster.routing.allocation.awareness.attributes` setting, shards are only -allocated to nodes that have values set for the specified awareness attributes. -If you use multiple awareness attributes, {es} considers each attribute -separately when allocating shards. +When shard allocation awareness is enabled with the `cluster.routing.allocation.awareness.attributes` setting, shards are only allocated to nodes that have values set for the specified awareness attributes. If you use multiple awareness attributes, {es} considers each attribute separately when allocating shards. NOTE: The number of attribute values determines how many shard copies are allocated in each location. If the number of nodes in each location is @@ -22,11 +17,11 @@ unassigned. TIP: Learn more about <>. [[enabling-awareness]] -===== Enabling shard allocation awareness +=== Enabling shard allocation awareness To enable shard allocation awareness: -. Specify the location of each node with a custom node attribute. For example, +. Specify the location of each node with a <>. 
For example, if you want Elasticsearch to distribute shards across different racks, you might use an awareness attribute called `rack_id`. + @@ -94,7 +89,7 @@ copies of a particular shard from being allocated in the same location, you can enable forced awareness. [[forced-awareness]] -===== Forced awareness +=== Forced awareness By default, if one location fails, {es} spreads its shards across the remaining locations. This might be undesirable if the cluster does not have sufficient diff --git a/docs/reference/modules/cluster/allocation_filtering.asciidoc b/docs/reference/modules/cluster/allocation_filtering.asciidoc index e70e43682973b..d0d2652059048 100644 --- a/docs/reference/modules/cluster/allocation_filtering.asciidoc +++ b/docs/reference/modules/cluster/allocation_filtering.asciidoc @@ -6,7 +6,7 @@ allocates shards from any index. These cluster wide filters are applied in conjunction with <> and <>. -Shard allocation filters can be based on custom node attributes or the built-in +Shard allocation filters can be based on <> or the built-in `_name`, `_host_ip`, `_publish_ip`, `_ip`, `_host`, `_id` and `_tier` attributes. The `cluster.routing.allocation` settings are <>, enabling live indices to @@ -59,9 +59,9 @@ The cluster allocation settings support the following built-in attributes: NOTE: `_tier` filtering is based on <> roles. Only a subset of roles are <> roles, and the generic -<> will match any tier filtering. +<> will match any tier filtering. You can use wildcards when specifying attribute values, for example: diff --git a/docs/reference/modules/cluster/disk_allocator.asciidoc b/docs/reference/modules/cluster/disk_allocator.asciidoc index 02cc48c6e27fc..8efe4c0132e86 100644 --- a/docs/reference/modules/cluster/disk_allocator.asciidoc +++ b/docs/reference/modules/cluster/disk_allocator.asciidoc @@ -41,6 +41,23 @@ on the affected node drops below the high watermark, {es} automatically removes the write block. Refer to <> to resolve persistent watermark errors. +[NOTE] +.Max headroom settings +=================================================== + +Max headroom settings apply only when watermark settings are percentages or ratios. + +A max headroom value is intended to cap the required free disk space before hitting +the respective watermark. This is useful for servers with larger disks, where a percentage or ratio watermark could translate to an overly large free disk space requirement. In this case, the max headroom can be used to cap the required free disk space amount. + +For example, where `cluster.routing.allocation.disk.watermark.flood_stage` is 95% and `cluster.routing.allocation.disk.watermark.flood_stage.max_headroom` is 100GB, this means that: + +* For a smaller disk, e.g., of 100GB, the flood watermark will hit at 95%, meaning at 5GB of free space, since 5GB is smaller than the 100GB max headroom value. +* For a larger disk, e.g., of 100TB, the flood watermark will hit at 100GB of free space. That is because the 95% flood watermark alone would require 5TB of free disk space, but is capped by the max headroom setting to 100GB. + +Max headroom settings have their default values only if their respective watermark settings are not explicitly set. If watermarks are explicitly set, then the max headroom settings do not have their default values, and need to be explicitly set if they are needed.
+=================================================== + [[disk-based-shard-allocation-does-not-balance]] [TIP] ==== @@ -100,18 +117,7 @@ is now `true`. The setting will be removed in a future release. + -- (<>) -Controls the flood stage watermark, which defaults to 95%. {es} enforces a read-only index block (`index.blocks.read_only_allow_delete`) on every index that has one or more shards allocated on the node, and that has at least one disk exceeding the flood stage. This setting is a last resort to prevent nodes from running out of disk space. The index block is automatically released when the disk utilization falls below the high watermark. Similarly to the low and high watermark values, it can alternatively be set to a ratio value, e.g., `0.95`, or an absolute byte value. - -An example of resetting the read-only index block on the `my-index-000001` index: - -[source,console] --------------------------------------------------- -PUT /my-index-000001/_settings -{ - "index.blocks.read_only_allow_delete": null -} --------------------------------------------------- -// TEST[setup:my_index] +Controls the flood stage watermark, which defaults to 95%. {es} enforces a read-only index block (<>) on every index that has one or more shards allocated on the node, and that has at least one disk exceeding the flood stage. This setting is a last resort to prevent nodes from running out of disk space. The index block is automatically released when the disk utilization falls below the high watermark. Similarly to the low and high watermark values, it can alternatively be set to a ratio value, e.g., `0.95`, or an absolute byte value. -- // end::cluster-routing-flood-stage-tag[] @@ -121,10 +127,10 @@ Defaults to 100GB when `cluster.routing.allocation.disk.watermark.flood_stage` is not explicitly set. This caps the amount of free space required. -NOTE: You cannot mix the usage of percentage/ratio values and byte values across +NOTE: You can't mix the usage of percentage/ratio values and byte values across the `cluster.routing.allocation.disk.watermark.low`, `cluster.routing.allocation.disk.watermark.high`, and `cluster.routing.allocation.disk.watermark.flood_stage` settings. Either all values -are set to percentage/ratio values, or all are set to byte values. This enforcement is +must be set to percentage/ratio values, or all must be set to byte values. This is required so that {es} can validate that the settings are internally consistent, ensuring that the low disk threshold is less than the high disk threshold, and the high disk threshold is less than the flood stage threshold. A similar comparison check is done for the max @@ -150,44 +156,6 @@ set. This caps the amount of free space required on dedicated frozen nodes. cluster. Defaults to `30s`. NOTE: Percentage values refer to used disk space, while byte values refer to -free disk space. This can be confusing, since it flips the meaning of high and +free disk space. This can be confusing, because it flips the meaning of high and low. For example, it makes sense to set the low watermark to 10gb and the high -watermark to 5gb, but not the other way around. 
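To make the watermark and max headroom interaction described above concrete, here is a hedged sketch of setting both through the cluster settings API. The setting names appear in the surrounding text, and the values simply restate the documented defaults:

[source,console]
--------------------------------------------------
PUT _cluster/settings
{
  "persistent": {
    "cluster.routing.allocation.disk.watermark.flood_stage": "95%",
    "cluster.routing.allocation.disk.watermark.flood_stage.max_headroom": "100GB"
  }
}
--------------------------------------------------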
- -An example of updating the low watermark to at least 100 gigabytes free, a high -watermark of at least 50 gigabytes free, and a flood stage watermark of 10 -gigabytes free, and updating the information about the cluster every minute: - -[source,console] --------------------------------------------------- -PUT _cluster/settings -{ - "persistent": { - "cluster.routing.allocation.disk.watermark.low": "100gb", - "cluster.routing.allocation.disk.watermark.high": "50gb", - "cluster.routing.allocation.disk.watermark.flood_stage": "10gb", - "cluster.info.update.interval": "1m" - } -} --------------------------------------------------- - -Concerning the max headroom settings for the watermarks, please note -that these apply only in the case that the watermark settings are percentages/ratios. -The aim of a max headroom value is to cap the required free disk space before hitting -the respective watermark. This is especially useful for servers with larger -disks, where a percentage/ratio watermark could translate to a big free disk space requirement, -and the max headroom can be used to cap the required free disk space amount. -As an example, let us take the default settings for the flood watermark. -It has a 95% default value, and the flood max headroom setting has a default value of 100GB. -This means that: - -* For a smaller disk, e.g., of 100GB, the flood watermark will hit at 95%, meaning at 5GB -of free space, since 5GB is smaller than the 100GB max headroom value. -* For a larger disk, e.g., of 100TB, the flood watermark will hit at 100GB of free space. -That is because the 95% flood watermark alone would require 5TB of free disk space, but -that is capped by the max headroom setting to 100GB. - -Finally, the max headroom settings have their default values only if their respective watermark -settings are not explicitly set (thus, they have their default percentage values). -If watermarks are explicitly set, then the max headroom settings do not have their default values, -and would need to be explicitly set if they are desired. +watermark to 5gb, but not the other way around. \ No newline at end of file diff --git a/docs/reference/modules/cluster/misc.asciidoc b/docs/reference/modules/cluster/misc.asciidoc index 75eaca88c66b1..b66ac1fdb0cca 100644 --- a/docs/reference/modules/cluster/misc.asciidoc +++ b/docs/reference/modules/cluster/misc.asciidoc @@ -1,6 +1,9 @@ [[misc-cluster-settings]] === Miscellaneous cluster settings +[[cluster-name]] +include::{es-ref-dir}/setup/important-settings/cluster-name.asciidoc[] + [discrete] [[cluster-read-only]] ==== Metadata diff --git a/docs/reference/modules/discovery/bootstrapping.asciidoc b/docs/reference/modules/discovery/bootstrapping.asciidoc index 5120c1d17e69b..a885f1633ea49 100644 --- a/docs/reference/modules/discovery/bootstrapping.asciidoc +++ b/docs/reference/modules/discovery/bootstrapping.asciidoc @@ -2,7 +2,7 @@ === Bootstrapping a cluster Starting an Elasticsearch cluster for the very first time requires the initial -set of <> to be explicitly defined on one or +set of <> to be explicitly defined on one or more of the master-eligible nodes in the cluster. This is known as _cluster bootstrapping_. This is only required the first time a cluster starts up. 
Freshly-started nodes that are joining a running cluster obtain this diff --git a/docs/reference/modules/discovery/publishing.asciidoc b/docs/reference/modules/discovery/publishing.asciidoc index af664585085c2..2bb031e6a9860 100644 --- a/docs/reference/modules/discovery/publishing.asciidoc +++ b/docs/reference/modules/discovery/publishing.asciidoc @@ -1,5 +1,23 @@ +[[cluster-state-overview]] +=== Cluster state + +The _cluster state_ is an internal data structure which keeps track of a +variety of information needed by every node, including: + +* The identity and attributes of the other nodes in the cluster + +* Cluster-wide settings + +* Index metadata, including the mapping and settings for each index + +* The location and status of every shard copy in the cluster + +The elected master node ensures that every node in the cluster has a copy of +the same cluster state. The <> lets you retrieve a +representation of this internal state for debugging or diagnostic purposes. + [[cluster-state-publishing]] -=== Publishing the cluster state +==== Publishing the cluster state The elected master node is the only node in a cluster that can make changes to the cluster state. The elected master node processes one batch of cluster state @@ -58,3 +76,16 @@ speed of the storage on each master-eligible node, as well as the reliability and latency of the network interconnections between all nodes in the cluster. You must therefore ensure that the storage and networking available to the nodes in your cluster are good enough to meet your performance goals. + +[[dangling-index]] +==== Dangling indices + +When a node joins the cluster, if it finds any shards stored in its local +data directory that do not already exist in the cluster state, it will consider +those shards to belong to a "dangling" index. You can list, import or +delete dangling indices using the <>. + +NOTE: The API cannot offer any guarantees as to whether the imported data +truly represents the latest state of the data when the index was still part +of the cluster. \ No newline at end of file diff --git a/docs/reference/modules/discovery/voting.asciidoc b/docs/reference/modules/discovery/voting.asciidoc index 04cae9d02ab66..f4bd4756d8978 100644 --- a/docs/reference/modules/discovery/voting.asciidoc +++ b/docs/reference/modules/discovery/voting.asciidoc @@ -2,7 +2,7 @@ === Voting configurations Each {es} cluster has a _voting configuration_, which is the set of -<> whose responses are counted when making +<> whose responses are counted when making decisions such as electing a new master or committing a new cluster state. Decisions are made only after a majority (more than half) of the nodes in the voting configuration respond. diff --git a/docs/reference/modules/gateway.asciidoc b/docs/reference/modules/gateway.asciidoc index bf7e6de64f093..35a5ef6ddec00 100644 --- a/docs/reference/modules/gateway.asciidoc +++ b/docs/reference/modules/gateway.asciidoc @@ -1,10 +1,11 @@ [[modules-gateway]] === Local gateway settings +[[dangling-indices]] The local gateway stores the cluster state and shard data across full cluster restarts. -The following _static_ settings, which must be set on every <>, +The following _static_ settings, which must be set on every <>, control how long a freshly elected master should wait before it tries to recover the <> and the cluster's data. 
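As a companion to the dangling indices notes above: the list endpoint referenced there (but not shown) is a single call. A minimal sketch:

[source,console]
--------------------------------------------------
GET /_dangling
--------------------------------------------------

The response lists each dangling index along with the UUID needed to import or delete it.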
@@ -36,17 +37,4 @@ These settings can be configured in `elasticsearch.yml` as follows: gateway.expected_data_nodes: 3 gateway.recover_after_time: 600s gateway.recover_after_data_nodes: 3 --------------------------------------------------- - -[[dangling-indices]] -==== Dangling indices - -When a node joins the cluster, if it finds any shards stored in its local -data directory that do not already exist in the cluster, it will consider -those shards to belong to a "dangling" index. You can list, import or -delete dangling indices using the <>. - -NOTE: The API cannot offer any guarantees as to whether the imported data -truly represents the latest state of the data when the index was still part -of the cluster. +-------------------------------------------------- \ No newline at end of file diff --git a/docs/reference/modules/indices/fielddata.asciidoc b/docs/reference/modules/indices/fielddata.asciidoc index 1383bf74d6d4c..688685c0a2247 100644 --- a/docs/reference/modules/indices/fielddata.asciidoc +++ b/docs/reference/modules/indices/fielddata.asciidoc @@ -5,10 +5,6 @@ The field data cache contains <> and <>. This behavior can be configured. @@ -20,16 +16,12 @@ at the cost of rebuilding the cache as needed. If the circuit breaker limit is reached, further requests that increase the cache size will be prevented. In this case you should manually <>. +TIP: You can monitor memory usage for field data as well as the field data circuit +breaker using +the <> or the <>. + `indices.fielddata.cache.size`:: (<>) The max size of the field data cache, eg `38%` of node heap space, or an absolute value, eg `12GB`. Defaults to unbounded. If you choose to set it, -it should be smaller than <> limit. - -[discrete] -[[fielddata-monitoring]] -==== Monitoring field data - -You can monitor memory usage for field data as well as the field data circuit -breaker using -the <> or the <>. +it should be smaller than <> limit. \ No newline at end of file diff --git a/docs/reference/modules/indices/request_cache.asciidoc b/docs/reference/modules/indices/request_cache.asciidoc index 4d4d349c685a1..f6ad65245836f 100644 --- a/docs/reference/modules/indices/request_cache.asciidoc +++ b/docs/reference/modules/indices/request_cache.asciidoc @@ -1,4 +1,4 @@ -[[shard-request-cache]] +[[shard-request-cache-settings]] === Shard request cache settings When a search request is run against an index or against many indices, each @@ -10,139 +10,16 @@ The shard-level request cache module caches the local results on each shard. This allows frequently used (and potentially heavy) search requests to return results almost instantly. The requests cache is a very good fit for the logging use case, where only the most recent index is being actively updated -- -results from older indices will be served directly from the cache. +results from older indices will be served directly from the cache. You can use shard request cache settings to control the size and expiration of the cache. -[IMPORTANT] -=================================== - -By default, the requests cache will only cache the results of search requests -where `size=0`, so it will not cache `hits`, -but it will cache `hits.total`, <>, and -<>. - -Most queries that use `now` (see <>) cannot be cached. - -Scripted queries that use the API calls which are non-deterministic, such as -`Math.random()` or `new Date()` are not cached. 
-=================================== - -[discrete] -==== Cache invalidation - -The cache is smart -- it keeps the same _near real-time_ promise as uncached -search. - -Cached results are invalidated automatically whenever the shard refreshes to -pick up changes to the documents or when you update the mapping. In other -words you will always get the same results from the cache as you would for an -uncached search request. - -The longer the refresh interval, the longer that cached entries will remain -valid even if there are changes to the documents. If the cache is full, the -least recently used cache keys will be evicted. - -The cache can be expired manually with the <>: - -[source,console] ------------------------- -POST /my-index-000001,my-index-000002/_cache/clear?request=true ------------------------- -// TEST[s/^/PUT my-index-000001\nPUT my-index-000002\n/] - -[discrete] -==== Enabling and disabling caching - -The cache is enabled by default, but can be disabled when creating a new -index as follows: - -[source,console] ------------------------------ -PUT /my-index-000001 -{ - "settings": { - "index.requests.cache.enable": false - } -} ------------------------------ - -It can also be enabled or disabled dynamically on an existing index with the -<> API: - -[source,console] ------------------------------ -PUT /my-index-000001/_settings -{ "index.requests.cache.enable": true } ------------------------------ -// TEST[continued] - - -[discrete] -==== Enabling and disabling caching per request - -The `request_cache` query-string parameter can be used to enable or disable -caching on a *per-request* basis. If set, it overrides the index-level setting: - -[source,console] ------------------------------ -GET /my-index-000001/_search?request_cache=true -{ - "size": 0, - "aggs": { - "popular_colors": { - "terms": { - "field": "colors" - } - } - } -} ------------------------------ -// TEST[continued] - -Requests where `size` is greater than 0 will not be cached even if the request cache is -enabled in the index settings. To cache these requests you will need to use the -query-string parameter detailed here. - -[discrete] -==== Cache key - -A hash of the whole JSON body is used as the cache key. This means that if the JSON -changes -- for instance if keys are output in a different order -- then the -cache key will not be recognised. - -TIP: Most JSON libraries support a _canonical_ mode which ensures that JSON -keys are always emitted in the same order. This canonical mode can be used in -the application to ensure that a request is always serialized in the same way. +To learn more about the shard request cache, see <>. [discrete] ==== Cache settings -The cache is managed at the node level, and has a default maximum size of `1%` -of the heap. This can be changed in the `config/elasticsearch.yml` file with: - -[source,yaml] --------------------------------- -indices.requests.cache.size: 2% --------------------------------- - -Also, you can use the +indices.requests.cache.expire+ setting to specify a TTL -for cached results, but there should be no reason to do so. Remember that -stale results are automatically invalidated when the index is refreshed. This -setting is provided for completeness' sake only. 
- -[discrete] -==== Monitoring cache usage - -The size of the cache (in bytes) and the number of evictions can be viewed -by index, with the <> API: - -[source,console] ------------------------- -GET /_stats/request_cache?human ------------------------- +`indices.requests.cache.size`:: +(<>) The maximum size of the cache, as a percentage of the heap. Default: `1%`. -or by node with the <> API: +`indices.requests.cache.expire`:: +(<>) The TTL for cached results. Stale results are automatically invalidated when the index is refreshed, so you shouldn't need to use this setting. -[source,console] ------------------------- -GET /_nodes/stats/indices/request_cache?human ------------------------- diff --git a/docs/reference/modules/network.asciidoc b/docs/reference/modules/network.asciidoc index 1e4c5a21d386c..5095d73058d75 100644 --- a/docs/reference/modules/network.asciidoc +++ b/docs/reference/modules/network.asciidoc @@ -286,3 +286,22 @@ include::remote-cluster-network.asciidoc[] include::network/tracers.asciidoc[] include::network/threading.asciidoc[] + +[[tcp-readiness-port]] +==== TCP readiness port + +preview::[] + +If configured, a node can open a TCP port when the node is in a ready state. A node is deemed +ready when it has successfully joined a cluster. In a single-node configuration, the node is +said to be ready when it's able to accept requests. + +To enable the readiness TCP port, use the `readiness.port` setting. The readiness service will bind to +all host addresses. + +If the node leaves the cluster, or the <> is used to mark the node +for shutdown, the readiness port is immediately closed. + +A successful connection to the readiness TCP port signals that the {es} node is ready. When a client +connects to the readiness port, the server simply terminates the socket connection. No data is sent back +to the client. If a client cannot connect to the readiness port, the node is not ready. \ No newline at end of file diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index 022e8b5d1e2fe..f0cb3eecdf390 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -1,5 +1,5 @@ [[modules-node]] -=== Nodes +=== Node settings Any time that you start an instance of {es}, you are starting a _node_. A collection of connected nodes is called a <>. If you @@ -18,24 +18,35 @@ TIP: The performance of an {es} node is often limited by the performance of the Review our recommendations for optimizing your storage for <> and <>. +[[node-name-settings]] +==== Node name setting + +include::{es-ref-dir}/setup/important-settings/node-name.asciidoc[] + [[node-roles]] -==== Node roles +==== Node role settings You define a node's roles by setting `node.roles` in `elasticsearch.yml`. If you set `node.roles`, the node is only assigned the roles you specify. If you don't set `node.roles`, the node is assigned the following roles: -* `master` -* `data` +* [[master-node]]`master` +* [[data-node]]`data` * `data_content` * `data_hot` * `data_warm` * `data_cold` * `data_frozen` * `ingest` -* `ml` +* [[ml-node]]`ml` * `remote_cluster_client` -* `transform` +* [[transform-node]]`transform` + +The following additional roles are available: + +* `voting_only` + +[[coordinating-only-node]]If you set `node.roles` to an empty list, then the node is considered to be a <>.
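To verify which roles a node actually ended up with after configuring `node.roles`, the nodes info API reports them. In this sketch, `filter_path` just trims the response and is not required:

[source,console]
--------------------------------------------------
GET /_nodes?filter_path=nodes.*.name,nodes.*.roles
--------------------------------------------------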
[IMPORTANT] ==== @@ -65,386 +76,7 @@ As the cluster grows and in particular if you have large {ml} jobs or {ctransforms}, consider separating dedicated master-eligible nodes from dedicated data nodes, {ml} nodes, and {transform} nodes. -<>:: - -A node that has the `master` role, which makes it eligible to be -<>, which controls the cluster. - -<>:: - -A node that has one of several data roles. Data nodes hold data and perform data -related operations such as CRUD, search, and aggregations. A node with a generic `data` role can fill any of the specialized data node roles. - -<>:: - -A node that has the `ingest` role. Ingest nodes are able to apply an -<> to a document in order to transform and enrich the -document before indexing. With a heavy ingest load, it makes sense to use -dedicated ingest nodes and to not include the `ingest` role from nodes that have -the `master` or `data` roles. - -<>:: - -A node that has the `remote_cluster_client` role, which makes it eligible to act -as a remote client. - -<>:: - -A node that has the `ml` role. If you want to use {ml-features}, there must be -at least one {ml} node in your cluster. For more information, see -<> and {ml-docs}/index.html[Machine learning in the {stack}]. - -<>:: - -A node that has the `transform` role. If you want to use {transforms}, there -must be at least one {transform} node in your cluster. For more information, see -<> and <>. - -[NOTE] -[[coordinating-node]] -.Coordinating node -=============================================== - -Requests like search requests or bulk-indexing requests may involve data held -on different data nodes. A search request, for example, is executed in two -phases which are coordinated by the node which receives the client request -- -the _coordinating node_. - -In the _scatter_ phase, the coordinating node forwards the request to the data -nodes which hold the data. Each data node executes the request locally and -returns its results to the coordinating node. In the _gather_ phase, the -coordinating node reduces each data node's results into a single global -result set. - -Every node is implicitly a coordinating node. This means that a node that has -an explicit empty list of roles via `node.roles` will only act as a coordinating -node, which cannot be disabled. As a result, such a node needs to have enough -memory and CPU in order to deal with the gather phase. - -=============================================== - -[[master-node]] -==== Master-eligible node - -The master node is responsible for lightweight cluster-wide actions such as -creating or deleting an index, tracking which nodes are part of the cluster, -and deciding which shards to allocate to which nodes. It is important for -cluster health to have a stable master node. - -Any master-eligible node that is not a <> may -be elected to become the master node by the <>. - -IMPORTANT: Master nodes must have a `path.data` directory whose contents -persist across restarts, just like data nodes, because this is where the -cluster metadata is stored. The cluster metadata describes how to read the data -stored on the data nodes, so if it is lost then the data stored on the data -nodes cannot be read. - -[[dedicated-master-node]] -===== Dedicated master-eligible node - -It is important for the health of the cluster that the elected master node has -the resources it needs to fulfill its responsibilities. If the elected master -node is overloaded with other tasks then the cluster will not operate well. 
The -most reliable way to avoid overloading the master with other tasks is to -configure all the master-eligible nodes to be _dedicated master-eligible nodes_ -which only have the `master` role, allowing them to focus on managing the -cluster. Master-eligible nodes will still also behave as -<> that route requests from clients to -the other nodes in the cluster, but you should _not_ use dedicated master nodes -for this purpose. - -A small or lightly-loaded cluster may operate well if its master-eligible nodes -have other roles and responsibilities, but once your cluster comprises more -than a handful of nodes it usually makes sense to use dedicated master-eligible -nodes. - -To create a dedicated master-eligible node, set: - -[source,yaml] -------------------- -node.roles: [ master ] -------------------- - -[[voting-only-node]] -===== Voting-only master-eligible node - -A voting-only master-eligible node is a node that participates in -<> but which will not act as the cluster's -elected master node. In particular, a voting-only node can serve as a tiebreaker -in elections. - -It may seem confusing to use the term "master-eligible" to describe a -voting-only node since such a node is not actually eligible to become the master -at all. This terminology is an unfortunate consequence of history: -master-eligible nodes are those nodes that participate in elections and perform -certain tasks during cluster state publications, and voting-only nodes have the -same responsibilities even if they can never become the elected master. - -To configure a master-eligible node as a voting-only node, include `master` and -`voting_only` in the list of roles. For example to create a voting-only data -node: - -[source,yaml] -------------------- -node.roles: [ data, master, voting_only ] -------------------- - -IMPORTANT: Only nodes with the `master` role can be marked as having the -`voting_only` role. - -High availability (HA) clusters require at least three master-eligible nodes, at -least two of which are not voting-only nodes. Such a cluster will be able to -elect a master node even if one of the nodes fails. - -Voting-only master-eligible nodes may also fill other roles in your cluster. -For instance, a node may be both a data node and a voting-only master-eligible -node. A _dedicated_ voting-only master-eligible nodes is a voting-only -master-eligible node that fills no other roles in the cluster. To create a -dedicated voting-only master-eligible node, set: - -[source,yaml] -------------------- -node.roles: [ master, voting_only ] -------------------- - -Since dedicated voting-only nodes never act as the cluster's elected master, -they may require less heap and a less powerful CPU than the true master nodes. -However all master-eligible nodes, including voting-only nodes, are on the -critical path for <>. Cluster state updates are usually independent of -performance-critical workloads such as indexing or searches, but they are -involved in management activities such as index creation and rollover, mapping -updates, and recovery after a failure. The performance characteristics of these -activities are a function of the speed of the storage on each master-eligible -node, as well as the reliability and latency of the network interconnections -between the elected master node and the other nodes in the cluster. You must -therefore ensure that the storage and networking available to the nodes in your -cluster are good enough to meet your performance goals. 
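Related to the master-eligibility material above: the currently elected master can be checked at any time with the cat master API. A minimal sketch:

[source,console]
--------------------------------------------------
GET /_cat/master?v=true
--------------------------------------------------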
- -[[data-node]] -==== Data nodes - -Data nodes hold the shards that contain the documents you have indexed. Data -nodes handle data related operations like CRUD, search, and aggregations. -These operations are I/O-, memory-, and CPU-intensive. It is important to -monitor these resources and to add more data nodes if they are overloaded. - -The main benefit of having dedicated data nodes is the separation of the master -and data roles. - -In a multi-tier deployment architecture, you use specialized data roles to -assign data nodes to specific tiers: `data_content`,`data_hot`, `data_warm`, -`data_cold`, or `data_frozen`. A node can belong to multiple tiers. - -If you want to include a node in all tiers, or if your cluster does not use multiple tiers, then you can use the generic `data` role. - -include::../how-to/shard-limits.asciidoc[] - -WARNING: If you assign a node to a specific tier using a specialized data role, then you shouldn't also assign it the generic `data` role. The generic `data` role takes precedence over specialized data roles. - -[[generic-data-node]] -===== Generic data node - -Generic data nodes are included in all content tiers. - -To create a dedicated generic data node, set: -[source,yaml] ----- -node.roles: [ data ] ----- - -[[data-content-node]] -===== Content data node - -Content data nodes are part of the content tier. -include::{es-ref-dir}/datatiers.asciidoc[tag=content-tier] - -To create a dedicated content node, set: -[source,yaml] ----- -node.roles: [ data_content ] ----- - -[[data-hot-node]] -===== Hot data node - -Hot data nodes are part of the hot tier. -include::{es-ref-dir}/datatiers.asciidoc[tag=hot-tier] - -To create a dedicated hot node, set: -[source,yaml] ----- -node.roles: [ data_hot ] ----- - -[[data-warm-node]] -===== Warm data node - -Warm data nodes are part of the warm tier. -include::{es-ref-dir}/datatiers.asciidoc[tag=warm-tier] - -To create a dedicated warm node, set: -[source,yaml] ----- -node.roles: [ data_warm ] ----- - -[[data-cold-node]] -===== Cold data node - -Cold data nodes are part of the cold tier. -include::{es-ref-dir}/datatiers.asciidoc[tag=cold-tier] - -To create a dedicated cold node, set: -[source,yaml] ----- -node.roles: [ data_cold ] ----- - -[[data-frozen-node]] -===== Frozen data node - -Frozen data nodes are part of the frozen tier. -include::{es-ref-dir}/datatiers.asciidoc[tag=frozen-tier] - -To create a dedicated frozen node, set: -[source,yaml] ----- -node.roles: [ data_frozen ] ----- - -[[node-ingest-node]] -==== Ingest node - -Ingest nodes can execute pre-processing pipelines, composed of one or more -ingest processors. Depending on the type of operations performed by the ingest -processors and the required resources, it may make sense to have dedicated -ingest nodes, that will only perform this specific task. - -To create a dedicated ingest node, set: - -[source,yaml] ----- -node.roles: [ ingest ] ----- - -[[coordinating-only-node]] -==== Coordinating only node - -If you take away the ability to be able to handle master duties, to hold data, -and pre-process documents, then you are left with a _coordinating_ node that -can only route requests, handle the search reduce phase, and distribute bulk -indexing. Essentially, coordinating only nodes behave as smart load balancers. - -Coordinating only nodes can benefit large clusters by offloading the -coordinating node role from data and master-eligible nodes. 
They join the -cluster and receive the full <>, like every other -node, and they use the cluster state to route requests directly to the -appropriate place(s). - -WARNING: Adding too many coordinating only nodes to a cluster can increase the -burden on the entire cluster because the elected master node must await -acknowledgement of cluster state updates from every node! The benefit of -coordinating only nodes should not be overstated -- data nodes can happily -serve the same purpose. - -To create a dedicated coordinating node, set: - -[source,yaml] ----- -node.roles: [ ] ----- - -[[remote-node]] -==== Remote-eligible node - -A remote-eligible node acts as a cross-cluster client and connects to -<>. Once connected, you can search -remote clusters using <>. You can also sync -data between clusters using <>. - -[source,yaml] ----- -node.roles: [ remote_cluster_client ] ----- - -[[ml-node]] -==== [xpack]#Machine learning node# - -{ml-cap} nodes run jobs and handle {ml} API requests. For more information, see -<>. - -To create a dedicated {ml} node, set: - -[source,yaml] ----- -node.roles: [ ml, remote_cluster_client] ----- - -The `remote_cluster_client` role is optional but strongly recommended. -Otherwise, {ccs} fails when used in {ml} jobs or {dfeeds}. If you use {ccs} in -your {anomaly-jobs}, the `remote_cluster_client` role is also required on all -master-eligible nodes. Otherwise, the {dfeed} cannot start. See <>. - -[[transform-node]] -==== [xpack]#{transform-cap} node# - -{transform-cap} nodes run {transforms} and handle {transform} API requests. For -more information, see <>. - -To create a dedicated {transform} node, set: - -[source,yaml] ----- -node.roles: [ transform, remote_cluster_client ] ----- - -The `remote_cluster_client` role is optional but strongly recommended. -Otherwise, {ccs} fails when used in {transforms}. See <>. - -[[change-node-role]] -==== Changing the role of a node - -Each data node maintains the following data on disk: - -* the shard data for every shard allocated to that node, -* the index metadata corresponding with every shard allocated to that node, and -* the cluster-wide metadata, such as settings and index templates. - -Similarly, each master-eligible node maintains the following data on disk: - -* the index metadata for every index in the cluster, and -* the cluster-wide metadata, such as settings and index templates. - -Each node checks the contents of its data path at startup. If it discovers -unexpected data then it will refuse to start. This is to avoid importing -unwanted <> which can lead -to a red cluster health. To be more precise, nodes without the `data` role will -refuse to start if they find any shard data on disk at startup, and nodes -without both the `master` and `data` roles will refuse to start if they have any -index metadata on disk at startup. - -It is possible to change the roles of a node by adjusting its -`elasticsearch.yml` file and restarting it. This is known as _repurposing_ a -node. In order to satisfy the checks for unexpected data described above, you -must perform some extra steps to prepare a node for repurposing when starting -the node without the `data` or `master` roles. - -* If you want to repurpose a data node by removing the `data` role then you - should first use an <> to safely - migrate all the shard data onto other nodes in the cluster. 
- -* If you want to repurpose a node to have neither the `data` nor `master` roles - then it is simplest to start a brand-new node with an empty data path and the - desired roles. You may find it safest to use an - <> to migrate the shard data elsewhere - in the cluster first. - -If it is not possible to follow these extra steps then you may be able to use -the <> tool to delete any -excess data that prevents a node from starting. +To learn more about the available node roles, see <>. [discrete] === Node data path settings @@ -495,6 +127,25 @@ modify the contents of the data directory. The data directory contains no executables so a virus scan will only find false positives. // end::modules-node-data-path-warning-tag[] +[[custom-node-attributes]] +==== Custom node attributes + +If needed, you can add custom attributes to a node. These attributes can be used to <>, or to group nodes together for <>. + +[TIP] +=============================================== +You can also set a node attribute using the `-E` command line argument when you start a node: + +[source,sh] +-------------------------------------------------------- +./bin/elasticsearch -Enode.attr.rack_id=rack_one +-------------------------------------------------------- +=============================================== + +`node.attr.`:: + (<>) + A custom attribute that you can assign to a node. For example, you might assign a `rack_id` attribute to each node to ensure that primary and replica shards are not allocated on the same rack. You can specify multiple attributes as a comma-separated list. + [discrete] [[other-node-settings]] === Other node settings @@ -504,4 +155,4 @@ including: * <> * <> -* <> +* <> \ No newline at end of file diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index ca1c507aa4ed9..87078c0f1956f 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -80,7 +80,7 @@ The _gateway nodes_ selection depends on the following criteria: + * *version*: Remote nodes must be compatible with the cluster they are registered to. -* *role*: By default, any non-<> node can act as a +* *role*: By default, any non-<> node can act as a gateway node. Dedicated master nodes are never selected as gateway nodes. * *attributes*: You can define the gateway nodes for a cluster by setting <> to `true`. diff --git a/docs/reference/modules/shard-ops.asciidoc b/docs/reference/modules/shard-ops.asciidoc index c0e5ee6a220f0..93d6b6d3468f8 100644 --- a/docs/reference/modules/shard-ops.asciidoc +++ b/docs/reference/modules/shard-ops.asciidoc @@ -1,5 +1,5 @@ [[shard-allocation-relocation-recovery]] -=== Shard allocation, relocation, and recovery +== Shard allocation, relocation, and recovery Each <> in Elasticsearch is divided into one or more <>. Each document in an index belongs to a single shard. @@ -12,22 +12,25 @@ Over the course of normal operation, Elasticsearch allocates shard copies to nod TIP: To learn about optimizing the number and size of shards in your cluster, refer to <>. To learn about how read and write operations are replicated across shards and shard copies, refer to <>. +[discrete] [[shard-allocation]] -==== Shard allocation +=== Shard allocation include::{es-ref-dir}/modules/shard-allocation-desc.asciidoc[] By default, the primary and replica shard copies for an index can be allocated to any node in the cluster, and may be relocated to rebalance the cluster. 
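Before adjusting any allocation settings, it can help to see where shard copies currently live and why an unassigned shard is unassigned. Both requests below are standard APIs, shown here only as a hedged illustration; the second, sent without a body, reports on the first unassigned shard it finds:

[source,console]
--------------------------------------------------
GET _cat/shards?v=true&h=index,shard,prirep,state,node

GET _cluster/allocation/explain
--------------------------------------------------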
-===== Adjust shard allocation settings +[discrete] +==== Adjust shard allocation settings You can control how shard copies are allocated using the following settings: -- <>: Use these settings to control how shard copies are allocated and balanced across the entire cluster. For example, you might want to allocate nodes availability zones, or prevent certain nodes from being used so you can perform maintenance. +- <>: Use these settings to control how shard copies are allocated and balanced across the entire cluster. For example, you might want to <>, or prevent certain nodes from being used so you can perform maintenance. - <>: Use these settings to control how the shard copies for a specific index are allocated. For example, you might want to allocate an index to a node in a specific data tier, or to a node with specific attributes. -===== Monitor shard allocation +[discrete] +==== Monitor shard allocation If a shard copy is unassigned, it means that the shard copy is not allocated to any node in the cluster. This can happen if there are not enough nodes in the cluster to allocate the shard copy, or if the shard copy can't be allocated to any node that satisfies the shard allocation filtering rules. When a shard copy is unassigned, your cluster is considered unhealthy and returns a yellow or red cluster health status. @@ -39,12 +42,14 @@ You can use the following APIs to monitor shard allocation: <>. +[discrete] [[shard-recovery]] -==== Shard recovery +=== Shard recovery include::{es-ref-dir}/modules/shard-recovery-desc.asciidoc[] -===== Adjust shard recovery settings +[discrete] +==== Adjust shard recovery settings To control how shards are recovered, for example the resources that can be used by recovery operations, and which indices should be prioritized for recovery, you can adjust the following settings: @@ -54,22 +59,25 @@ To control how shards are recovered, for example the resources that can be used Shard recovery operations also respect general shard allocation settings. -===== Monitor shard recovery +[discrete] +==== Monitor shard recovery You can use the following APIs to monitor shard recovery: - View a list of in-progress and completed recoveries using the <> - View detailed information about a specific recovery using the <> +[discrete] [[shard-relocation]] -==== Shard relocation +=== Shard relocation Shard relocation is the process of moving shard copies from one node to another. This can happen when a node joins or leaves the cluster, or when the cluster is rebalancing. When a shard copy is relocated, it is created as a new shard copy on the target node. When the shard copy is fully allocated and recovered, the old shard copy is deleted. If the shard copy being relocated is a primary, then the new shard copy is marked as primary before the old shard copy is deleted. -===== Adjust shard relocation settings +[discrete] +==== Adjust shard relocation settings You can control how and when shard copies are relocated. For example, you can adjust the rebalancing settings that control when shard copies are relocated to balance the cluster, or the high watermark for disk-based shard allocation that can trigger relocation. These settings are part of the <>. -Shard relocation operations also respect shard allocation and recovery settings. \ No newline at end of file +Shard relocation operations also respect shard allocation and recovery settings.
\ No newline at end of file diff --git a/docs/reference/monitoring/index.asciidoc b/docs/reference/monitoring/index.asciidoc index 1b83f4c11ba54..82e1447ba8a1f 100644 --- a/docs/reference/monitoring/index.asciidoc +++ b/docs/reference/monitoring/index.asciidoc @@ -9,6 +9,7 @@ performance of your {es} cluster. * <> * <> +* <> * <> * <> * <> @@ -23,6 +24,8 @@ include::overview.asciidoc[] include::how-monitoring-works.asciidoc[] +include::{es-ref-dir}/setup/logging-config.asciidoc[] + include::production.asciidoc[] include::configuring-elastic-agent.asciidoc[] diff --git a/docs/reference/monitoring/overview.asciidoc b/docs/reference/monitoring/overview.asciidoc index d2a0130f71bb0..5c5d016f45ea0 100644 --- a/docs/reference/monitoring/overview.asciidoc +++ b/docs/reference/monitoring/overview.asciidoc @@ -13,6 +13,10 @@ All of the monitoring metrics are stored in {es}, which enables you to easily visualize the data in {kib}. By default, the monitoring metrics are stored in local indices. +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** + TIP: In production, we strongly recommend using a separate monitoring cluster. Using a separate monitoring cluster prevents production cluster outages from impacting your ability to access your monitoring data. It also prevents diff --git a/docs/reference/node-roles.asciidoc b/docs/reference/node-roles.asciidoc new file mode 100644 index 0000000000000..296c76e6dba9b --- /dev/null +++ b/docs/reference/node-roles.asciidoc @@ -0,0 +1,437 @@ +[[node-roles-overview]] +== Node roles + +Any time that you start an instance of {es}, you are starting a _node_. A +collection of connected nodes is called a <>. If you +are running a single node of {es}, then you have a cluster of one node. All nodes know about all the other nodes in the cluster and can forward client +requests to the appropriate node. + +Each node performs one or more roles. Roles control the behavior of the node in the cluster. + +[discrete] +[[set-node-roles]] +=== Set node roles + +You define a node's roles by setting `node.roles` in <>. If you set `node.roles`, the node is only assigned the roles you specify. If you don't set `node.roles`, the node is assigned the following roles: + +* `master` +* `data` +* `data_content` +* `data_hot` +* `data_warm` +* `data_cold` +* `data_frozen` +* `ingest` +* `ml` +* `remote_cluster_client` +* `transform` + +[IMPORTANT] +==== +If you set `node.roles`, ensure you specify every node role your cluster needs. +Every cluster requires the following node roles: + +* `master` +* {blank} ++ +-- +`data_content` and `data_hot` + +OR + +`data` +-- + +Some {stack} features also require specific node roles: + +- {ccs-cap} and {ccr} require the `remote_cluster_client` role. +- {stack-monitor-app} and ingest pipelines require the `ingest` role. +- {fleet}, the {security-app}, and {transforms} require the `transform` role. + The `remote_cluster_client` role is also required to use {ccs} with these + features. +- {ml-cap} features, such as {anomaly-detect}, require the `ml` role. 
+==== + +As the cluster grows and in particular if you have large {ml} jobs or +{ctransforms}, consider separating dedicated master-eligible nodes from +dedicated data nodes, {ml} nodes, and {transform} nodes. + +[discrete] +[[change-node-role]] +=== Change the role of a node + +Each data node maintains the following data on disk: + +* the shard data for every shard allocated to that node, +* the index metadata corresponding with every shard allocated to that node, and +* the cluster-wide metadata, such as settings and index templates. + +Similarly, each master-eligible node maintains the following data on disk: + +* the index metadata for every index in the cluster, and +* the cluster-wide metadata, such as settings and index templates. + +Each node checks the contents of its data path at startup. If it discovers +unexpected data then it will refuse to start. This is to avoid importing +unwanted <> which can lead +to a red cluster health. To be more precise, nodes without the `data` role will +refuse to start if they find any shard data on disk at startup, and nodes +without both the `master` and `data` roles will refuse to start if they have any +index metadata on disk at startup. + +It is possible to change the roles of a node by adjusting its +`elasticsearch.yml` file and restarting it. This is known as _repurposing_ a +node. In order to satisfy the checks for unexpected data described above, you +must perform some extra steps to prepare a node for repurposing when starting +the node without the `data` or `master` roles. + +* If you want to repurpose a data node by removing the `data` role then you + should first use an <> to safely + migrate all the shard data onto other nodes in the cluster. + +* If you want to repurpose a node to have neither the `data` nor `master` roles + then it is simplest to start a brand-new node with an empty data path and the + desired roles. You may find it safest to use an + <> to migrate the shard data elsewhere + in the cluster first. + +If it is not possible to follow these extra steps then you may be able to use +the <> tool to delete any +excess data that prevents a node from starting. + +[discrete] +[[node-roles-list]] +=== Available node roles + +The following is a list of the roles that a node can perform in a cluster. A node can have one or more roles. + +* <> (`master`): A node that is eligible to be +<>, which controls the cluster. + +* <> (`data`, `data_content`, `data_hot`, `data_warm`, `data_cold`, `data_frozen`): A node that has one of several data roles. Data nodes hold data and perform data related operations such as CRUD, search, and aggregations. You might use multiple data roles in a cluster so you can implement <>. + +* <> (`ingest`): Ingest nodes are able to apply an <> to a document in order to transform and enrich the document before indexing. With a heavy ingest load, it makes sense to use dedicated ingest nodes and to not include the `ingest` role from nodes that have the `master` or `data` roles. + +* <> (`remote_cluster_client`): A node that is eligible to act as a remote client. + +* <> (`ml`): A node that can run {ml-features}. If you want to use {ml-features}, there must be at least one {ml} node in your cluster. For more information, see <> and {ml-docs}/index.html[Machine learning in the {stack}]. + +* <> (`transform`): A node that can perform {transforms}. If you want to use {transforms}, there must be at least one {transform} node in your cluster. For more information, see <> and <>. 
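A quick way to cross-check this role list against a live cluster is the cat nodes API, which prints abbreviated role names per node and flags the elected master. A minimal sketch:

[source,console]
--------------------------------------------------
GET /_cat/nodes?v=true&h=name,node.role,master
--------------------------------------------------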
+ +[NOTE] +[[coordinating-node]] +.Coordinating node +=============================================== + +Requests like search requests or bulk-indexing requests may involve data held +on different data nodes. A search request, for example, is executed in two +phases which are coordinated by the node which receives the client request -- +the _coordinating node_. + +In the _scatter_ phase, the coordinating node forwards the request to the data +nodes which hold the data. Each data node executes the request locally and +returns its results to the coordinating node. In the _gather_ phase, the +coordinating node reduces each data node's results into a single global +result set. + +Every node is implicitly a coordinating node. This means that a node that has +an explicit empty list of roles in the `node.roles` setting will only act as a coordinating +node, which cannot be disabled. As a result, such a node needs to have enough +memory and CPU in order to deal with the gather phase. + +=============================================== + +[discrete] + +[[master-node-role]] +==== Master-eligible node + +The master node is responsible for lightweight cluster-wide actions such as +creating or deleting an index, tracking which nodes are part of the cluster, +and deciding which shards to allocate to which nodes. It is important for +cluster health to have a stable master node. + +Any master-eligible node that is not a <> may +be elected to become the master node by the <>. + +IMPORTANT: Master nodes must have a `path.data` directory whose contents +persist across restarts, just like data nodes, because this is where the +cluster metadata is stored. The cluster metadata describes how to read the data +stored on the data nodes, so if it is lost then the data stored on the data +nodes cannot be read. + +[discrete] +[[dedicated-master-node]] +===== Dedicated master-eligible node + +It is important for the health of the cluster that the elected master node has +the resources it needs to fulfill its responsibilities. If the elected master +node is overloaded with other tasks then the cluster will not operate well. The +most reliable way to avoid overloading the master with other tasks is to +configure all the master-eligible nodes to be _dedicated master-eligible nodes_ +which only have the `master` role, allowing them to focus on managing the +cluster. Master-eligible nodes will still also behave as +<> that route requests from clients to +the other nodes in the cluster, but you should _not_ use dedicated master nodes +for this purpose. + +A small or lightly-loaded cluster may operate well if its master-eligible nodes +have other roles and responsibilities, but once your cluster comprises more +than a handful of nodes it usually makes sense to use dedicated master-eligible +nodes. + +To create a dedicated master-eligible node, set: + +[source,yaml] +------------------- +node.roles: [ master ] +------------------- + +[discrete] +[[voting-only-node]] +===== Voting-only master-eligible node + +A voting-only master-eligible node is a node that participates in +<> but which will not act as the cluster's +elected master node. In particular, a voting-only node can serve as a tiebreaker +in elections. + +It may seem confusing to use the term "master-eligible" to describe a +voting-only node since such a node is not actually eligible to become the master +at all. 
This terminology is an unfortunate consequence of history: +master-eligible nodes are those nodes that participate in elections and perform +certain tasks during cluster state publications, and voting-only nodes have the +same responsibilities even if they can never become the elected master. + +To configure a master-eligible node as a voting-only node, include `master` and +`voting_only` in the list of roles. For example, to create a voting-only data +node: + +[source,yaml] +------------------- +node.roles: [ data, master, voting_only ] +------------------- + +IMPORTANT: Only nodes with the `master` role can be marked as having the +`voting_only` role. + +High availability (HA) clusters require at least three master-eligible nodes, at +least two of which are not voting-only nodes. Such a cluster will be able to +elect a master node even if one of the nodes fails. + +Voting-only master-eligible nodes may also fill other roles in your cluster. +For instance, a node may be both a data node and a voting-only master-eligible +node. A _dedicated_ voting-only master-eligible node is a voting-only +master-eligible node that fills no other roles in the cluster. To create a +dedicated voting-only master-eligible node, set: + +[source,yaml] +------------------- +node.roles: [ master, voting_only ] +------------------- + +Since dedicated voting-only nodes never act as the cluster's elected master, +they may require less heap and a less powerful CPU than the true master nodes. +However, all master-eligible nodes, including voting-only nodes, are on the +critical path for <>. Cluster state updates are usually independent of +performance-critical workloads such as indexing or searches, but they are +involved in management activities such as index creation and rollover, mapping +updates, and recovery after a failure. The performance characteristics of these +activities are a function of the speed of the storage on each master-eligible +node, as well as the reliability and latency of the network interconnections +between the elected master node and the other nodes in the cluster. You must +therefore ensure that the storage and networking available to the nodes in your +cluster are good enough to meet your performance goals. + +[discrete] +[[data-node-role]] +==== Data nodes + +Data nodes hold the shards that contain the documents you have indexed. Data +nodes handle data related operations like CRUD, search, and aggregations. +These operations are I/O-, memory-, and CPU-intensive. It is important to +monitor these resources and to add more data nodes if they are overloaded. + +The main benefit of having dedicated data nodes is the separation of the master +and data roles. + +In a multi-tier deployment architecture, you use specialized data roles to +assign data nodes to specific tiers: `data_content`, `data_hot`, `data_warm`, +`data_cold`, or `data_frozen`. A node can belong to multiple tiers. + +If you want to include a node in all tiers, or if your cluster does not use multiple tiers, then you can use the generic `data` role. + +include::{es-ref-dir}/how-to/shard-limits.asciidoc[] + +WARNING: If you assign a node to a specific tier using a specialized data role, then you shouldn't also assign it the generic `data` role. The generic `data` role takes precedence over specialized data roles. + +[discrete] +[[generic-data-node]] +===== Generic data node + +Generic data nodes are included in all content tiers. A node with a generic `data` role can fill any of the specialized data node roles.
+
+To create a dedicated generic data node, set:
+[source,yaml]
+----
+node.roles: [ data ]
+----
+
+[discrete]
+[[data-content-node]]
+===== Content data node
+
+Content data nodes are part of the content tier.
+include::{es-ref-dir}/datatiers.asciidoc[tag=content-tier]
+
+To create a dedicated content node, set:
+[source,yaml]
+----
+node.roles: [ data_content ]
+----
+
+[discrete]
+[[data-hot-node]]
+===== Hot data node
+
+Hot data nodes are part of the hot tier.
+include::{es-ref-dir}/datatiers.asciidoc[tag=hot-tier]
+
+To create a dedicated hot node, set:
+[source,yaml]
+----
+node.roles: [ data_hot ]
+----
+
+[discrete]
+[[data-warm-node]]
+===== Warm data node
+
+Warm data nodes are part of the warm tier.
+include::{es-ref-dir}/datatiers.asciidoc[tag=warm-tier]
+
+To create a dedicated warm node, set:
+[source,yaml]
+----
+node.roles: [ data_warm ]
+----
+
+[discrete]
+[[data-cold-node]]
+===== Cold data node
+
+Cold data nodes are part of the cold tier.
+include::{es-ref-dir}/datatiers.asciidoc[tag=cold-tier]
+
+To create a dedicated cold node, set:
+[source,yaml]
+----
+node.roles: [ data_cold ]
+----
+
+[discrete]
+[[data-frozen-node]]
+===== Frozen data node
+
+Frozen data nodes are part of the frozen tier.
+include::{es-ref-dir}/datatiers.asciidoc[tag=frozen-tier]
+
+To create a dedicated frozen node, set:
+[source,yaml]
+----
+node.roles: [ data_frozen ]
+----
+
+[discrete]
+[[node-ingest-node]]
+==== Ingest node
+
+Ingest nodes can execute pre-processing pipelines, composed of one or more
+ingest processors. Depending on the type of operations performed by the ingest
+processors and the required resources, it may make sense to have dedicated
+ingest nodes that will only perform this specific task.
+
+To create a dedicated ingest node, set:
+
+[source,yaml]
+----
+node.roles: [ ingest ]
+----
+
+[discrete]
+[[coordinating-only-node-role]]
+==== Coordinating only node
+
+If you take away the ability to handle master duties, to hold data, and to
+pre-process documents, then you are left with a _coordinating_ node that
+can only route requests, handle the search reduce phase, and distribute bulk
+indexing. Essentially, coordinating only nodes behave as smart load balancers.
+
+Coordinating only nodes can benefit large clusters by offloading the
+coordinating node role from data and master-eligible nodes. They join the
+cluster and receive the full <>, like every other
+node, and they use the cluster state to route requests directly to the
+appropriate place(s).
+
+WARNING: Adding too many coordinating only nodes to a cluster can increase the
+burden on the entire cluster because the elected master node must await
+acknowledgement of cluster state updates from every node! The benefit of
+coordinating only nodes should not be overstated -- data nodes can happily
+serve the same purpose.
+
+To create a dedicated coordinating node, set:
+
+[source,yaml]
+----
+node.roles: [ ]
+----
+
+[discrete]
+[[remote-node]]
+==== Remote-eligible node
+
+A remote-eligible node acts as a cross-cluster client and connects to
+<>. Once connected, you can search
+remote clusters using <>. You can also sync
+data between clusters using <>.
+
+To create a dedicated remote-eligible node, set:
+
+[source,yaml]
+----
+node.roles: [ remote_cluster_client ]
+----
+
+[discrete]
+[[ml-node-role]]
+==== [xpack]#Machine learning node#
+
+{ml-cap} nodes run jobs and handle {ml} API requests. For more information, see
+<>.
+
+To create a dedicated {ml} node, set:
+
+[source,yaml]
+----
+node.roles: [ ml, remote_cluster_client ]
+----
+
+The `remote_cluster_client` role is optional but strongly recommended.
+Otherwise, {ccs} fails when used in {ml} jobs or {dfeeds}. If you use {ccs} in
+your {anomaly-jobs}, the `remote_cluster_client` role is also required on all
+master-eligible nodes. Otherwise, the {dfeed} cannot start. See <>.
+
+[discrete]
+[[transform-node-role]]
+==== [xpack]#{transform-cap} node#
+
+{transform-cap} nodes run {transforms} and handle {transform} API requests. For
+more information, see <>.
+
+To create a dedicated {transform} node, set:
+
+[source,yaml]
+----
+node.roles: [ transform, remote_cluster_client ]
+----
+
+The `remote_cluster_client` role is optional but strongly recommended.
+Otherwise, {ccs} fails when used in {transforms}. See <>.
\ No newline at end of file
diff --git a/docs/reference/nodes-shards.asciidoc b/docs/reference/nodes-shards.asciidoc
new file mode 100644
index 0000000000000..11095ed7b7eb3
--- /dev/null
+++ b/docs/reference/nodes-shards.asciidoc
@@ -0,0 +1,43 @@
+[[nodes-shards]]
+== Nodes and shards
+
+[NOTE]
+====
+Nodes and shards are what make {es} distributed and scalable.
+These concepts aren't essential if you're just getting started. How you <> in production determines what you need to know:
+
+* *Self-managed {es}*: You are responsible for setting up and managing nodes, clusters, shards, and replicas. This includes managing the underlying infrastructure, scaling, and ensuring high availability through failover and backup strategies.
+* *Elastic Cloud*: Elastic can autoscale resources in response to workload changes. Choose from different deployment types to apply sensible defaults for your use case. A basic understanding of nodes, shards, and replicas is still important.
+* *Elastic Cloud Serverless*: You don't need to worry about nodes, shards, or replicas. These resources are 100% automated on the serverless platform, which is designed to scale with your workload.
+====
+
+You can add servers (_nodes_) to a cluster to increase capacity, and {es} automatically distributes your data and query load across all of the available nodes.
+
+{es} is able to distribute your data across nodes by subdividing an index into _shards_. Each index in {es} is a grouping
+of one or more physical shards, where each shard is a self-contained Lucene index containing a subset of the documents in
+the index. By distributing the documents in an index across multiple shards, and distributing those shards across multiple
+nodes, {es} increases indexing and query capacity.
+
+There are two types of shards: _primaries_ and _replicas_. Each document in an index belongs to one primary shard. A replica
+shard is a copy of a primary shard. Replicas maintain redundant copies of your data across the nodes in your cluster.
+This protects against hardware failure and increases capacity to serve read requests like searching or retrieving a document.
+
+[TIP]
+====
+The number of primary shards in an index is fixed at the time that an index is created, but the number of replica shards can
+be changed at any time, without interrupting indexing or query operations.
+====
+
+Shard copies in your cluster are automatically balanced across nodes to provide scale and high availability. All nodes are
+aware of all the other nodes in the cluster and can forward client requests to the appropriate node. This allows {es}
+to distribute indexing and query load across the cluster.
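+
+As a concrete sketch of these concepts (the index name `my-index` is
+illustrative only), the first request below creates an index with three
+primary shards and one replica of each. The second request later raises the
+replica count, which works because `index.number_of_replicas` is a dynamic
+setting, while `index.number_of_shards` is fixed at index creation:
+
+[source,console]
+----
+PUT /my-index
+{
+  "settings": {
+    "index.number_of_shards": 3,
+    "index.number_of_replicas": 1
+  }
+}
+
+PUT /my-index/_settings
+{
+  "index.number_of_replicas": 2
+}
+----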
+
+If you're exploring {es} for the first time or working in a development environment, then you can use a cluster with a single node and create indices
+with only one shard. However, in a production environment, you should build a cluster with multiple nodes and indices
+with multiple shards to increase performance and resilience.
+
+// TODO - diagram
+
+* To learn about optimizing the number and size of shards in your cluster, refer to <>.
+* To learn about how read and write operations are replicated across shards and shard copies, refer to <>.
+* To adjust how shards are allocated and balanced across nodes, refer to <>.
\ No newline at end of file
diff --git a/docs/reference/path-settings-overview.asciidoc b/docs/reference/path-settings-overview.asciidoc
new file mode 100644
index 0000000000000..0740b9769c9b2
--- /dev/null
+++ b/docs/reference/path-settings-overview.asciidoc
@@ -0,0 +1,112 @@
+[[path-settings-overview]]
+=== Path settings
+
+include::{es-ref-dir}/setup/important-settings/path-settings.asciidoc[]
+
+[[multiple-data-paths]]
+==== Multiple data paths
+deprecated::[7.13.0]
+
+If needed, you can specify multiple paths in `path.data`. {es} stores the node's
+data across all provided paths but keeps each shard's data on the same path.
+
+{es} does not balance shards across a node's data paths. High disk
+usage in a single path can trigger a <> for the entire node. If triggered, {es} will not add shards to
+the node, even if the node’s other paths have available disk space. If you need
+additional disk space, we recommend you add a new node rather than additional
+data paths.
+
+include::{es-ref-dir}/tab-widgets/multi-data-path-widget.asciidoc[]
+
+[[mdp-migrate]]
+===== Migrate from multiple data paths
+
+Support for multiple data paths was deprecated in 7.13 and will be removed
+in a future release.
+
+As an alternative to multiple data paths, you can create a filesystem which
+spans multiple disks with a hardware virtualisation layer such as RAID, or a
+software virtualisation layer such as Logical Volume Manager (LVM) on Linux or
+Storage Spaces on Windows. If you wish to use multiple data paths on a single
+machine then you must run one node for each data path.
+
+If you currently use multiple data paths in a
+{ref}/high-availability-cluster-design.html[highly available cluster] then you
+can migrate to a setup that uses a single path for each node without downtime
+using a process similar to a
+{ref}/restart-cluster.html#restart-cluster-rolling[rolling restart]: shut each
+node down in turn and replace it with one or more nodes each configured to use
+a single data path. In more detail, for each node that currently has multiple
+data paths you should use the following process. In principle you can
+perform this migration during a rolling upgrade to 8.0, but we recommend
+migrating to a single-data-path setup before starting to upgrade.
+
+1. Take a snapshot to protect your data in case of disaster.
+
+2. Optionally, migrate the data away from the target node by using an
+{ref}/modules-cluster.html#cluster-shard-allocation-filtering[allocation filter]:
++
+[source,console]
+--------------------------------------------------
+PUT _cluster/settings
+{
+  "persistent": {
+    "cluster.routing.allocation.exclude._name": "target-node-name"
+  }
+}
+--------------------------------------------------
++
+You can use the {ref}/cat-allocation.html[cat allocation API] to track progress
+of this data migration.
If some shards do not migrate then the +{ref}/cluster-allocation-explain.html[cluster allocation explain API] will help +you to determine why. + +3. Follow the steps in the +{ref}/restart-cluster.html#restart-cluster-rolling[rolling restart process] +up to and including shutting the target node down. + +4. Ensure your cluster health is `yellow` or `green`, so that there is a copy +of every shard assigned to at least one of the other nodes in your cluster. + +5. If applicable, remove the allocation filter applied in the earlier step. ++ +[source,console] +-------------------------------------------------- +PUT _cluster/settings +{ + "persistent": { + "cluster.routing.allocation.exclude._name": null + } +} +-------------------------------------------------- + +6. Discard the data held by the stopped node by deleting the contents of its +data paths. + +7. Reconfigure your storage. For instance, combine your disks into a single +filesystem using LVM or Storage Spaces. Ensure that your reconfigured storage +has sufficient space for the data that it will hold. + +8. Reconfigure your node by adjusting the `path.data` setting in its +`elasticsearch.yml` file. If needed, install more nodes each with their own +`path.data` setting pointing at a separate data path. + +9. Start the new nodes and follow the rest of the +{ref}/restart-cluster.html#restart-cluster-rolling[rolling restart process] for +them. + +10. Ensure your cluster health is `green`, so that every shard has been +assigned. + +You can alternatively add some number of single-data-path nodes to your +cluster, migrate all your data over to these new nodes using +{ref}/modules-cluster.html#cluster-shard-allocation-filtering[allocation filters], +and then remove the old nodes from the cluster. This approach will temporarily +double the size of your cluster so it will only work if you have the capacity to +expand your cluster like this. + +If you currently use multiple data paths but your cluster is not highly +available then you can migrate to a non-deprecated configuration by taking +a snapshot, creating a new cluster with the desired configuration and restoring +the snapshot into it. diff --git a/docs/reference/query-dsl/bool-query.asciidoc b/docs/reference/query-dsl/bool-query.asciidoc index c24135a370914..27220f0d85149 100644 --- a/docs/reference/query-dsl/bool-query.asciidoc +++ b/docs/reference/query-dsl/bool-query.asciidoc @@ -13,21 +13,24 @@ occurrence types are: |======================================================================= |Occur |Description |`must` |The clause (query) must appear in matching documents and will -contribute to the score. +contribute to the score. Each query defined under a `must` acts as a logical "AND", returning only documents that match _all_ the specified queries. + +|`should` |The clause (query) should appear in the matching document. Each query defined under a `should` acts as a logical "OR", returning documents that match _any_ of the specified queries. |`filter` |The clause (query) must appear in matching documents. However unlike `must` the score of the query will be ignored. Filter clauses are executed in <>, meaning that scoring is ignored -and clauses are considered for caching. - -|`should` |The clause (query) should appear in the matching document. +and clauses are considered for caching. Each query defined under a `filter` acts as a logical "AND", returning only documents that match _all_ the specified queries. |`must_not` |The clause (query) must not appear in the matching documents. 
Clauses are executed in <> meaning that scoring is
ignored and clauses are considered for caching. Because scoring is
-ignored, a score of `0` for all documents is returned.
+ignored, a score of `0` for all documents is returned. Each query defined under a `must_not` acts as a logical "NOT", returning only documents that do not match any of the specified queries.
+
|=======================================================================

+The `must` and `should` clauses function as logical AND and OR operators, respectively, contributing to the scoring of results. However, these results will not be cached for faster retrieval. In contrast, the `filter` and `must_not` clauses are used to include or exclude results without impacting the score, unless used within a `constant_score` query.
+
The `bool` query takes a _more-matches-is-better_ approach, so the score from
each matching `must` or `should` clause will be added together to provide the
final `_score` for each document.
diff --git a/docs/reference/query-dsl/intervals-query.asciidoc b/docs/reference/query-dsl/intervals-query.asciidoc
index 069021dddb69f..267e5a502efd4 100644
--- a/docs/reference/query-dsl/intervals-query.asciidoc
+++ b/docs/reference/query-dsl/intervals-query.asciidoc
@@ -303,12 +303,22 @@ matches. Defaults to `-1`.

If unspecified or set to `-1`, there is no width restriction on the match. If
set to `0`, the terms must appear next to each other.
+
+Internal intervals can have their own `max_gaps` values. In this case
+we first find internal intervals with their `max_gaps` values, and then
+combine them to see if a gap between internal intervals matches
+the value of `max_gaps` of the `all_of` rule.
+
+For examples of how `max_gaps` works, see <>.
--

`ordered`::
(Optional, Boolean) If `true`, intervals produced by the rules should appear in
the order in which they are specified. Defaults to `false`.
+If `ordered` is `false`, intervals can appear in any order,
+including overlapping with each other.
+

`filter`::
(Optional, <> rule object) Rule used to filter
returned intervals.
@@ -468,3 +478,94 @@ This query does *not* match a document containing the phrase
`hot porridge is salty porridge`, because the intervals returned by the match
query for `hot porridge` only cover the initial two terms in this document,
and these do not overlap the intervals covering `salty`.
+
+[[interval-max_gaps-all-rule]]
+===== max_gaps in `all_of` ordered and unordered rule
+
+The following `intervals` search returns documents containing `my
+favorite food` without any gap, followed by `cold porridge` that
+can have at most 4 tokens between "cold" and "porridge". These
+two inner intervals, when combined in the outer `all_of` interval,
+must have at most 1 gap between them.
+
+Because the `all_of` rule has `ordered` set to `true`, the inner
+intervals are expected to be in the provided order. Thus,
+this search would match a `my_text` value of `my favorite food is cold
+porridge` but not `when it's cold my favorite food is porridge`.
+
+[source,console]
+--------------------------------------------------
+POST _search
+{
+  "query": {
+    "intervals" : {
+      "my_text" : {
+        "all_of" : {
+          "ordered" : true, <1>
+          "max_gaps": 1,
+          "intervals" : [
+            {
+              "match" : {
+                "query" : "my favorite food",
+                "max_gaps" : 0,
+                "ordered" : true
+              }
+            },
+            {
+              "match" : {
+                "query" : "cold porridge",
+                "max_gaps" : 4,
+                "ordered" : true
+              }
+            }
+          ]
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+<1> The `ordered` parameter is set to `true`, so intervals must appear in the order specified.
+
+
+Below is the same query, but with `ordered` set to `false`. This means that
+intervals can appear in any order, and can even overlap with each other.
+Thus, this search would match a `my_text` value of `my favorite food is cold
+porridge`, as well as `when it's cold my favorite food is porridge`.
+In `when it's cold my favorite food is porridge`, the `cold ... porridge` interval
+overlaps with the `my favorite food` interval.
+
+[source,console]
+--------------------------------------------------
+POST _search
+{
+  "query": {
+    "intervals" : {
+      "my_text" : {
+        "all_of" : {
+          "ordered" : false, <1>
+          "max_gaps": 1,
+          "intervals" : [
+            {
+              "match" : {
+                "query" : "my favorite food",
+                "max_gaps" : 0,
+                "ordered" : true
+              }
+            },
+            {
+              "match" : {
+                "query" : "cold porridge",
+                "max_gaps" : 4,
+                "ordered" : true
+              }
+            }
+          ]
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+<1> The `ordered` parameter is set to `false`, so intervals can appear in any order,
+and can even overlap with each other.
diff --git a/docs/reference/query-dsl/knn-query.asciidoc b/docs/reference/query-dsl/knn-query.asciidoc
index daf9e9499a189..29ccadfa70a03 100644
--- a/docs/reference/query-dsl/knn-query.asciidoc
+++ b/docs/reference/query-dsl/knn-query.asciidoc
@@ -8,7 +8,8 @@ Finds the _k_ nearest vectors to a query vector, as measured by a similarity
metric. _knn_ query finds nearest vectors through approximate search on indexed
dense_vectors. The preferred way to do approximate kNN search is through the
<> of a search request. _knn_ query is reserved for
-expert cases, where there is a need to combine this query with other queries.
+expert cases, where there is a need to combine this query with other queries, or
+perform a kNN search against a <> field.

[[knn-query-ex-request]]
==== Example request
@@ -77,7 +78,8 @@ POST my-image-index/_search
+
--
(Required, string) The name of the vector field to search against. Must be a
-<>.
+<>, or a
+<> with a compatible dense vector inference model.
--

`query_vector`::
@@ -93,6 +95,7 @@ Either this or `query_vector_builder` must be provided.
--
(Optional, object) Query vector builder.
include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=knn-query-vector-builder]
+If all queried fields are of type <>, the inference ID associated with the `semantic_text` field may be inferred.
--

`k`::
+
--
(Optional, integer) The number of nearest neighbors to return from each shard.
{es} collects `k` results from each shard, then merges them to find the global top results.
-This value must be less than or equal to `num_candidates`. Defaults to `num_candidates`.
+This value must be less than or equal to `num_candidates`. Defaults to the search request size.
--

`num_candidates`::
diff --git a/docs/reference/query-dsl/match-query.asciidoc b/docs/reference/query-dsl/match-query.asciidoc
index eb840508aba39..67300ffca2d26 100644
--- a/docs/reference/query-dsl/match-query.asciidoc
+++ b/docs/reference/query-dsl/match-query.asciidoc
@@ -10,6 +10,10 @@ provided text is analyzed before matching.
The `match` query is the standard query for performing a full-text search,
including options for fuzzy matching.

+The `match` query also works against <> fields.
+However, when performing `match` queries against `semantic_text` fields, options
+that specifically target lexical search, such as `fuzziness` or `analyzer`, will be ignored.
+
[[match-query-ex-request]]
==== Example request

@@ -296,4 +300,3 @@ The example above creates a boolean query:

that matches documents with the term `ny` or the conjunction `new AND york`.
By default the parameter `auto_generate_synonyms_phrase_query` is set to `true`.
-
diff --git a/docs/reference/query-dsl/semantic-query.asciidoc b/docs/reference/query-dsl/semantic-query.asciidoc
index 11e19d6356081..914f4429f7f9c 100644
--- a/docs/reference/query-dsl/semantic-query.asciidoc
+++ b/docs/reference/query-dsl/semantic-query.asciidoc
@@ -117,79 +117,3 @@ GET my-index/_search
 }
------------------------------------------------------------
// TEST[skip: Requires inference endpoints]
-
-
-[discrete]
-[[advanced-search]]
-==== Advanced search on `semantic_text` fields
-
-The `semantic` query uses default settings for searching on `semantic_text` fields for ease of use.
-If you want to fine-tune a search on a `semantic_text` field, you need to know the task type used by the `inference_id` configured in `semantic_text`.
-You can find the task type using the <>, and check the `task_type` associated with the {infer} service.
-Depending on the `task_type`, use either the <> or the <> query for greater flexibility and customization.
-
-NOTE: While it is possible to use the `sparse_vector` query or the `knn` query
-on a `semantic_text` field, it is not supported to use the `semantic_query` on a
-`sparse_vector` or `dense_vector` field type.
-
-
-[discrete]
-[[search-sparse-inference]]
-===== Search with `sparse_embedding` inference
-
-When the {infer} endpoint uses a `sparse_embedding` model, you can use a <> on a <> field in the following way:
-
-[source,console]
-------------------------------------------------------------
-GET test-index/_search
-{
-  "query": {
-    "nested": {
-      "path": "inference_field.inference.chunks",
-      "query": {
-        "sparse_vector": {
-          "field": "inference_field.inference.chunks.embeddings",
-          "inference_id": "my-inference-id",
-          "query": "mountain lake"
-        }
-      }
-    }
-  }
-}
------------------------------------------------------------
-// TEST[skip: Requires inference endpoints]
-
-You can customize the `sparse_vector` query to include specific settings, like <>.
- - -[discrete] -[[search-text-inferece]] -===== Search with `text_embedding` inference - -When the {infer} endpoint uses a `text_embedding` model, you can use a <> on a `semantic_text` field in the following way: - -[source,console] ------------------------------------------------------------- -GET test-index/_search -{ - "query": { - "nested": { - "path": "inference_field.inference.chunks", - "query": { - "knn": { - "field": "inference_field.inference.chunks.embeddings", - "query_vector_builder": { - "text_embedding": { - "model_id": "my_inference_id", - "model_text": "mountain lake" - } - } - } - } - } - } -} ------------------------------------------------------------- -// TEST[skip: Requires inference endpoints] - -You can customize the `knn` query to include specific settings, like `num_candidates` and `k`. diff --git a/docs/reference/query-dsl/sparse-vector-query.asciidoc b/docs/reference/query-dsl/sparse-vector-query.asciidoc index 399cf29d4dd12..d46d649079d70 100644 --- a/docs/reference/query-dsl/sparse-vector-query.asciidoc +++ b/docs/reference/query-dsl/sparse-vector-query.asciidoc @@ -11,7 +11,8 @@ This can be achieved with one of two strategies: - Using an {nlp} model to convert query text into a list of token-weight pairs - Sending in precalculated token-weight pairs as query vectors -These token-weight pairs are then used in a query against a <>. +These token-weight pairs are then used in a query against a <> +or a <> field with a compatible sparse inference model. At query time, query vectors are calculated using the same inference model that was used to create the tokens. When querying, these query vectors are ORed together with their respective weights, which means scoring is effectively a <> calculation between stored dimensions and query dimensions. @@ -65,6 +66,7 @@ GET _search It must be the same inference ID that was used to create the tokens from the input text. Only one of `inference_id` and `query_vector` is allowed. If `inference_id` is specified, `query` must also be specified. +If all queried fields are of type <>, the inference ID associated with the `semantic_text` field will be inferred. `query`:: (Optional, string) The query text you want to use for search. @@ -291,5 +293,3 @@ GET my-index/_search //TEST[skip: Requires inference] NOTE: When performing <>, inference is performed on the local cluster. - - diff --git a/docs/reference/query-rules/apis/delete-query-rule.asciidoc b/docs/reference/query-rules/apis/delete-query-rule.asciidoc index 01b73033aa361..4d91092eaf8af 100644 --- a/docs/reference/query-rules/apis/delete-query-rule.asciidoc +++ b/docs/reference/query-rules/apis/delete-query-rule.asciidoc @@ -6,6 +6,12 @@ Delete query rule ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + Removes an individual query rule within an existing query ruleset. This is a destructive action that is only recoverable by re-adding the same rule via the <> API. diff --git a/docs/reference/query-rules/apis/delete-query-ruleset.asciidoc b/docs/reference/query-rules/apis/delete-query-ruleset.asciidoc index 31507dce3d12d..168310dcd4078 100644 --- a/docs/reference/query-rules/apis/delete-query-ruleset.asciidoc +++ b/docs/reference/query-rules/apis/delete-query-ruleset.asciidoc @@ -6,6 +6,12 @@ Delete query ruleset ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. 
+-- + Removes a query ruleset and its associated data. This is a destructive action that is not recoverable. diff --git a/docs/reference/query-rules/apis/get-query-rule.asciidoc b/docs/reference/query-rules/apis/get-query-rule.asciidoc index 56713965d7bdc..742982e5897e1 100644 --- a/docs/reference/query-rules/apis/get-query-rule.asciidoc +++ b/docs/reference/query-rules/apis/get-query-rule.asciidoc @@ -6,6 +6,12 @@ Get query rule ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + Retrieves information about an individual query rule within a query ruleset. [[get-query-rule-request]] diff --git a/docs/reference/query-rules/apis/get-query-ruleset.asciidoc b/docs/reference/query-rules/apis/get-query-ruleset.asciidoc index 6bbcd157ea9e1..55574fb7c67e9 100644 --- a/docs/reference/query-rules/apis/get-query-ruleset.asciidoc +++ b/docs/reference/query-rules/apis/get-query-ruleset.asciidoc @@ -6,6 +6,12 @@ Get query ruleset ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + Retrieves information about a query ruleset. [[get-query-ruleset-request]] diff --git a/docs/reference/query-rules/apis/index.asciidoc b/docs/reference/query-rules/apis/index.asciidoc index fbeb477acacb5..7dcdf0e8f4e1c 100644 --- a/docs/reference/query-rules/apis/index.asciidoc +++ b/docs/reference/query-rules/apis/index.asciidoc @@ -7,6 +7,12 @@ --- +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + <> allow you to configure per-query rules that are applied at query time to queries that match the specific rule. Query rules are organized into _rulesets_, collections of query rules that are matched against incoming queries. Query rules are applied using the <>. diff --git a/docs/reference/query-rules/apis/list-query-rulesets.asciidoc b/docs/reference/query-rules/apis/list-query-rulesets.asciidoc index 304b8c7745007..5f61bcb98b085 100644 --- a/docs/reference/query-rules/apis/list-query-rulesets.asciidoc +++ b/docs/reference/query-rules/apis/list-query-rulesets.asciidoc @@ -6,6 +6,12 @@ List query rulesets ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + Returns information about all stored query rulesets. Summary information on the number of rules per ruleset will be returned, and full details can be returned with the <> command. diff --git a/docs/reference/query-rules/apis/put-query-rule.asciidoc b/docs/reference/query-rules/apis/put-query-rule.asciidoc index 714ed9b096d1d..df33d22b39029 100644 --- a/docs/reference/query-rules/apis/put-query-rule.asciidoc +++ b/docs/reference/query-rules/apis/put-query-rule.asciidoc @@ -6,6 +6,12 @@ Create or update query rule ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + Creates or updates an individual query rule within a query ruleset. 
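+
+As a quick illustration of the request shape, a minimal sketch follows; the
+ruleset ID `my-ruleset`, rule ID `my-rule`, metadata key, and document IDs are
+placeholders rather than values from this document:
+
+[source,console]
+----
+PUT _query_rules/my-ruleset/_rule/my-rule
+{
+  "type": "pinned",
+  "criteria": [
+    {
+      "type": "exact",
+      "metadata": "user_query",
+      "values": [ "pugs" ]
+    }
+  ],
+  "actions": {
+    "ids": [ "id1", "id2" ]
+  }
+}
+----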
[[put-query-rule-request]] diff --git a/docs/reference/query-rules/apis/put-query-ruleset.asciidoc b/docs/reference/query-rules/apis/put-query-ruleset.asciidoc index df7ec100db076..ea689dc0bf305 100644 --- a/docs/reference/query-rules/apis/put-query-ruleset.asciidoc +++ b/docs/reference/query-rules/apis/put-query-ruleset.asciidoc @@ -6,6 +6,12 @@ Create or update query ruleset ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + Creates or updates a query ruleset. [[put-query-ruleset-request]] diff --git a/docs/reference/query-rules/apis/test-query-ruleset.asciidoc b/docs/reference/query-rules/apis/test-query-ruleset.asciidoc index 4a670645cea6e..4a47754f572e5 100644 --- a/docs/reference/query-rules/apis/test-query-ruleset.asciidoc +++ b/docs/reference/query-rules/apis/test-query-ruleset.asciidoc @@ -6,6 +6,12 @@ Tests query ruleset ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-query_rules[Query rules APIs]. +-- + Evaluates match criteria against a query ruleset to identify the rules that would match that criteria. preview::[] diff --git a/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc b/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc index a024305588cae..b602ee5076434 100644 --- a/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc +++ b/docs/reference/quickstart/full-text-filtering-tutorial.asciidoc @@ -4,7 +4,7 @@ Basics: Full-text search and filtering ++++ -This is a hands-on introduction to the basics of full-text search with {es}, also known as _lexical search_, using the <> and <>. +This is a hands-on introduction to the basics of <> with {es}, also known as _lexical search_, using the <> and <>. You'll also learn how to filter data, to narrow down search results based on exact criteria. In this scenario, we're implementing a search function for a cooking blog. @@ -632,6 +632,7 @@ This tutorial introduced the basics of full-text search and filtering in {es}. Building a real-world search experience requires understanding many more advanced concepts and techniques. Here are some resources once you're ready to dive deeper: +* <>: Learn about the core components of full-text search in {es}. * <>: Understand all your options for searching and analyzing data in {es}. * <>: Understand how text is processed for full-text search. * <>: Learn about more advanced search techniques using the `_search` API, including semantic search. diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index c3bf84fa600d2..9c0f0092214ed 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -156,10 +156,16 @@ See <>. The freeze index API was removed in 8.0. // tag::frozen-removal-explanation[] Frozen indices are no longer useful due to -https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[recent -improvements in heap memory usage]. +https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[improvements +in heap memory usage]. // end::frozen-removal-explanation[] +[role="exclude",id="unfreeze-index-api"] +=== Unfreeze index API + +The unfreeze index API was removed in 9.0. +include::redirects.asciidoc[tag=frozen-removal-explanation] + [role="exclude",id="ilm-freeze"] === Freeze {ilm-init} action @@ -1749,8 +1755,10 @@ See <>. 
=== Frozen indices // tag::frozen-index-redirect[] - -For API documentation, see <>. +Older versions of {es} provided the option to reduce the amount of data kept in memory for an index, at the expense of +increasing search latency. This was known as 'freezing' the index. +include::redirects.asciidoc[tag=frozen-removal-explanation] +The freeze index API was removed in 8.0, and the unfreeze index API was removed in 9.0. // end::frozen-index-redirect[] [role="exclude",id="best_practices"] diff --git a/docs/reference/repositories-metering-api/apis/clear-repositories-metering-archive.asciidoc b/docs/reference/repositories-metering-api/apis/clear-repositories-metering-archive.asciidoc index addcd65f0e84a..bd05d76705eb9 100644 --- a/docs/reference/repositories-metering-api/apis/clear-repositories-metering-archive.asciidoc +++ b/docs/reference/repositories-metering-api/apis/clear-repositories-metering-archive.asciidoc @@ -5,6 +5,12 @@ Clear repositories metering archive ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/operation/operation-nodes-clear-repositories-metering-archive[Clear the archived repositories metering API]. +-- + Removes the archived repositories metering information present in the cluster. [[clear-repositories-metering-archive-api-request]] diff --git a/docs/reference/repositories-metering-api/apis/get-repositories-metering.asciidoc b/docs/reference/repositories-metering-api/apis/get-repositories-metering.asciidoc index 314f85a7dba52..6b6d98a69ded2 100644 --- a/docs/reference/repositories-metering-api/apis/get-repositories-metering.asciidoc +++ b/docs/reference/repositories-metering-api/apis/get-repositories-metering.asciidoc @@ -5,6 +5,12 @@ Get repositories metering information ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/operation/operation-nodes-get-repositories-metering-info[Get cluster repositories metering API]. +-- + Returns cluster repositories metering information. [[get-repositories-metering-api-request]] diff --git a/docs/reference/repositories-metering-api/repositories-metering-apis.asciidoc b/docs/reference/repositories-metering-api/repositories-metering-apis.asciidoc index b838e0fb213f4..ca9bffa32a917 100644 --- a/docs/reference/repositories-metering-api/repositories-metering-apis.asciidoc +++ b/docs/reference/repositories-metering-api/repositories-metering-apis.asciidoc @@ -4,6 +4,12 @@ experimental[] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-cluster[Cluster APIs]. +-- + You can use the following APIs to retrieve repositories metering information. This is an API used by Elastic's commercial offerings. diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index 993bb8cb894f9..83c11c9256a67 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -668,6 +668,16 @@ node only. Defaults to `false`, which means information is retrieved from the master node. end::local[] +tag::local-deprecated-9.0.0[] +`local`:: +(Optional, Boolean) If `true`, the request retrieves information from the local +node only. Defaults to `false`, which means information is retrieved from +the master node. 
++ +deprecated::[9.0.0, "The `?local` query parameter to this API has no effect, is now deprecated, and will be removed in a future version."] + +end::local-deprecated-9.0.0[] + tag::mappings[] `mappings`:: + diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 50c9f96ad81b0..9d9047c93cc97 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -6,9 +6,9 @@ {es} exposes REST APIs that are used by the UI components and can be called directly to configure and access {es} features. -[NOTE] -We are working on including more {es} APIs in this section. Some content might -not be included yet. +.New API reference +[sidebar] +For the most up-to-date API details, refer to {api-es}[{es} APIs]. * <> * <> diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc index fda5b07d28205..b9547163b07ba 100644 --- a/docs/reference/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -2,6 +2,12 @@ [[info-api]] == Info API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-info[Info APIs]. +-- + Provides general information about the installed {xpack} features. [discrete] diff --git a/docs/reference/rest-api/logstash/delete-pipeline.asciidoc b/docs/reference/rest-api/logstash/delete-pipeline.asciidoc index ff7494d34e615..2e56b0289a394 100644 --- a/docs/reference/rest-api/logstash/delete-pipeline.asciidoc +++ b/docs/reference/rest-api/logstash/delete-pipeline.asciidoc @@ -5,6 +5,12 @@ Delete {ls} pipeline ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-logstash[{ls} APIs]. +-- + This API deletes a pipeline used for {logstash-ref}/logstash-centralized-pipeline-management.html[{ls} Central Management]. diff --git a/docs/reference/rest-api/logstash/get-pipeline.asciidoc b/docs/reference/rest-api/logstash/get-pipeline.asciidoc index 8409a5128d525..8e440f218aa09 100644 --- a/docs/reference/rest-api/logstash/get-pipeline.asciidoc +++ b/docs/reference/rest-api/logstash/get-pipeline.asciidoc @@ -5,6 +5,12 @@ Get {ls} pipeline ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-logstash[{ls} APIs]. +-- + This API retrieves pipelines used for {logstash-ref}/logstash-centralized-pipeline-management.html[{ls} Central Management]. diff --git a/docs/reference/rest-api/logstash/index.asciidoc b/docs/reference/rest-api/logstash/index.asciidoc index a52be19a75df4..7f28020a8b183 100644 --- a/docs/reference/rest-api/logstash/index.asciidoc +++ b/docs/reference/rest-api/logstash/index.asciidoc @@ -2,6 +2,12 @@ [[logstash-apis]] == {ls} APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-logstash[{ls} APIs]. +-- + The following APIs are used to manage pipelines used by {logstash-ref}/logstash-centralized-pipeline-management.html[{ls} Central Management]: diff --git a/docs/reference/rest-api/logstash/put-pipeline.asciidoc b/docs/reference/rest-api/logstash/put-pipeline.asciidoc index a0d2f02f2bf6f..26af8d0f124cb 100644 --- a/docs/reference/rest-api/logstash/put-pipeline.asciidoc +++ b/docs/reference/rest-api/logstash/put-pipeline.asciidoc @@ -5,6 +5,12 @@ Create or update {ls} pipeline ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-logstash[{ls} APIs]. 
+-- + This API creates or updates a {ls} pipeline used for {logstash-ref}/logstash-centralized-pipeline-management.html[{ls} Central Management]. diff --git a/docs/reference/rest-api/root.asciidoc b/docs/reference/rest-api/root.asciidoc index 8821981c2afe3..aaf40b31db21c 100644 --- a/docs/reference/rest-api/root.asciidoc +++ b/docs/reference/rest-api/root.asciidoc @@ -4,6 +4,12 @@ Root API ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-info[Info APIs]. +-- + The Elasticsearch API's base url returns its basic build, version, and cluster information. diff --git a/docs/reference/rest-api/security.asciidoc b/docs/reference/rest-api/security.asciidoc index 82cf38e52bd80..57726b074ac3f 100644 --- a/docs/reference/rest-api/security.asciidoc +++ b/docs/reference/rest-api/security.asciidoc @@ -1,6 +1,13 @@ [role="xpack"] [[security-api]] == Security APIs + +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + To use the security APIs, you must set `xpack.security.enabled` to `true` in the `elasticsearch.yml` file. diff --git a/docs/reference/rest-api/security/activate-user-profile.asciidoc b/docs/reference/rest-api/security/activate-user-profile.asciidoc index f6ce32e1bb19e..0db41937f2ff2 100644 --- a/docs/reference/rest-api/security/activate-user-profile.asciidoc +++ b/docs/reference/rest-api/security/activate-user-profile.asciidoc @@ -5,6 +5,12 @@ Activate user profile ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + NOTE: The user profile feature is designed only for use by {kib} and Elastic’s {observability}, {ents}, and {elastic-sec} solutions. Individual users and external applications should not call this API directly. Elastic reserves diff --git a/docs/reference/rest-api/security/authenticate.asciidoc b/docs/reference/rest-api/security/authenticate.asciidoc index a02deb444628d..78d9cc2bcaa9f 100644 --- a/docs/reference/rest-api/security/authenticate.asciidoc +++ b/docs/reference/rest-api/security/authenticate.asciidoc @@ -5,6 +5,12 @@ Authenticate ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Enables you to submit a request with a basic auth header to authenticate a user and retrieve information about the authenticated user. diff --git a/docs/reference/rest-api/security/bulk-create-roles.asciidoc b/docs/reference/rest-api/security/bulk-create-roles.asciidoc index 37f49f2445770..030e5e42bf29a 100644 --- a/docs/reference/rest-api/security/bulk-create-roles.asciidoc +++ b/docs/reference/rest-api/security/bulk-create-roles.asciidoc @@ -5,6 +5,12 @@ Bulk create or update roles API ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Bulk adds and updates roles in the native realm. 
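+
+A minimal sketch of the request shape follows, assuming the bulk endpoint
+accepts a `roles` object keyed by role name; the role name and index pattern
+are placeholders:
+
+[source,console]
+----
+POST /_security/role
+{
+  "roles": {
+    "logs_reader": {
+      "cluster": [ "monitor" ],
+      "indices": [
+        {
+          "names": [ "logs-*" ],
+          "privileges": [ "read", "view_index_metadata" ]
+        }
+      ]
+    }
+  }
+}
+----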
[[security-api-bulk-put-role-request]] diff --git a/docs/reference/rest-api/security/bulk-delete-roles.asciidoc b/docs/reference/rest-api/security/bulk-delete-roles.asciidoc index b9978c89bef3a..899591b3276db 100644 --- a/docs/reference/rest-api/security/bulk-delete-roles.asciidoc +++ b/docs/reference/rest-api/security/bulk-delete-roles.asciidoc @@ -5,6 +5,12 @@ Bulk delete roles API ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Bulk deletes roles in the native realm. [[security-api-bulk-delete-role-request]] diff --git a/docs/reference/rest-api/security/bulk-update-api-keys.asciidoc b/docs/reference/rest-api/security/bulk-update-api-keys.asciidoc index faf87c67d1ccc..8206ac5d9d4f3 100644 --- a/docs/reference/rest-api/security/bulk-update-api-keys.asciidoc +++ b/docs/reference/rest-api/security/bulk-update-api-keys.asciidoc @@ -6,6 +6,12 @@ Bulk update API keys ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + [[security-api-bulk-update-api-keys-request]] ==== {api-request-title} diff --git a/docs/reference/rest-api/security/change-password.asciidoc b/docs/reference/rest-api/security/change-password.asciidoc index c035661cdd707..f2dc45cad20b0 100644 --- a/docs/reference/rest-api/security/change-password.asciidoc +++ b/docs/reference/rest-api/security/change-password.asciidoc @@ -5,6 +5,12 @@ Change passwords ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Changes the passwords of users in the native realm and built-in users. [[security-api-change-password-request]] diff --git a/docs/reference/rest-api/security/clear-api-key-cache.asciidoc b/docs/reference/rest-api/security/clear-api-key-cache.asciidoc index 7828026b604f0..61f05a85dfaff 100644 --- a/docs/reference/rest-api/security/clear-api-key-cache.asciidoc +++ b/docs/reference/rest-api/security/clear-api-key-cache.asciidoc @@ -5,6 +5,12 @@ Clear API key cache ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Evicts a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index. diff --git a/docs/reference/rest-api/security/clear-cache.asciidoc b/docs/reference/rest-api/security/clear-cache.asciidoc index 3e4e5432768bd..270856ba28e6b 100644 --- a/docs/reference/rest-api/security/clear-cache.asciidoc +++ b/docs/reference/rest-api/security/clear-cache.asciidoc @@ -5,6 +5,12 @@ Clear cache ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Evicts users from the user cache. You can completely clear the cache or evict specific users. diff --git a/docs/reference/rest-api/security/clear-privileges-cache.asciidoc b/docs/reference/rest-api/security/clear-privileges-cache.asciidoc index 69a5743419d2c..cf615010779b7 100644 --- a/docs/reference/rest-api/security/clear-privileges-cache.asciidoc +++ b/docs/reference/rest-api/security/clear-privileges-cache.asciidoc @@ -5,6 +5,12 @@ Clear privileges cache ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Evicts privileges from the native application privilege cache. 
The cache is also automatically cleared for applications that have their privileges updated. diff --git a/docs/reference/rest-api/security/clear-roles-cache.asciidoc b/docs/reference/rest-api/security/clear-roles-cache.asciidoc index 63c54b51dcf2f..edcb2c2723512 100644 --- a/docs/reference/rest-api/security/clear-roles-cache.asciidoc +++ b/docs/reference/rest-api/security/clear-roles-cache.asciidoc @@ -5,6 +5,12 @@ Clear roles cache ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Evicts roles from the native role cache. [[security-api-clear-role-cache-request]] diff --git a/docs/reference/rest-api/security/clear-service-token-caches.asciidoc b/docs/reference/rest-api/security/clear-service-token-caches.asciidoc index ff4587549534d..26158220418e0 100644 --- a/docs/reference/rest-api/security/clear-service-token-caches.asciidoc +++ b/docs/reference/rest-api/security/clear-service-token-caches.asciidoc @@ -6,6 +6,12 @@ Clear service account token caches ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Evicts a subset of all entries from the <> token caches. diff --git a/docs/reference/rest-api/security/create-api-keys.asciidoc b/docs/reference/rest-api/security/create-api-keys.asciidoc index f740e9413e3e9..20f1c2cb155ce 100644 --- a/docs/reference/rest-api/security/create-api-keys.asciidoc +++ b/docs/reference/rest-api/security/create-api-keys.asciidoc @@ -5,6 +5,12 @@ Create API keys ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Creates an API key for access without requiring basic authentication. [[security-api-create-api-key-request]] diff --git a/docs/reference/rest-api/security/create-cross-cluster-api-key.asciidoc b/docs/reference/rest-api/security/create-cross-cluster-api-key.asciidoc index d7d55bafc6d18..63b2f37063f9f 100644 --- a/docs/reference/rest-api/security/create-cross-cluster-api-key.asciidoc +++ b/docs/reference/rest-api/security/create-cross-cluster-api-key.asciidoc @@ -1,11 +1,16 @@ [role="xpack"] [[security-api-create-cross-cluster-api-key]] === Create Cross-Cluster API key API - ++++ Create Cross-Cluster API key ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Creates an API key of the `cross_cluster` type for the <> access. A `cross_cluster` API key cannot be used to authenticate through the REST interface. On the contrary, a <> is meant to be used through the REST interface diff --git a/docs/reference/rest-api/security/create-role-mappings.asciidoc b/docs/reference/rest-api/security/create-role-mappings.asciidoc index e78d06a5676e4..71c931260865f 100644 --- a/docs/reference/rest-api/security/create-role-mappings.asciidoc +++ b/docs/reference/rest-api/security/create-role-mappings.asciidoc @@ -5,6 +5,12 @@ Create or update role mappings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Creates and updates role mappings. 
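+
+For instance, a minimal mapping that assigns the `user` role to every
+authenticated user might look like the following sketch; the mapping name
+`mapping1` and the rule are illustrative only:
+
+[source,console]
+----
+PUT /_security/role_mapping/mapping1
+{
+  "roles": [ "user" ],
+  "enabled": true,
+  "rules": {
+    "field": { "username": "*" }
+  }
+}
+----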
[[security-api-put-role-mapping-request]] diff --git a/docs/reference/rest-api/security/create-roles.asciidoc b/docs/reference/rest-api/security/create-roles.asciidoc index d23b9f06e2d87..0b0cd828140a1 100644 --- a/docs/reference/rest-api/security/create-roles.asciidoc +++ b/docs/reference/rest-api/security/create-roles.asciidoc @@ -5,6 +5,12 @@ Create or update roles ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Adds and updates roles in the native realm. [[security-api-put-role-request]] diff --git a/docs/reference/rest-api/security/create-service-token.asciidoc b/docs/reference/rest-api/security/create-service-token.asciidoc index 9a3c2b678c92e..30195d89ff47d 100644 --- a/docs/reference/rest-api/security/create-service-token.asciidoc +++ b/docs/reference/rest-api/security/create-service-token.asciidoc @@ -5,6 +5,12 @@ Create service account tokens ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Creates a <> token for access without requiring basic authentication. diff --git a/docs/reference/rest-api/security/create-users.asciidoc b/docs/reference/rest-api/security/create-users.asciidoc index 428df1102329c..6d8e1cdaa9ba0 100644 --- a/docs/reference/rest-api/security/create-users.asciidoc +++ b/docs/reference/rest-api/security/create-users.asciidoc @@ -5,6 +5,12 @@ Create or update users ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Adds and updates users in the native realm. These users are commonly referred to as _native users_. diff --git a/docs/reference/rest-api/security/delegate-pki-authentication.asciidoc b/docs/reference/rest-api/security/delegate-pki-authentication.asciidoc index 860708011cd25..00a80b3fd27bc 100644 --- a/docs/reference/rest-api/security/delegate-pki-authentication.asciidoc +++ b/docs/reference/rest-api/security/delegate-pki-authentication.asciidoc @@ -5,6 +5,12 @@ Delegate PKI authentication ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Implements the exchange of an _X509Certificate_ chain into an {es} access token. diff --git a/docs/reference/rest-api/security/delete-app-privileges.asciidoc b/docs/reference/rest-api/security/delete-app-privileges.asciidoc index 39ac1706c6dc2..dad57bcac409c 100644 --- a/docs/reference/rest-api/security/delete-app-privileges.asciidoc +++ b/docs/reference/rest-api/security/delete-app-privileges.asciidoc @@ -5,6 +5,12 @@ Delete application privileges ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Removes <>. [[security-api-delete-privilege-request]] diff --git a/docs/reference/rest-api/security/delete-role-mappings.asciidoc b/docs/reference/rest-api/security/delete-role-mappings.asciidoc index c5dd1aa9c909f..4ec7e3817b031 100644 --- a/docs/reference/rest-api/security/delete-role-mappings.asciidoc +++ b/docs/reference/rest-api/security/delete-role-mappings.asciidoc @@ -5,6 +5,12 @@ Delete role mappings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Removes role mappings. 
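+
+The request takes the role mapping name in the path; `mapping1` below is an
+illustrative name, not one defined in this document:
+
+[source,console]
+----
+DELETE /_security/role_mapping/mapping1
+----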
[[security-api-delete-role-mapping-request]] diff --git a/docs/reference/rest-api/security/delete-roles.asciidoc b/docs/reference/rest-api/security/delete-roles.asciidoc index 427e7c6b1860d..d30a4c5251809 100644 --- a/docs/reference/rest-api/security/delete-roles.asciidoc +++ b/docs/reference/rest-api/security/delete-roles.asciidoc @@ -5,6 +5,12 @@ Delete roles ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Removes roles in the native realm. [[security-api-delete-role-request]] diff --git a/docs/reference/rest-api/security/delete-service-token.asciidoc b/docs/reference/rest-api/security/delete-service-token.asciidoc index b704fb9121263..f7c488e9e7139 100644 --- a/docs/reference/rest-api/security/delete-service-token.asciidoc +++ b/docs/reference/rest-api/security/delete-service-token.asciidoc @@ -5,6 +5,12 @@ Delete service account token ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Deletes <> tokens for a `service` in a specified `namespace`. diff --git a/docs/reference/rest-api/security/delete-users.asciidoc b/docs/reference/rest-api/security/delete-users.asciidoc index b08f99e809b44..ff781a7d9ef0b 100644 --- a/docs/reference/rest-api/security/delete-users.asciidoc +++ b/docs/reference/rest-api/security/delete-users.asciidoc @@ -5,6 +5,12 @@ Delete users ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Deletes users from the native realm. [[security-api-delete-user-request]] diff --git a/docs/reference/rest-api/security/disable-user-profile.asciidoc b/docs/reference/rest-api/security/disable-user-profile.asciidoc index 35658f071679b..f665b8955e0d0 100644 --- a/docs/reference/rest-api/security/disable-user-profile.asciidoc +++ b/docs/reference/rest-api/security/disable-user-profile.asciidoc @@ -5,6 +5,12 @@ Disable user profile ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + NOTE: The user profile feature is designed only for use by {kib} and Elastic’s {observability}, {ents}, and {elastic-sec} solutions. Individual users and external applications should not call this API directly. Elastic reserves diff --git a/docs/reference/rest-api/security/disable-users.asciidoc b/docs/reference/rest-api/security/disable-users.asciidoc index 9859085cb1824..3f8bc74d7e106 100644 --- a/docs/reference/rest-api/security/disable-users.asciidoc +++ b/docs/reference/rest-api/security/disable-users.asciidoc @@ -5,6 +5,12 @@ Disable users ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Disables users in the native realm. diff --git a/docs/reference/rest-api/security/enable-user-profile.asciidoc b/docs/reference/rest-api/security/enable-user-profile.asciidoc index e27673b07f598..9f74d90f88b99 100644 --- a/docs/reference/rest-api/security/enable-user-profile.asciidoc +++ b/docs/reference/rest-api/security/enable-user-profile.asciidoc @@ -5,6 +5,12 @@ Enable user profile ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + NOTE: The user profile feature is designed only for use by {kib} and Elastic’s {observability}, {ents}, and {elastic-sec} solutions. 
Individual users and external applications should not call this API directly. Elastic reserves diff --git a/docs/reference/rest-api/security/enable-users.asciidoc b/docs/reference/rest-api/security/enable-users.asciidoc index 04193e6c27944..db78a5c222116 100644 --- a/docs/reference/rest-api/security/enable-users.asciidoc +++ b/docs/reference/rest-api/security/enable-users.asciidoc @@ -5,6 +5,12 @@ Enable users ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Enables users in the native realm. diff --git a/docs/reference/rest-api/security/enroll-kibana.asciidoc b/docs/reference/rest-api/security/enroll-kibana.asciidoc index 55de31b5407d1..78bc0569fa193 100644 --- a/docs/reference/rest-api/security/enroll-kibana.asciidoc +++ b/docs/reference/rest-api/security/enroll-kibana.asciidoc @@ -4,6 +4,12 @@ Enroll {kib} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Enables a {kib} instance to configure itself for communication with a secured {es} cluster. NOTE: This API is currently intended for internal use only by {kib}. diff --git a/docs/reference/rest-api/security/enroll-node.asciidoc b/docs/reference/rest-api/security/enroll-node.asciidoc index 81cae73fb22d6..d5c2ce2a51746 100644 --- a/docs/reference/rest-api/security/enroll-node.asciidoc +++ b/docs/reference/rest-api/security/enroll-node.asciidoc @@ -4,6 +4,12 @@ Enroll node ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Allows a new node to join an existing cluster with security features enabled. [[security-api-node-enrollment-api-request]] diff --git a/docs/reference/rest-api/security/get-api-keys.asciidoc b/docs/reference/rest-api/security/get-api-keys.asciidoc index bf49297539895..6a133f136d4b3 100644 --- a/docs/reference/rest-api/security/get-api-keys.asciidoc +++ b/docs/reference/rest-api/security/get-api-keys.asciidoc @@ -5,6 +5,12 @@ Get API key information ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves information for one or more API keys. [[security-api-get-api-key-request]] diff --git a/docs/reference/rest-api/security/get-app-privileges.asciidoc b/docs/reference/rest-api/security/get-app-privileges.asciidoc index f0f3f1b69071c..c8bb709f96d5a 100644 --- a/docs/reference/rest-api/security/get-app-privileges.asciidoc +++ b/docs/reference/rest-api/security/get-app-privileges.asciidoc @@ -5,6 +5,12 @@ Get application privileges ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves <>. [[security-api-get-privileges-request]] diff --git a/docs/reference/rest-api/security/get-builtin-privileges.asciidoc b/docs/reference/rest-api/security/get-builtin-privileges.asciidoc index 7f3d75b926780..08a03a5b1e830 100644 --- a/docs/reference/rest-api/security/get-builtin-privileges.asciidoc +++ b/docs/reference/rest-api/security/get-builtin-privileges.asciidoc @@ -6,6 +6,12 @@ Get builtin privileges ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves the list of <> and <> that are available in this version of {es}. 
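As a quick illustration, the endpoint takes no parameters; a single request returns the names of the built-in cluster and index privileges for this version of {es}:

[source,console]
----
GET /_security/privilege/_builtin
----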
diff --git a/docs/reference/rest-api/security/get-role-mappings.asciidoc b/docs/reference/rest-api/security/get-role-mappings.asciidoc index 8272ec4d015a8..49063e775982b 100644 --- a/docs/reference/rest-api/security/get-role-mappings.asciidoc +++ b/docs/reference/rest-api/security/get-role-mappings.asciidoc @@ -5,6 +5,12 @@ Get role mappings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves role mappings. [[security-api-get-role-mapping-request]] diff --git a/docs/reference/rest-api/security/get-roles.asciidoc b/docs/reference/rest-api/security/get-roles.asciidoc index 3cc2f95c6ea7e..03f083e8202f0 100644 --- a/docs/reference/rest-api/security/get-roles.asciidoc +++ b/docs/reference/rest-api/security/get-roles.asciidoc @@ -5,6 +5,12 @@ Get roles ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves roles in the native realm. [[security-api-get-role-request]] diff --git a/docs/reference/rest-api/security/get-service-accounts.asciidoc b/docs/reference/rest-api/security/get-service-accounts.asciidoc index 74f98f2602e34..b80136365d879 100644 --- a/docs/reference/rest-api/security/get-service-accounts.asciidoc +++ b/docs/reference/rest-api/security/get-service-accounts.asciidoc @@ -6,6 +6,12 @@ Get service accounts ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves information about <>. NOTE: Currently, only the `elastic/fleet-server` service account is available. @@ -270,6 +276,21 @@ GET /_security/service/elastic/fleet-server "view_index_metadata" ], "allow_restricted_indices": false + }, + { + "names": [ + "agentless-*" + ], + "privileges": [ + "read", + "write", + "monitor", + "create_index", + "auto_configure", + "maintenance", + "view_index_metadata" + ], + "allow_restricted_indices": false + } ], "applications": [ diff --git a/docs/reference/rest-api/security/get-service-credentials.asciidoc b/docs/reference/rest-api/security/get-service-credentials.asciidoc index 3da6c3d860558..7a24aef059ae9 100644 --- a/docs/reference/rest-api/security/get-service-credentials.asciidoc +++ b/docs/reference/rest-api/security/get-service-credentials.asciidoc @@ -1,11 +1,16 @@ [role="xpack"] [[security-api-get-service-credentials]] === Get service account credentials API - ++++ Get service account credentials ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves all service credentials for a <>. [[security-api-get-service-credentials-request]] diff --git a/docs/reference/rest-api/security/get-settings.asciidoc b/docs/reference/rest-api/security/get-settings.asciidoc index 46e4e0cf529bb..c99b9bcedba21 100644 --- a/docs/reference/rest-api/security/get-settings.asciidoc +++ b/docs/reference/rest-api/security/get-settings.asciidoc @@ -5,6 +5,12 @@ Get Security settings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves settings for the security internal indices.
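A minimal sketch of the call, which takes no parameters and reports the user-configurable settings of each security index present in the cluster:

[source,console]
----
GET /_security/settings
----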
[[security-api-get-settings-prereqs]] diff --git a/docs/reference/rest-api/security/get-tokens.asciidoc b/docs/reference/rest-api/security/get-tokens.asciidoc index 9f5261a477bcb..eefc86528ef43 100644 --- a/docs/reference/rest-api/security/get-tokens.asciidoc +++ b/docs/reference/rest-api/security/get-tokens.asciidoc @@ -5,6 +5,12 @@ Get token ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Creates a bearer token for access without requiring basic authentication. [[security-api-get-token-request]] diff --git a/docs/reference/rest-api/security/get-user-privileges.asciidoc b/docs/reference/rest-api/security/get-user-privileges.asciidoc index 8115cd365c5a3..4e0d68b4b679c 100644 --- a/docs/reference/rest-api/security/get-user-privileges.asciidoc +++ b/docs/reference/rest-api/security/get-user-privileges.asciidoc @@ -5,6 +5,12 @@ Get user privileges ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves the <> for the logged in user. diff --git a/docs/reference/rest-api/security/get-user-profile.asciidoc b/docs/reference/rest-api/security/get-user-profile.asciidoc index 9f0ba64d136ac..60732cf0ab395 100644 --- a/docs/reference/rest-api/security/get-user-profile.asciidoc +++ b/docs/reference/rest-api/security/get-user-profile.asciidoc @@ -5,6 +5,12 @@ Get user profiles ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + NOTE: The user profile feature is designed only for use by {kib} and Elastic’s {observability}, {ents}, and {elastic-sec} solutions. Individual users and external applications should not call this API directly. Elastic reserves diff --git a/docs/reference/rest-api/security/get-users.asciidoc b/docs/reference/rest-api/security/get-users.asciidoc index 59a390f6f2538..8770c90cdec0a 100644 --- a/docs/reference/rest-api/security/get-users.asciidoc +++ b/docs/reference/rest-api/security/get-users.asciidoc @@ -5,8 +5,13 @@ Get users ++++ -Retrieves information about users in the native realm and built-in users. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- +Retrieves information about users in the native realm and built-in users. [[security-api-get-user-request]] ==== {api-request-title} diff --git a/docs/reference/rest-api/security/grant-api-keys.asciidoc b/docs/reference/rest-api/security/grant-api-keys.asciidoc index 10c109b00bbf9..4ab599c18dd61 100644 --- a/docs/reference/rest-api/security/grant-api-keys.asciidoc +++ b/docs/reference/rest-api/security/grant-api-keys.asciidoc @@ -5,6 +5,12 @@ Grant API keys ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Creates an API key on behalf of another user. 
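For illustration, a sketch of granting a key on behalf of a user identified by credentials; the username, password, and key name below are hypothetical placeholders:

[source,console]
----
POST /_security/api_key/grant
{
  "grant_type": "password",
  "username": "end-user",
  "password": "end-user-password",
  "api_key": {
    "name": "on-behalf-of-key",
    "expiration": "1d"
  }
}
----

With `"grant_type": "access_token"`, an `access_token` field replaces the `username` and `password` pair.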
[[security-api-grant-api-key-request]] diff --git a/docs/reference/rest-api/security/has-privileges-user-profile.asciidoc b/docs/reference/rest-api/security/has-privileges-user-profile.asciidoc index afadf394aa43c..3fc825b60bce8 100644 --- a/docs/reference/rest-api/security/has-privileges-user-profile.asciidoc +++ b/docs/reference/rest-api/security/has-privileges-user-profile.asciidoc @@ -5,6 +5,12 @@ Has privileges user profile ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + NOTE: The user profile feature is designed only for use by {kib} and Elastic’s {observability}, {ents}, and {elastic-sec} solutions. Individual users and external applications should not call this API directly. Elastic reserves diff --git a/docs/reference/rest-api/security/has-privileges.asciidoc b/docs/reference/rest-api/security/has-privileges.asciidoc index 229ffb4997273..1e08b41d92fe0 100644 --- a/docs/reference/rest-api/security/has-privileges.asciidoc +++ b/docs/reference/rest-api/security/has-privileges.asciidoc @@ -6,6 +6,12 @@ ++++ [[security-api-has-privilege]] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Determines whether the logged in user has a specified list of privileges. [[security-api-has-privileges-request]] diff --git a/docs/reference/rest-api/security/invalidate-api-keys.asciidoc b/docs/reference/rest-api/security/invalidate-api-keys.asciidoc index 57a36a97634ac..27c5a8c831808 100644 --- a/docs/reference/rest-api/security/invalidate-api-keys.asciidoc +++ b/docs/reference/rest-api/security/invalidate-api-keys.asciidoc @@ -5,6 +5,12 @@ Invalidate API key ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Invalidates one or more API keys. [[security-api-invalidate-api-key-request]] diff --git a/docs/reference/rest-api/security/invalidate-tokens.asciidoc b/docs/reference/rest-api/security/invalidate-tokens.asciidoc index 58f20fdcdc425..9a6fe6185b477 100644 --- a/docs/reference/rest-api/security/invalidate-tokens.asciidoc +++ b/docs/reference/rest-api/security/invalidate-tokens.asciidoc @@ -5,6 +5,12 @@ Invalidate token ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Invalidates one or more access tokens or refresh tokens. [[security-api-invalidate-token-request]] diff --git a/docs/reference/rest-api/security/oidc-authenticate-api.asciidoc b/docs/reference/rest-api/security/oidc-authenticate-api.asciidoc index 282a054717e6d..7bf8a1aad3ee1 100644 --- a/docs/reference/rest-api/security/oidc-authenticate-api.asciidoc +++ b/docs/reference/rest-api/security/oidc-authenticate-api.asciidoc @@ -5,6 +5,12 @@ OpenID Connect authenticate ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Submits the response to an oAuth 2.0 authentication request for consumption from {es}. Upon successful validation, {es} will respond with an {es} internal Access Token and Refresh Token that can be subsequently used for authentication. 
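A minimal sketch of the exchange: the `redirect_uri` is the full URL to which the OpenID Connect Provider redirected the browser, and the `state` and `nonce` values must match those generated for the original authentication request. All values below are hypothetical placeholders:

[source,console]
----
POST /_security/oidc/authenticate
{
  "redirect_uri": "https://kibana.example.org/api/security/oidc/callback?code=jtI3Ntt8v3&state=4dbrihtIAt3wBTwo6DxK",
  "state": "4dbrihtIAt3wBTwo6DxK",
  "nonce": "WaBPH0KqPVdG5HHdSxPR",
  "realm": "oidc1"
}
----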
diff --git a/docs/reference/rest-api/security/oidc-logout-api.asciidoc b/docs/reference/rest-api/security/oidc-logout-api.asciidoc index a181f4c836fbd..d8bd60b3cd85b 100644 --- a/docs/reference/rest-api/security/oidc-logout-api.asciidoc +++ b/docs/reference/rest-api/security/oidc-logout-api.asciidoc @@ -5,6 +5,12 @@ OpenID Connect logout ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Submits a request to invalidate a refresh token and an access token that was generated as a response to a call to `/_security/oidc/authenticate`. diff --git a/docs/reference/rest-api/security/oidc-prepare-authentication-api.asciidoc b/docs/reference/rest-api/security/oidc-prepare-authentication-api.asciidoc index 4452020b4547c..227f154934306 100644 --- a/docs/reference/rest-api/security/oidc-prepare-authentication-api.asciidoc +++ b/docs/reference/rest-api/security/oidc-prepare-authentication-api.asciidoc @@ -5,6 +5,12 @@ OpenID Connect prepare authentication ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Creates an oAuth 2.0 authentication request as a URL string based on the configuration of the respective OpenID Connect authentication realm in {es}. diff --git a/docs/reference/rest-api/security/put-app-privileges.asciidoc b/docs/reference/rest-api/security/put-app-privileges.asciidoc index 28be4c08c4e71..cf903d99a724d 100644 --- a/docs/reference/rest-api/security/put-app-privileges.asciidoc +++ b/docs/reference/rest-api/security/put-app-privileges.asciidoc @@ -5,6 +5,12 @@ Create or update application privileges ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Adds or updates <>. [[security-api-put-privileges-request]] diff --git a/docs/reference/rest-api/security/query-api-key.asciidoc b/docs/reference/rest-api/security/query-api-key.asciidoc index 513cb99a55a4c..1dec37c166f89 100644 --- a/docs/reference/rest-api/security/query-api-key.asciidoc +++ b/docs/reference/rest-api/security/query-api-key.asciidoc @@ -1,11 +1,16 @@ [role="xpack"] [[security-api-query-api-key]] === Query API key information API - ++++ Query API key information ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + //// [source,console] ---- diff --git a/docs/reference/rest-api/security/query-role.asciidoc b/docs/reference/rest-api/security/query-role.asciidoc index 937bd263140fc..acdfbb45b84f6 100644 --- a/docs/reference/rest-api/security/query-role.asciidoc +++ b/docs/reference/rest-api/security/query-role.asciidoc @@ -6,6 +6,12 @@ Query Role ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Retrieves roles with <> in a <> fashion. [[security-api-query-role-request]] diff --git a/docs/reference/rest-api/security/saml-authenticate-api.asciidoc b/docs/reference/rest-api/security/saml-authenticate-api.asciidoc index aa556a42d699c..4c156df6e1bb3 100644 --- a/docs/reference/rest-api/security/saml-authenticate-api.asciidoc +++ b/docs/reference/rest-api/security/saml-authenticate-api.asciidoc @@ -5,6 +5,12 @@ SAML authenticate ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. 
+-- + Submits a SAML `Response` message to {es} for consumption. NOTE: This API is intended for use by custom web applications other than {kib}. diff --git a/docs/reference/rest-api/security/saml-complete-logout-api.asciidoc b/docs/reference/rest-api/security/saml-complete-logout-api.asciidoc index 1fb4ab1581abc..d4847fb481cdb 100644 --- a/docs/reference/rest-api/security/saml-complete-logout-api.asciidoc +++ b/docs/reference/rest-api/security/saml-complete-logout-api.asciidoc @@ -5,6 +5,12 @@ SAML complete logout ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Verifies the logout response sent from the SAML IdP. NOTE: This API is intended for use by custom web applications other than {kib}. diff --git a/docs/reference/rest-api/security/saml-invalidate-api.asciidoc b/docs/reference/rest-api/security/saml-invalidate-api.asciidoc index 21c10341c6fee..fb233f3903d1c 100644 --- a/docs/reference/rest-api/security/saml-invalidate-api.asciidoc +++ b/docs/reference/rest-api/security/saml-invalidate-api.asciidoc @@ -5,6 +5,12 @@ SAML invalidate ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Submits a SAML LogoutRequest message to {es} for consumption. NOTE: This API is intended for use by custom web applications other than {kib}. diff --git a/docs/reference/rest-api/security/saml-logout-api.asciidoc b/docs/reference/rest-api/security/saml-logout-api.asciidoc index 71729365865dc..560a713b5bf15 100644 --- a/docs/reference/rest-api/security/saml-logout-api.asciidoc +++ b/docs/reference/rest-api/security/saml-logout-api.asciidoc @@ -5,6 +5,12 @@ SAML logout ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Submits a request to invalidate an access token and refresh token. NOTE: This API is intended for use by custom web applications other than {kib}. diff --git a/docs/reference/rest-api/security/saml-prepare-authentication-api.asciidoc b/docs/reference/rest-api/security/saml-prepare-authentication-api.asciidoc index b62d3d2ac9f75..60b3451cc531d 100644 --- a/docs/reference/rest-api/security/saml-prepare-authentication-api.asciidoc +++ b/docs/reference/rest-api/security/saml-prepare-authentication-api.asciidoc @@ -5,6 +5,12 @@ SAML prepare authentication ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Creates a SAML authentication request (``) as a URL string, based on the configuration of the respective SAML realm in {es}. NOTE: This API is intended for use by custom web applications other than {kib}. diff --git a/docs/reference/rest-api/security/saml-sp-metadata.asciidoc b/docs/reference/rest-api/security/saml-sp-metadata.asciidoc index deecbf5f0e664..0f66b7a9f76f8 100644 --- a/docs/reference/rest-api/security/saml-sp-metadata.asciidoc +++ b/docs/reference/rest-api/security/saml-sp-metadata.asciidoc @@ -5,6 +5,12 @@ SAML service provider metadata ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Generate SAML metadata for a SAML 2.0 Service Provider. 
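For example, the metadata for a SAML realm can be fetched by realm name; `saml1` is a hypothetical placeholder:

[source,console]
----
GET /_security/saml/metadata/saml1
----

The response contains a single `metadata` field holding the XML descriptor as a string.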
[[security-api-saml-sp-metadata-request]] diff --git a/docs/reference/rest-api/security/ssl.asciidoc b/docs/reference/rest-api/security/ssl.asciidoc index 3b8ba0eab6888..78b9aee301096 100644 --- a/docs/reference/rest-api/security/ssl.asciidoc +++ b/docs/reference/rest-api/security/ssl.asciidoc @@ -5,6 +5,12 @@ SSL certificate ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + The `certificates` API enables you to retrieve information about the X.509 certificates that are used to encrypt communications in your {es} cluster. diff --git a/docs/reference/rest-api/security/suggest-user-profile.asciidoc b/docs/reference/rest-api/security/suggest-user-profile.asciidoc index ad01987a1e704..4ea04864e4a6e 100644 --- a/docs/reference/rest-api/security/suggest-user-profile.asciidoc +++ b/docs/reference/rest-api/security/suggest-user-profile.asciidoc @@ -5,6 +5,12 @@ Suggest user profile ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + NOTE: The user profile feature is designed only for use by {kib} and Elastic’s {observability}, {ents}, and {elastic-sec} solutions. Individual users and external applications should not call this API directly. Elastic reserves diff --git a/docs/reference/rest-api/security/update-api-key.asciidoc b/docs/reference/rest-api/security/update-api-key.asciidoc index f297e3922a657..12dd883b5955d 100644 --- a/docs/reference/rest-api/security/update-api-key.asciidoc +++ b/docs/reference/rest-api/security/update-api-key.asciidoc @@ -6,6 +6,12 @@ Update API key ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + [[security-api-update-api-key-request]] ==== {api-request-title} diff --git a/docs/reference/rest-api/security/update-cross-cluster-api-key.asciidoc b/docs/reference/rest-api/security/update-cross-cluster-api-key.asciidoc index b90cb6368eefb..5c5f55b73597a 100644 --- a/docs/reference/rest-api/security/update-cross-cluster-api-key.asciidoc +++ b/docs/reference/rest-api/security/update-cross-cluster-api-key.asciidoc @@ -6,6 +6,12 @@ Update Cross-Cluster API key ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Update an existing cross-cluster API Key that is used for <> access. diff --git a/docs/reference/rest-api/security/update-settings.asciidoc b/docs/reference/rest-api/security/update-settings.asciidoc index b227bb70b31d7..3ec2ef98153c4 100644 --- a/docs/reference/rest-api/security/update-settings.asciidoc +++ b/docs/reference/rest-api/security/update-settings.asciidoc @@ -5,6 +5,12 @@ Update Security settings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. +-- + Updates the settings of the security internal indices. diff --git a/docs/reference/rest-api/security/update-user-profile-data.asciidoc b/docs/reference/rest-api/security/update-user-profile-data.asciidoc index 01fa5e11d10e8..c461c169a517a 100644 --- a/docs/reference/rest-api/security/update-user-profile-data.asciidoc +++ b/docs/reference/rest-api/security/update-user-profile-data.asciidoc @@ -5,6 +5,12 @@ Update user profile data ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-security[Security APIs]. 
+-- + NOTE: The user profile feature is designed only for use by {kib} and Elastic’s {observability}, {ents}, and {elastic-sec} solutions. Individual users and external applications should not call this API directly. Elastic reserves diff --git a/docs/reference/rest-api/usage.asciidoc b/docs/reference/rest-api/usage.asciidoc index b57d2aee9d190..cb9d80cc3a97b 100644 --- a/docs/reference/rest-api/usage.asciidoc +++ b/docs/reference/rest-api/usage.asciidoc @@ -2,6 +2,12 @@ [[usage-api]] == Usage API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-xpack[Usage APIs]. +-- + Provides usage information about the installed {xpack} features. [discrete] @@ -207,16 +213,8 @@ GET /_xpack/usage "inference": { "available" : true, "enabled" : true, - "models" : [{ - "service": "elasticsearch", - "task_type": "SPARSE_EMBEDDING", - "count": 1 - }, - { - "service": "elasticsearch", - "task_type": "TEXT_EMBEDDING", - "count": 1 - }, + "models" : [ + ... ] }, "logstash" : { @@ -523,7 +521,10 @@ GET /_xpack/usage "available": true, "enabled": false, "indices_count": 0, - "indices_with_synthetic_source": 0 + "indices_with_synthetic_source": 0, + "num_docs": 0, + "size_in_bytes": 0, + "has_custom_cutoff_date": false } } ------------------------------------------------------------ @@ -535,6 +536,7 @@ GET /_xpack/usage // TESTRESPONSE[s/"policy_stats" : \[[^\]]*\]/"policy_stats" : $body.$_path/] // TESTRESPONSE[s/"slm" : \{[^\}]*\},/"slm" : $body.$_path,/] // TESTRESPONSE[s/"health_api" : \{[^\}]*\}\s*\}/"health_api" : $body.$_path/] +// TESTRESPONSE[s/"models" : \[[^\]]*\]/"models" : $body.$_path/] // TESTRESPONSE[s/"data_streams" : \{[^\}]*\},/"data_streams" : $body.$_path,/] // TESTRESPONSE[s/ : true/ : $body.$_path/] // TESTRESPONSE[s/ : false/ : $body.$_path/] @@ -551,4 +553,5 @@ GET /_xpack/usage // 5. All of the numbers and strings on the right hand side of *every* field in // the response are ignored. So we're really only asserting things about the // the shape of this response, not the values in it. -// 6. Ignore the contents of data streams until the failure store is tech preview. +// 6. Ignore the contents of the `inference.models` array because the models might not yet have been initialized +// 7. Ignore the contents of data streams until the failure store is tech preview. diff --git a/docs/reference/rest-api/watcher.asciidoc b/docs/reference/rest-api/watcher.asciidoc index 4c4ce1ab7ee00..227eda6605465 100644 --- a/docs/reference/rest-api/watcher.asciidoc +++ b/docs/reference/rest-api/watcher.asciidoc @@ -2,6 +2,12 @@ [[watcher-api]] == Watcher APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + * <> * <> * <> diff --git a/docs/reference/rest-api/watcher/ack-watch.asciidoc b/docs/reference/rest-api/watcher/ack-watch.asciidoc index 3c45b068a34c6..b3ea9b5d6b41f 100644 --- a/docs/reference/rest-api/watcher/ack-watch.asciidoc +++ b/docs/reference/rest-api/watcher/ack-watch.asciidoc @@ -5,6 +5,12 @@ Ack watch ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + <> enables you to manually throttle execution of the watch's actions. 
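A minimal sketch of acknowledging a watch; `my_watch` is a hypothetical watch ID, and appending one or more action IDs to the path restricts the acknowledgement to those actions:

[source,console]
----
POST _watcher/watch/my_watch/_ack
----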
diff --git a/docs/reference/rest-api/watcher/activate-watch.asciidoc b/docs/reference/rest-api/watcher/activate-watch.asciidoc index d8af79854c83e..c37d85cc50299 100644 --- a/docs/reference/rest-api/watcher/activate-watch.asciidoc +++ b/docs/reference/rest-api/watcher/activate-watch.asciidoc @@ -5,6 +5,12 @@ Activate watch ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + A watch can be either <>. This API enables you to activate a currently inactive watch. diff --git a/docs/reference/rest-api/watcher/deactivate-watch.asciidoc b/docs/reference/rest-api/watcher/deactivate-watch.asciidoc index ba4170174343a..058ada195f97e 100644 --- a/docs/reference/rest-api/watcher/deactivate-watch.asciidoc +++ b/docs/reference/rest-api/watcher/deactivate-watch.asciidoc @@ -5,6 +5,12 @@ Deactivate watch ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + A watch can be either <>. This API enables you to deactivate a currently active watch. diff --git a/docs/reference/rest-api/watcher/delete-watch.asciidoc b/docs/reference/rest-api/watcher/delete-watch.asciidoc index 3ffcb43ed65e2..536ec293ab8fd 100644 --- a/docs/reference/rest-api/watcher/delete-watch.asciidoc +++ b/docs/reference/rest-api/watcher/delete-watch.asciidoc @@ -5,6 +5,12 @@ Delete watch ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + Removes a watch from {watcher}. [[watcher-api-delete-watch-request]] diff --git a/docs/reference/rest-api/watcher/execute-watch.asciidoc b/docs/reference/rest-api/watcher/execute-watch.asciidoc index 7acecf1709034..eab15a152b154 100644 --- a/docs/reference/rest-api/watcher/execute-watch.asciidoc +++ b/docs/reference/rest-api/watcher/execute-watch.asciidoc @@ -5,6 +5,12 @@ Execute watch ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + Forces the execution of a stored watch. [[watcher-api-execute-watch-request]] diff --git a/docs/reference/rest-api/watcher/get-settings.asciidoc b/docs/reference/rest-api/watcher/get-settings.asciidoc index c5773e6ee32b0..80d86cc455daa 100644 --- a/docs/reference/rest-api/watcher/get-settings.asciidoc +++ b/docs/reference/rest-api/watcher/get-settings.asciidoc @@ -5,6 +5,12 @@ Get Watcher settings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + This API allows a user to retrieve the user-configurable settings for the Watcher internal index (`.watches`). Only a subset of the index settings—those that are user-configurable—will be shown. This includes: - `index.auto_expand_replicas` diff --git a/docs/reference/rest-api/watcher/get-watch.asciidoc b/docs/reference/rest-api/watcher/get-watch.asciidoc index e80bfed88b6e5..1b5dbe2d0e47e 100644 --- a/docs/reference/rest-api/watcher/get-watch.asciidoc +++ b/docs/reference/rest-api/watcher/get-watch.asciidoc @@ -5,6 +5,12 @@ Get watch ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + Retrieves a watch by its ID. 
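For example, assuming a watch with the hypothetical ID `my_watch` exists:

[source,console]
----
GET _watcher/watch/my_watch
----

The response includes the watch definition along with its `_id`, status, and sequence number metadata.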
[[watcher-api-get-watch-request]] diff --git a/docs/reference/rest-api/watcher/put-watch.asciidoc b/docs/reference/rest-api/watcher/put-watch.asciidoc index deab44f106fb6..134e8149fde79 100644 --- a/docs/reference/rest-api/watcher/put-watch.asciidoc +++ b/docs/reference/rest-api/watcher/put-watch.asciidoc @@ -5,6 +5,12 @@ Create or update watch ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + Either registers a new watch in {watcher} or updates an existing one. [[watcher-api-put-watch-request]] diff --git a/docs/reference/rest-api/watcher/query-watches.asciidoc b/docs/reference/rest-api/watcher/query-watches.asciidoc index 7a006243ed7f4..b96261d4a47ea 100644 --- a/docs/reference/rest-api/watcher/query-watches.asciidoc +++ b/docs/reference/rest-api/watcher/query-watches.asciidoc @@ -5,6 +5,12 @@ Query watches ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + Retrieves all registered watches. [[watcher-api-query-watches-request]] diff --git a/docs/reference/rest-api/watcher/start.asciidoc b/docs/reference/rest-api/watcher/start.asciidoc index b153410ed2901..15606ba0a8571 100644 --- a/docs/reference/rest-api/watcher/start.asciidoc +++ b/docs/reference/rest-api/watcher/start.asciidoc @@ -5,6 +5,12 @@ Start watch service ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + Starts the {watcher} service if it is not already running. [[watcher-api-start-request]] diff --git a/docs/reference/rest-api/watcher/stats.asciidoc b/docs/reference/rest-api/watcher/stats.asciidoc index 2dbca69a67616..e0bdc1b5a8892 100644 --- a/docs/reference/rest-api/watcher/stats.asciidoc +++ b/docs/reference/rest-api/watcher/stats.asciidoc @@ -6,6 +6,12 @@ Get {watcher} stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + Retrieves the current {watcher} metrics. [[watcher-api-stats-request]] diff --git a/docs/reference/rest-api/watcher/stop.asciidoc b/docs/reference/rest-api/watcher/stop.asciidoc index 50acd6e9eb2d1..272899a2cfa3f 100644 --- a/docs/reference/rest-api/watcher/stop.asciidoc +++ b/docs/reference/rest-api/watcher/stop.asciidoc @@ -5,6 +5,12 @@ Stop watch service ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + Stops the {watcher} service if it is running. [[watcher-api-stop-request]] diff --git a/docs/reference/rest-api/watcher/update-settings.asciidoc b/docs/reference/rest-api/watcher/update-settings.asciidoc index 8602c6776997d..9ad38064e34ab 100644 --- a/docs/reference/rest-api/watcher/update-settings.asciidoc +++ b/docs/reference/rest-api/watcher/update-settings.asciidoc @@ -5,6 +5,12 @@ Update Watcher settings ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-watcher[{watcher} APIs]. +-- + This API allows a user to modify the settings for the Watcher internal index (`.watches`). Only a subset of settings are allowed to be modified.
This includes: - `index.auto_expand_replicas` diff --git a/docs/reference/rollup/apis/delete-job.asciidoc b/docs/reference/rollup/apis/delete-job.asciidoc index c563e705039e2..03f5349e15d4f 100644 --- a/docs/reference/rollup/apis/delete-job.asciidoc +++ b/docs/reference/rollup/apis/delete-job.asciidoc @@ -8,6 +8,12 @@ deprecated::[8.11.0,"Rollups will be removed in a future version. Use <> instead."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + Deletes an existing {rollup-job}. [[rollup-delete-job-request]] diff --git a/docs/reference/rollup/apis/get-job.asciidoc b/docs/reference/rollup/apis/get-job.asciidoc index fcafbbe95159b..9fff4d665f5fd 100644 --- a/docs/reference/rollup/apis/get-job.asciidoc +++ b/docs/reference/rollup/apis/get-job.asciidoc @@ -7,6 +7,12 @@ deprecated::[8.11.0,"Rollups will be removed in a future version. Use <> instead."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + Retrieves the configuration, stats, and status of {rollup-jobs}. [[rollup-get-job-request]] diff --git a/docs/reference/rollup/apis/put-job.asciidoc b/docs/reference/rollup/apis/put-job.asciidoc index c52e7a042e0ca..a60f20a3de5bf 100644 --- a/docs/reference/rollup/apis/put-job.asciidoc +++ b/docs/reference/rollup/apis/put-job.asciidoc @@ -12,6 +12,12 @@ WARNING: From 8.15.0 invoking this API in a cluster with no rollup usage will fa deprecation and planned removal. A cluster either needs to contain a rollup job or a rollup index in order for this API to be allowed to execute. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + Creates a {rollup-job}. [[rollup-put-job-api-request]] diff --git a/docs/reference/rollup/apis/rollup-caps.asciidoc b/docs/reference/rollup/apis/rollup-caps.asciidoc index 95f652f6d4415..be1c3ed171a23 100644 --- a/docs/reference/rollup/apis/rollup-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-caps.asciidoc @@ -7,6 +7,12 @@ deprecated::[8.11.0,"Rollups will be removed in a future version. Use <> instead."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + Returns the capabilities of any {rollup-jobs} that have been configured for a specific index or index pattern. diff --git a/docs/reference/rollup/apis/rollup-index-caps.asciidoc b/docs/reference/rollup/apis/rollup-index-caps.asciidoc index c5b729f2e52e6..830cc332e8f40 100644 --- a/docs/reference/rollup/apis/rollup-index-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-index-caps.asciidoc @@ -7,6 +7,12 @@ deprecated::[8.11.0,"Rollups will be removed in a future version. Use <> instead."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the index where rollup data is stored). diff --git a/docs/reference/rollup/apis/rollup-search.asciidoc b/docs/reference/rollup/apis/rollup-search.asciidoc index 491dcc6c38ae2..088a74973806b 100644 --- a/docs/reference/rollup/apis/rollup-search.asciidoc +++ b/docs/reference/rollup/apis/rollup-search.asciidoc @@ -7,6 +7,12 @@ deprecated::[8.11.0,"Rollups will be removed in a future version. 
Use <> instead."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + Enables searching rolled-up data using the standard Query DSL. [[rollup-search-request]] diff --git a/docs/reference/rollup/apis/start-job.asciidoc b/docs/reference/rollup/apis/start-job.asciidoc index c102c26ea5d8e..dbeed8b09d1c8 100644 --- a/docs/reference/rollup/apis/start-job.asciidoc +++ b/docs/reference/rollup/apis/start-job.asciidoc @@ -8,6 +8,12 @@ deprecated::[8.11.0,"Rollups will be removed in a future version. Use <> instead."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + Starts an existing, stopped {rollup-job}. [[rollup-start-job-request]] diff --git a/docs/reference/rollup/apis/stop-job.asciidoc b/docs/reference/rollup/apis/stop-job.asciidoc index 61e561b4ceac9..8c0fd6ab2f3af 100644 --- a/docs/reference/rollup/apis/stop-job.asciidoc +++ b/docs/reference/rollup/apis/stop-job.asciidoc @@ -8,6 +8,12 @@ deprecated::[8.11.0,"Rollups will be removed in a future version. Use <> instead."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + Stops an existing, started {rollup-job}. [[rollup-stop-job-request]] diff --git a/docs/reference/rollup/rollup-apis.asciidoc b/docs/reference/rollup/rollup-apis.asciidoc index 44833a0846c2f..71922f0932a0e 100644 --- a/docs/reference/rollup/rollup-apis.asciidoc +++ b/docs/reference/rollup/rollup-apis.asciidoc @@ -4,6 +4,12 @@ deprecated::[8.11.0,"Rollups will be removed in a future version. Please <> to <> instead."] +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-rollup[Rollup APIs]. +-- + [discrete] [[rollup-jobs-endpoint]] === Jobs diff --git a/docs/reference/scripting/apis/create-stored-script-api.asciidoc b/docs/reference/scripting/apis/create-stored-script-api.asciidoc index dab1314e65dc4..5636e212180b3 100644 --- a/docs/reference/scripting/apis/create-stored-script-api.asciidoc +++ b/docs/reference/scripting/apis/create-stored-script-api.asciidoc @@ -4,6 +4,12 @@ Create or update stored script ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-script[Script APIs]. +-- + Creates or updates a <> or <>. diff --git a/docs/reference/scripting/apis/delete-stored-script-api.asciidoc b/docs/reference/scripting/apis/delete-stored-script-api.asciidoc index e233922c9a7d5..c6e570f2013e4 100644 --- a/docs/reference/scripting/apis/delete-stored-script-api.asciidoc +++ b/docs/reference/scripting/apis/delete-stored-script-api.asciidoc @@ -4,6 +4,12 @@ Delete stored script ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-script[Script APIs]. +-- + Deletes a <> or <>. diff --git a/docs/reference/scripting/apis/get-script-contexts-api.asciidoc b/docs/reference/scripting/apis/get-script-contexts-api.asciidoc index ca24c97e494ee..0ef6eccf947ad 100644 --- a/docs/reference/scripting/apis/get-script-contexts-api.asciidoc +++ b/docs/reference/scripting/apis/get-script-contexts-api.asciidoc @@ -4,6 +4,12 @@ Get script contexts ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-script[Script APIs]. +-- + Retrieves a list of supported script contexts and their methods. 
[source,console] diff --git a/docs/reference/scripting/apis/get-script-languages-api.asciidoc b/docs/reference/scripting/apis/get-script-languages-api.asciidoc index dd5935bc4dcd8..a35c979b2fccb 100644 --- a/docs/reference/scripting/apis/get-script-languages-api.asciidoc +++ b/docs/reference/scripting/apis/get-script-languages-api.asciidoc @@ -4,6 +4,12 @@ Get script languages ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-script[Script APIs]. +-- + Retrieves a list of supported <> and their contexts. diff --git a/docs/reference/scripting/apis/get-stored-script-api.asciidoc b/docs/reference/scripting/apis/get-stored-script-api.asciidoc index fffeb24e0331f..d2e5a7beedad1 100644 --- a/docs/reference/scripting/apis/get-stored-script-api.asciidoc +++ b/docs/reference/scripting/apis/get-stored-script-api.asciidoc @@ -4,6 +4,12 @@ Get stored script ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-script[Script APIs]. +-- + Retrieves a <> or <>. diff --git a/docs/reference/scripting/apis/script-apis.asciidoc b/docs/reference/scripting/apis/script-apis.asciidoc index e344cb00ee6fe..f5499ade8458a 100644 --- a/docs/reference/scripting/apis/script-apis.asciidoc +++ b/docs/reference/scripting/apis/script-apis.asciidoc @@ -1,6 +1,12 @@ [[script-apis]] == Script APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-script[Script APIs]. +-- + Use the following APIs to manage, store, and test your <>. diff --git a/docs/reference/scripting/using.asciidoc b/docs/reference/scripting/using.asciidoc index d4b4fd91e3e37..7dc1e38c62e78 100644 --- a/docs/reference/scripting/using.asciidoc +++ b/docs/reference/scripting/using.asciidoc @@ -201,8 +201,13 @@ when you're creating <>. [[script-stored-scripts]] === Store and retrieve scripts You can store and retrieve scripts from the cluster state using the -<>. Stored scripts reduce compilation -time and make searches faster. +<>. Stored scripts allow you to reference +shared scripts for operations like scoring, aggregating, filtering, and +reindexing. Instead of embedding scripts inline in each query, you can reference +these shared operations. + +Stored scripts can also reduce request payload size. Depending on script size +and request frequency, this can help lower latency and data transfer costs. NOTE: Unlike regular scripts, stored scripts require that you specify a script language using the `lang` parameter. diff --git a/docs/reference/search-application/apis/delete-search-application.asciidoc b/docs/reference/search-application/apis/delete-search-application.asciidoc index 4043942b09503..52a32247a79e1 100644 --- a/docs/reference/search-application/apis/delete-search-application.asciidoc +++ b/docs/reference/search-application/apis/delete-search-application.asciidoc @@ -1,13 +1,18 @@ [role="xpack"] [[delete-search-application]] === Delete Search Application - -beta::[] - ++++ Delete Search Application ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search_application[Search application APIs]. +-- + +beta::[] + Removes a Search Application and its associated alias. Indices attached to the Search Application are not removed. 
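A minimal sketch of the call; `my-app` is a hypothetical application name:

[source,console]
----
DELETE _application/search_application/my-app
----

Because the backing indices are left in place, they must be deleted separately if they are no longer needed.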
diff --git a/docs/reference/search-application/apis/get-search-application.asciidoc b/docs/reference/search-application/apis/get-search-application.asciidoc index f0c107011eb40..adeb84bdbe0ae 100644 --- a/docs/reference/search-application/apis/get-search-application.asciidoc +++ b/docs/reference/search-application/apis/get-search-application.asciidoc @@ -1,13 +1,18 @@ [role="xpack"] [[get-search-application]] === Get Search Application - -beta::[] - ++++ Get Search Application ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search_application[Search application APIs]. +-- + +beta::[] + Retrieves information about a search application. [[get-search-application-request]] diff --git a/docs/reference/search-application/apis/index.asciidoc b/docs/reference/search-application/apis/index.asciidoc index 1df38f6a841cc..a01b93a1ee4ac 100644 --- a/docs/reference/search-application/apis/index.asciidoc +++ b/docs/reference/search-application/apis/index.asciidoc @@ -9,6 +9,12 @@ beta::[] --- +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search_application[Search application APIs]. +-- + Use Search Application APIs to manage tasks and resources related to Search Applications. * <> diff --git a/docs/reference/search-application/apis/list-search-applications.asciidoc b/docs/reference/search-application/apis/list-search-applications.asciidoc index 3cc077bf682d6..33bd8ddee009e 100644 --- a/docs/reference/search-application/apis/list-search-applications.asciidoc +++ b/docs/reference/search-application/apis/list-search-applications.asciidoc @@ -1,13 +1,18 @@ [role="xpack"] [[list-search-applications]] === List Search Applications - -beta::[] - ++++ List Search Applications ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search_application[Search application APIs]. +-- + +beta::[] + Returns information about Search Applications. [[list-search-applications-request]] diff --git a/docs/reference/search-application/apis/put-search-application.asciidoc b/docs/reference/search-application/apis/put-search-application.asciidoc index dc5e20ec40b7f..bb8edb5368045 100644 --- a/docs/reference/search-application/apis/put-search-application.asciidoc +++ b/docs/reference/search-application/apis/put-search-application.asciidoc @@ -1,13 +1,18 @@ [role="xpack"] [[put-search-application]] === Put Search Application - -beta::[] - ++++ Put Search Application ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search_application[Search application APIs]. +-- + +beta::[] + Creates or updates a Search Application. [[put-search-application-request]] diff --git a/docs/reference/search-application/apis/search-application-render-query.asciidoc b/docs/reference/search-application/apis/search-application-render-query.asciidoc index 687176b4fb070..42586c7186bc8 100644 --- a/docs/reference/search-application/apis/search-application-render-query.asciidoc +++ b/docs/reference/search-application/apis/search-application-render-query.asciidoc @@ -1,13 +1,18 @@ [role="xpack"] [[search-application-render-query]] === Render Search Application Query - -preview::[] - ++++ Render Search Application Query ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search_application[Search application APIs]. 
+-- + +preview::[] + Given specified query parameters, generates an {es} query using the search template associated with the search application or a default template if none is specified. Unspecified template parameters will be assigned their default values (if applicable). diff --git a/docs/reference/search-application/apis/search-application-search.asciidoc b/docs/reference/search-application/apis/search-application-search.asciidoc index 2d13ed5f11037..1aab4ddf84763 100644 --- a/docs/reference/search-application/apis/search-application-search.asciidoc +++ b/docs/reference/search-application/apis/search-application-search.asciidoc @@ -1,13 +1,18 @@ [role="xpack"] [[search-application-search]] === Search Application Search - -beta::[] - ++++ Search Application Search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search_application[Search application APIs]. +-- + +beta::[] + Given specified query parameters, generates and executes an {es} query using the search template associated with the search application or a default template if none is specified. Unspecified template parameters will be assigned their default values (if applicable). diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index 70ffe02e44d95..7db9b8a304be5 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -1,6 +1,12 @@ [[search]] == Search APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Search APIs are used to search and aggregate data stored in {es} indices and data streams. For an overview and related tutorials, see <>. diff --git a/docs/reference/search/async-search.asciidoc b/docs/reference/search/async-search.asciidoc index 786cfaee8024c..9a9e9ca45e817 100644 --- a/docs/reference/search/async-search.asciidoc +++ b/docs/reference/search/async-search.asciidoc @@ -2,6 +2,12 @@ [[async-search]] === Async search +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + The async search API lets you asynchronously execute a search request, monitor its progress, and retrieve partial results as they become available. diff --git a/docs/reference/search/clear-scroll-api.asciidoc b/docs/reference/search/clear-scroll-api.asciidoc index a005babfd1bef..a6a2cd4b3cab5 100644 --- a/docs/reference/search/clear-scroll-api.asciidoc +++ b/docs/reference/search/clear-scroll-api.asciidoc @@ -4,6 +4,12 @@ Clear scroll ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Clears the search context and results for a <>. diff --git a/docs/reference/search/count.asciidoc b/docs/reference/search/count.asciidoc index 399545adf8d1d..e0e86dd131291 100644 --- a/docs/reference/search/count.asciidoc +++ b/docs/reference/search/count.asciidoc @@ -4,6 +4,12 @@ Count ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Gets the number of matches for a search query.
[source,console] diff --git a/docs/reference/search/explain.asciidoc b/docs/reference/search/explain.asciidoc index 77e2d5bd63efd..01c7bb4611a12 100644 --- a/docs/reference/search/explain.asciidoc +++ b/docs/reference/search/explain.asciidoc @@ -4,6 +4,12 @@ Explain ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Returns information about why a specific document matches (or doesn't match) a query. diff --git a/docs/reference/search/field-caps.asciidoc b/docs/reference/search/field-caps.asciidoc index 2ff2b8d18604e..f4d9146b8ea9d 100644 --- a/docs/reference/search/field-caps.asciidoc +++ b/docs/reference/search/field-caps.asciidoc @@ -4,6 +4,11 @@ Field capabilities ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- Allows you to retrieve the capabilities of fields among multiple indices. For data streams, the API returns field capabilities among the stream's backing diff --git a/docs/reference/search/multi-search-template-api.asciidoc b/docs/reference/search/multi-search-template-api.asciidoc index b1c9518b1f2bc..2fdb412575eb4 100644 --- a/docs/reference/search/multi-search-template-api.asciidoc +++ b/docs/reference/search/multi-search-template-api.asciidoc @@ -4,6 +4,12 @@ Multi search template ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Runs multiple <> with a single request. @@ -79,7 +85,7 @@ cross-cluster search requests. Defaults to `true`. `max_concurrent_searches`:: (Optional, integer) Maximum number of concurrent searches the API can run. -Defaults to +max(1, (# of <> * +Defaults to +max(1, (# of <> * min(<>, 10)))+. `rest_total_hits_as_int`:: diff --git a/docs/reference/search/multi-search.asciidoc b/docs/reference/search/multi-search.asciidoc index 9cafa756f035e..6adcc62e5ec4f 100644 --- a/docs/reference/search/multi-search.asciidoc +++ b/docs/reference/search/multi-search.asciidoc @@ -4,6 +4,12 @@ Multi search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Executes several searches with a single API request. [source,console] @@ -91,7 +97,7 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailabl `max_concurrent_searches`:: (Optional, integer) Maximum number of concurrent searches the multi search API can execute. Defaults -to +max(1, (# of <> * min(<>, 10)))+. +to +max(1, (# of <> * min(<>, 10)))+. `max_concurrent_shard_requests`:: + diff --git a/docs/reference/search/point-in-time-api.asciidoc b/docs/reference/search/point-in-time-api.asciidoc index 9cd91626c7600..bc7cbd01ebb36 100644 --- a/docs/reference/search/point-in-time-api.asciidoc +++ b/docs/reference/search/point-in-time-api.asciidoc @@ -4,6 +4,12 @@ Point in time ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + A search request by default executes against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. 
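A minimal sketch of the flow: first open a PIT against an index (the index name below is a hypothetical placeholder), then pass the returned `id` to subsequent search requests:

[source,console]
----
POST /my-index-000001/_pit?keep_alive=1m

POST /_search
{
  "pit": {
    "id": "<id returned by the previous request>",
    "keep_alive": "1m"
  }
}
----

Note that a search that carries a `pit` body must not specify target indices in the request path, since the PIT already pins them.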
diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index 5f1a0ccfdd6b4..4fbe5ea1bb9f8 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -24,6 +24,11 @@ The output from the Profile API is *very* verbose, especially for complicated requests executed across many shards. Pretty-printing the response is recommended to help understand the output. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- [[search-profile-api-example]] ==== {api-examples-title} diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc index 05862ebbbcca5..4a03371c4da3d 100644 --- a/docs/reference/search/rank-eval.asciidoc +++ b/docs/reference/search/rank-eval.asciidoc @@ -4,6 +4,12 @@ Ranking evaluation ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Allows you to evaluate the quality of ranked search results over a set of typical search queries. diff --git a/docs/reference/search/render-search-template-api.asciidoc b/docs/reference/search/render-search-template-api.asciidoc index 0c782f26068e6..42e82b6e352be 100644 --- a/docs/reference/search/render-search-template-api.asciidoc +++ b/docs/reference/search/render-search-template-api.asciidoc @@ -4,6 +4,12 @@ Render search template ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Renders a <> as a <>. diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index f20e9148bf5e7..7e98297b780e6 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -14,6 +14,12 @@ Refer to <> for a high level overview of the retrievers abs Refer to <> for additional examples. ==== +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + The following retrievers are available: `standard`:: @@ -22,6 +28,9 @@ A <> that replaces the functionality of a traditi `knn`:: A <> that replaces the functionality of a <>. +`rescorer`:: +A <> that replaces the functionality of the <>. + `rrf`:: A <> that produces top documents from <>. @@ -371,6 +380,122 @@ GET movies/_search ---- // TEST[skip:uses ELSER] +[[rescorer-retriever]] +==== Rescorer Retriever + +The `rescorer` retriever re-scores only the results produced by its child retriever. +For the `standard` and `knn` retrievers, the `window_size` parameter specifies the number of documents examined per shard. + +For compound retrievers like `rrf`, the `window_size` parameter defines the total number of documents examined globally. + +When using the `rescorer`, an error is returned if the following conditions are not met: + +* The minimum configured rescore's `window_size` is: +** Greater than or equal to the `size` of the parent retriever for nested `rescorer` setups. +** Greater than or equal to the `size` of the search request when used as the primary retriever in the tree. + +* And the maximum rescore's `window_size` is: +** Smaller than or equal to the `size` or `rank_window_size` of the child retriever. + +[discrete] +[[rescorer-retriever-parameters]] +===== Parameters + +`rescore`:: +(Required. <>) ++ +Defines the <> applied sequentially to the top documents returned by the child retriever. 
+ +`retriever`:: +(Required. <>) ++ +Specifies the child retriever responsible for generating the initial set of top documents to be re-ranked. + +`filter`:: +(Optional. <>) ++ +Applies a <> to the retriever, ensuring that all documents match the filter criteria without affecting their scores. + +[discrete] +[[rescorer-retriever-example]] +===== Example + +The `rescorer` retriever can be placed at any level within the retriever tree. +The following example demonstrates a `rescorer` applied to the results produced by an `rrf` retriever: + +[source,console] +---- +GET movies/_search +{ + "size": 10, <1> + "retriever": { + "rescorer": { <2> + "rescore": { + "query": { <3> + "window_size": 50, <4> + "rescore_query": { + "script_score": { + "script": { + "source": "cosineSimilarity(params.queryVector, 'product-vector_final_stage') + 1.0", + "params": { + "queryVector": [-0.5, 90.0, -10, 14.8, -156.0] + } + } + } + } + } + }, + "retriever": { <5> + "rrf": { + "rank_window_size": 100, <6> + "retrievers": [ + { + "standard": { + "query": { + "sparse_vector": { + "field": "plot_embedding", + "inference_id": "my-elser-model", + "query": "films that explore psychological depths" + } + } + } + }, + { + "standard": { + "query": { + "multi_match": { + "query": "crime", + "fields": [ + "plot", + "title" + ] + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [10, 22, 77], + "k": 10, + "num_candidates": 10 + } + } + ] + } + } + } + } +} +---- +// TEST[skip:uses ELSER] +<1> Specifies the number of top documents to return in the final response. +<2> A `rescorer` retriever applied as the final step. +<3> The definition of the `query` rescorer. +<4> Defines the number of documents to rescore from the child retriever. +<5> Specifies the child retriever definition. +<6> Defines the number of documents returned by the `rrf` retriever, which limits the available documents to rescore. + [[text-similarity-reranker-retriever]] ==== Text Similarity Re-ranker Retriever @@ -777,4 +902,4 @@ When a retriever is specified as part of a search, the following elements are no * <> * <> * <> -* <> +* <> (use a <> instead) diff --git a/docs/reference/search/rrf.asciidoc b/docs/reference/search/rrf.asciidoc index a942c0162a80a..842bd7049e3bf 100644 --- a/docs/reference/search/rrf.asciidoc +++ b/docs/reference/search/rrf.asciidoc @@ -27,6 +27,12 @@ return score [[rrf-api]] ==== Reciprocal rank fusion API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + You can use RRF as part of a <> to combine and rank documents using separate sets of top documents (result sets) from a combination of <> using an <>. A minimum of *two* child retrievers is required for ranking. diff --git a/docs/reference/search/scroll-api.asciidoc b/docs/reference/search/scroll-api.asciidoc index e3b4123ddff68..0f89df877ba74 100644 --- a/docs/reference/search/scroll-api.asciidoc +++ b/docs/reference/search/scroll-api.asciidoc @@ -8,6 +8,12 @@ IMPORTANT: We no longer recommend using the scroll API for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the <> parameter with a point in time (PIT). +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Retrieves the next batch of results for a <>.
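For orientation, a minimal sketch of the scroll request described above (the `scroll_id` value is a placeholder for the ID returned by the initial search):

[source,console]
----
POST /_search/scroll
{
  "scroll": "1m", <1>
  "scroll_id": "<scroll-id-from-the-initial-search-response>"
}
----
<1> Extends how long the search context is kept open for the next batch.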
diff --git a/docs/reference/search/search-shards.asciidoc b/docs/reference/search/search-shards.asciidoc index 49045acf4c484..13f9ae8772858 100644 --- a/docs/reference/search/search-shards.asciidoc +++ b/docs/reference/search/search-shards.asciidoc @@ -4,6 +4,12 @@ Search shards ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Returns the indices and shards that a search request would be executed against. [source,console] diff --git a/docs/reference/search/search-template-api.asciidoc b/docs/reference/search/search-template-api.asciidoc index c60b5281c05e5..2094ee8924014 100644 --- a/docs/reference/search/search-template-api.asciidoc +++ b/docs/reference/search/search-template-api.asciidoc @@ -4,6 +4,12 @@ Search template ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Runs a search with a <>. //// diff --git a/docs/reference/search/search-vector-tile-api.asciidoc b/docs/reference/search/search-vector-tile-api.asciidoc index 2cdc29918a699..f63abda6fcb47 100644 --- a/docs/reference/search/search-vector-tile-api.asciidoc +++ b/docs/reference/search/search-vector-tile-api.asciidoc @@ -4,6 +4,11 @@ Vector tile search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- Searches a vector tile for geospatial values. Returns results as a binary https://docs.mapbox.com/vector-tiles/specification[Mapbox vector tile]. diff --git a/docs/reference/search/search-your-data/full-text-search.asciidoc b/docs/reference/search/search-your-data/full-text-search.asciidoc new file mode 100644 index 0000000000000..8641d0e45748a --- /dev/null +++ b/docs/reference/search/search-your-data/full-text-search.asciidoc @@ -0,0 +1,82 @@ +[[full-text-search]] +== Full-text search + +.Hands-on introduction to full-text search +[TIP] +==== +Would you prefer to jump straight into a hands-on tutorial? +Refer to our quick start <>. +==== + +Full-text search, also known as lexical search, is a technique for fast, efficient searching through text fields in documents. +Documents and search queries are transformed to enable returning https://www.elastic.co/what-is/search-relevance[relevant] results instead of simply exact term matches. +Fields of type <> are analyzed and indexed for full-text search. + +Built on decades of information retrieval research, full-text search delivers reliable results that scale predictably as your data grows. Because it runs efficiently on CPUs, {es}'s full-text search requires minimal computational resources compared to GPU-intensive vector operations. + +You can combine full-text search with <> to build modern hybrid search applications. While vector search may require additional GPU resources, the full-text component remains cost-effective by leveraging existing CPU infrastructure. + +[discrete] +[[full-text-search-how-it-works]] +=== How full-text search works + +The following diagram illustrates the components of full-text search. + +image::images/search/full-text-search-overview.svg[Components of full-text search from analysis to relevance scoring, align=center, width=500] + +At a high level, full-text search involves the following: + +* <>: Analysis consists of a pipeline of sequential transformations. Text is transformed into a format optimized for searching using techniques such as stemming, lowercasing, and stop word elimination. 
{es} contains a number of built-in <> and tokenizers, including options to analyze language-specific text. You can also create custom analyzers. ++ +[TIP] +==== +Refer to <> to learn how to test an analyzer and inspect the tokens and metadata it generates. +==== +* *Inverted index creation*: After analysis is complete, {es} builds an inverted index from the resulting tokens. +An inverted index is a data structure that maps each token to the documents that contain it. +It's made up of two key components: +** *Dictionary*: A sorted list of all unique terms in the collection of documents in your index. +** *Posting list*: For each term, a list of document IDs where the term appears, along with optional metadata like term frequency and position. +* *Relevance scoring*: Results are ranked by how relevant they are to the given query. The relevance score of each document is represented by a positive floating-point number called the `_score`. The higher the `_score`, the more relevant the document. ++ +The default <> {es} uses for calculating relevance scores is https://en.wikipedia.org/wiki/Okapi_BM25[Okapi BM25], a variation of the https://en.wikipedia.org/wiki/Tf–idf[TF-IDF algorithm]. BM25 calculates relevance scores based on term frequency, document frequency, and document length. +Refer to this https://www.elastic.co/blog/practical-bm25-part-2-the-bm25-algorithm-and-its-variables[technical blog post] for a deep dive into BM25. +* *Full-text search query*: Query text is analyzed <>, and the resulting tokens are used to search the inverted index. ++ +Query DSL supports a number of <>. ++ +As of 8.17, {esql} also supports <> functions. + +[discrete] +[[full-text-search-getting-started]] +=== Getting started + +For a hands-on introduction to full-text search, refer to the <>. + +[discrete] +[[full-text-search-learn-more]] +=== Learn more + +Here are some resources to help you learn more about full-text search with {es}. + +*Core concepts* + +Learn about the core components of full-text search: + +* <> +* <> +** <> +** <> + +*{es} query languages* + +Learn how to build full-text search queries using {es}'s query languages: + +* <> +* <> + +*Advanced topics* + +For a technical deep dive into {es}'s BM25 implementation, read this blog post: https://www.elastic.co/blog/practical-bm25-part-2-the-bm25-algorithm-and-its-variables[The BM25 Algorithm and its Variables]. + +To learn how to optimize the relevance of your search results, refer to <>. \ No newline at end of file diff --git a/docs/reference/search/search-your-data/highlighting.asciidoc b/docs/reference/search/search-your-data/highlighting.asciidoc index 6a432e6104524..63d9c632bffcf 100644 --- a/docs/reference/search/search-your-data/highlighting.asciidoc +++ b/docs/reference/search/search-your-data/highlighting.asciidoc @@ -262,9 +262,11 @@ max_analyzed_offset:: By default, the maximum number of characters analyzed for a highlight request is bounded by the value defined in the <> setting, and when the number of characters exceeds this limit an error is returned. If -this setting is set to a non-negative value, the highlighting stops at this defined +this setting is set to a positive value, the highlighting stops at this defined maximum limit, and the rest of the text is not processed, thus not highlighted and -no error is returned. The <> query setting +no error is returned. If it is specifically set to -1 then the value of +<> is used instead. +For values < -1 or 0, an error is returned.
The <> query setting does *not* override the <> which prevails when it's set to a lower value than the query setting. diff --git a/docs/reference/search/search-your-data/retrieval-augmented-generation.asciidoc b/docs/reference/search/search-your-data/retrieval-augmented-generation.asciidoc new file mode 100644 index 0000000000000..2958999ede91d --- /dev/null +++ b/docs/reference/search/search-your-data/retrieval-augmented-generation.asciidoc @@ -0,0 +1,76 @@ +[[rag-elasticsearch]] +== Retrieval augmented generation + +.🍿 Prefer a video introduction? +*********************** +Check out https://www.youtube.com/watch?v=OS4ZefUPAks[this short video] from the Elastic Snackable Series. +*********************** + +Retrieval Augmented Generation (RAG) is a technique for improving language model responses by grounding the model with additional, verifiable sources of information. It works by first retrieving relevant context from an external datastore, which is then added to the model's context window. + +RAG is a form of https://arxiv.org/abs/2301.00234[in-context learning], where the model learns from information provided at inference time. +Compared to fine-tuning or continuous pre-training, RAG can be implemented more quickly and cheaply, and offers several advantages. + +image::images/search/rag-venn-diagram.svg[RAG sits at the intersection of information retrieval and generative AI, align=center, width=500] + +RAG sits at the intersection of https://www.elastic.co/what-is/information-retrieval[information retrieval] and generative AI. +{es} is an excellent tool for implementing RAG, because it offers various retrieval capabilities, such as full-text search, vector search, and hybrid search, as well as other tools like filtering, aggregations, and security features. + +[discrete] +[[rag-elasticsearch-advantages]] +=== Advantages of RAG + +Implementing RAG with {es} has several advantages: + +* *Improved context:* Enables grounding the language model with additional, up-to-date, and/or private data. +* *Reduced hallucination:* Helps minimize factual errors by enabling models to cite authoritative sources. +* *Cost efficiency:* Requires less maintenance compared to fine-tuning or continuously pre-training models. +* *Built-in security:* Controls data access by leveraging {es}'s <> features, such as role-based access control and field/document-level security. +* *Simplified response parsing:* Eliminates the need for custom parsing logic by letting the language model handle parsing {es} responses and formatting the retrieved context. +* *Flexible implementation:* Works with basic <>, and can be gradually updated to add more advanced and computationally intensive <> capabilities. + +[discrete] +[[rag-elasticsearch-components]] +=== RAG system overview + +The following diagram illustrates a simple RAG system using {es}. + +image::images/search/rag-schema.svg[Components of a simple RAG system using Elasticsearch, align=center, role="stretch"] + +The workflow is as follows: + +. The user submits a query. +. Elasticsearch retrieves relevant documents using full-text search, vector search, or hybrid search. +. The language model processes the context and generates a response, using custom instructions. Examples of custom instructions include "Cite a source" or "Provide a concise summary of the `content` field in markdown format." +. The model returns the final response to the user. + +[TIP] +==== +A more advanced setup might include query rewriting between steps 1 and 2.
This intermediate step could use one or more additional language models with different instructions to reformulate queries for more specific and detailed responses. +==== + +[discrete] +[[rag-elasticsearch-getting-started]] +=== Getting started + +Start building RAG applications quickly with Playground, which seamlessly integrates {es} with language model providers. +The Playground UI enables you to build, test, and deploy RAG interfaces on top of your {es} indices. + +Playground automatically selects the best retrieval methods for your data, while providing full control over the final {es} queries and language model instructions. +You can also download the underlying Python code to integrate with your existing applications. + +Learn more in the {kibana-ref}/playground.html[Playground documentation] and +try the https://www.elastic.co/demo-gallery/ai-playground[interactive lab] for hands-on experience. + +[discrete] +[[rag-elasticsearch-learn-more]] +=== Learn more + +Learn more about building RAG systems using {es} in these blog posts: + +* https://www.elastic.co/blog/beyond-rag-basics-semantic-search-with-elasticsearch[Beyond RAG Basics: Advanced strategies for AI applications] +* https://www.elastic.co/search-labs/blog/building-a-rag-system-with-gemma-hugging-face-elasticsearch[Building a RAG system with Gemma, Hugging Face, and Elasticsearch] +* https://www.elastic.co/search-labs/blog/rag-agent-tool-elasticsearch-langchain[Building an agentic RAG tool with Elasticsearch and Langchain] + + + diff --git a/docs/reference/search/search-your-data/search-across-clusters.asciidoc b/docs/reference/search/search-your-data/search-across-clusters.asciidoc index 5f9e92c575793..8d3768817e856 100644 --- a/docs/reference/search/search-your-data/search-across-clusters.asciidoc +++ b/docs/reference/search/search-your-data/search-across-clusters.asciidoc @@ -22,7 +22,7 @@ The following APIs support {ccs}: * experimental:[] <> * experimental:[] <> * experimental:[] <> -* experimental:[] <> +* experimental:[] <> [discrete] === Prerequisites diff --git a/docs/reference/search/search-your-data/search-application-api.asciidoc b/docs/reference/search/search-your-data/search-application-api.asciidoc index 2fe28faed546f..89d9a4318944b 100644 --- a/docs/reference/search/search-your-data/search-application-api.asciidoc +++ b/docs/reference/search/search-your-data/search-application-api.asciidoc @@ -206,7 +206,7 @@ will return: // TEST[continued] This uses the default parameters that were defined with the template. -You can also specify one or mre parameters to the render call, for example: +You can also specify one or more parameters to the render call, for example: [source,console] ---- diff --git a/docs/reference/search/search-your-data/search-your-data.asciidoc b/docs/reference/search/search-your-data/search-your-data.asciidoc index 9ef1ae0ebc59b..0828462fd1850 100644 --- a/docs/reference/search/search-your-data/search-your-data.asciidoc +++ b/docs/reference/search/search-your-data/search-your-data.asciidoc @@ -18,7 +18,7 @@ Search for exact values:: Search for <> of numbers, dates, IPs, or strings. -Full-text search:: +<>:: Use <> to query <> and find documents that best match query terms. @@ -43,11 +43,13 @@ DSL, with a simplified user experience. Create search applications based on your results directly in the Kibana Search UI. 
include::search-api.asciidoc[] +include::full-text-search.asciidoc[] include::../../how-to/recipes.asciidoc[] // ☝️ search relevance recipes include::retrievers-overview.asciidoc[] include::knn-search.asciidoc[] include::semantic-search.asciidoc[] +include::retrieval-augmented-generation.asciidoc[] include::search-across-clusters.asciidoc[] include::search-with-synonyms.asciidoc[] include::search-application-overview.asciidoc[] diff --git a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc index 3448940b6fad7..987a24140b6b4 100644 --- a/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-semantic-text.asciidoc @@ -157,89 +157,7 @@ GET semantic-embeddings/_search <2> The query text. As a result, you receive the top 10 documents that are closest in meaning to the -query from the `semantic-embedding` index: - -[source,console-result] ------------------------------------------------------------- -"hits": [ - { - "_index": "semantic-embeddings", - "_id": "Jy5065EBBFPLbFsdh_f9", - "_score": 21.487484, - "_source": { - "id": 8836652, - "content": { - "text": "There are a few foods and food groups that will help to fight inflammation and delayed onset muscle soreness (both things that are inevitable after a long, hard workout) when you incorporate them into your postworkout eats, whether immediately after your run or at a meal later in the day. Advertisement. Advertisement.", - "inference": { - "inference_id": "my-elser-endpoint", - "model_settings": { - "task_type": "sparse_embedding" - }, - "chunks": [ - { - "text": "There are a few foods and food groups that will help to fight inflammation and delayed onset muscle soreness (both things that are inevitable after a long, hard workout) when you incorporate them into your postworkout eats, whether immediately after your run or at a meal later in the day. Advertisement. Advertisement.", - "embeddings": { - (...) - } - } - ] - } - } - } - }, - { - "_index": "semantic-embeddings", - "_id": "Ji5065EBBFPLbFsdh_f9", - "_score": 18.211695, - "_source": { - "id": 8836651, - "content": { - "text": "During Your Workout. There are a few things you can do during your workout to help prevent muscle injury and soreness. According to personal trainer and writer for Iron Magazine, Marc David, doing warm-ups and cool-downs between sets can help keep muscle soreness to a minimum.", - "inference": { - "inference_id": "my-elser-endpoint", - "model_settings": { - "task_type": "sparse_embedding" - }, - "chunks": [ - { - "text": "During Your Workout. There are a few things you can do during your workout to help prevent muscle injury and soreness. According to personal trainer and writer for Iron Magazine, Marc David, doing warm-ups and cool-downs between sets can help keep muscle soreness to a minimum.", - "embeddings": { - (...) - } - } - ] - } - } - } - }, - { - "_index": "semantic-embeddings", - "_id": "Wi5065EBBFPLbFsdh_b9", - "_score": 13.089405, - "_source": { - "id": 8800197, - "content": { - "text": "This is especially important if the soreness is due to a weightlifting routine. 
For this time period, do not exert more than around 50% of the level of effort (weight, distance and speed) that caused the muscle groups to be sore.", - "inference": { - "inference_id": "my-elser-endpoint", - "model_settings": { - "task_type": "sparse_embedding" - }, - "chunks": [ - { - "text": "This is especially important if the soreness is due to a weightlifting routine. For this time period, do not exert more than around 50% of the level of effort (weight, distance and speed) that caused the muscle groups to be sore.", - "embeddings": { - (...) - } - } - ] - } - } - } - } -] ------------------------------------------------------------- -// NOTCONSOLE +query from the `semantic-embedding` index. [discrete] [[semantic-text-further-examples]] diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index 2ad407b4ae1e4..d022605db22b1 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -4,6 +4,12 @@ Search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + Returns search hits that match the query defined in the request. [source,console] diff --git a/docs/reference/search/suggesters.asciidoc b/docs/reference/search/suggesters.asciidoc index c5cdbc3bd6660..22f427477b905 100644 --- a/docs/reference/search/suggesters.asciidoc +++ b/docs/reference/search/suggesters.asciidoc @@ -3,6 +3,12 @@ Suggests similar looking terms based on a provided text by using a suggester. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + [source,console] -------------------------------------------------- POST my-index-000001/_search diff --git a/docs/reference/search/terms-enum.asciidoc b/docs/reference/search/terms-enum.asciidoc index 4f34deb985abe..46f6d3560ecdb 100644 --- a/docs/reference/search/terms-enum.asciidoc +++ b/docs/reference/search/terms-enum.asciidoc @@ -4,6 +4,12 @@ Terms enum ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-search[Search APIs]. +-- + The terms enum API can be used to discover terms in the index that match a partial string. Supported field types are <>, <>, <>, diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc index ce682e485cd27..ab943cf72b463 100644 --- a/docs/reference/search/validate.asciidoc +++ b/docs/reference/search/validate.asciidoc @@ -4,6 +4,12 @@ Validate ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/operation/operation-indices-validate-query[Validate a query]. +-- + Validates a potentially expensive query without executing it. [source,console] diff --git a/docs/reference/searchable-snapshots/apis/clear-cache.asciidoc b/docs/reference/searchable-snapshots/apis/clear-cache.asciidoc index ff67be02e6d00..ac357e29d1d17 100644 --- a/docs/reference/searchable-snapshots/apis/clear-cache.asciidoc +++ b/docs/reference/searchable-snapshots/apis/clear-cache.asciidoc @@ -5,6 +5,12 @@ Clear cache ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-searchable_snapshots[Searchable snapshots APIs]. 
+-- + experimental::[] Clears indices and data streams from the shared cache for diff --git a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc index b47bc2370ab10..f1613ea62492d 100644 --- a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc +++ b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc @@ -5,6 +5,12 @@ Mount snapshot ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-searchable_snapshots[Searchable snapshots APIs]. +-- + Mount a snapshot as a searchable snapshot index. [[searchable-snapshots-api-mount-request]] diff --git a/docs/reference/searchable-snapshots/apis/node-cache-stats.asciidoc b/docs/reference/searchable-snapshots/apis/node-cache-stats.asciidoc index 62faceb99d4fc..d42ba02876941 100644 --- a/docs/reference/searchable-snapshots/apis/node-cache-stats.asciidoc +++ b/docs/reference/searchable-snapshots/apis/node-cache-stats.asciidoc @@ -5,6 +5,12 @@ Cache stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-searchable_snapshots[Searchable snapshots APIs]. +-- + Retrieves statistics about the shared cache for <>. diff --git a/docs/reference/searchable-snapshots/apis/searchable-snapshots-apis.asciidoc b/docs/reference/searchable-snapshots/apis/searchable-snapshots-apis.asciidoc index f90e27ea63224..1a1856198d286 100644 --- a/docs/reference/searchable-snapshots/apis/searchable-snapshots-apis.asciidoc +++ b/docs/reference/searchable-snapshots/apis/searchable-snapshots-apis.asciidoc @@ -2,6 +2,12 @@ [[searchable-snapshots-apis]] == Searchable snapshots APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-searchable_snapshots[Searchable snapshots APIs]. +-- + You can use the following APIs to perform searchable snapshots operations. * <> diff --git a/docs/reference/searchable-snapshots/apis/shard-stats.asciidoc b/docs/reference/searchable-snapshots/apis/shard-stats.asciidoc index 369d19da9ae56..f1eb9ac8d92b2 100644 --- a/docs/reference/searchable-snapshots/apis/shard-stats.asciidoc +++ b/docs/reference/searchable-snapshots/apis/shard-stats.asciidoc @@ -5,6 +5,12 @@ Searchable snapshot statistics ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-searchable_snapshots[Searchable snapshots APIs]. +-- + experimental::[] Retrieves statistics about searchable snapshots. diff --git a/docs/reference/security/authorization/built-in-roles.asciidoc b/docs/reference/security/authorization/built-in-roles.asciidoc index 13812b915dc5e..846ab3b6f73aa 100644 --- a/docs/reference/security/authorization/built-in-roles.asciidoc +++ b/docs/reference/security/authorization/built-in-roles.asciidoc @@ -33,18 +33,6 @@ suitable for writing beats output to {es}. -- -[[built-in-roles-data-frame-transforms-admin]] `data_frame_transforms_admin` :: -Grants `manage_data_frame_transforms` cluster privileges, which enable you to -manage {transforms}. This role also includes all -{kibana-ref}/kibana-privileges.html[Kibana privileges] for the {ml-features}. -deprecated:[7.5.0,"Replaced by <>"]. - -[[built-in-roles-data-frame-transforms-user]] `data_frame_transforms_user` :: -Grants `monitor_data_frame_transforms` cluster privileges, which enable you to -use {transforms}. 
This role also includes all -{kibana-ref}/kibana-privileges.html[Kibana privileges] for the {ml-features}. -deprecated:[7.5.0,"Replaced by <>"]. - [[built-in-roles-editor]] `editor` :: Grants full access to all features in {kib} (including Solutions) and read-only access to data indices. diff --git a/docs/reference/settings/ml-settings.asciidoc b/docs/reference/settings/ml-settings.asciidoc index 1077a63b00249..08163b1391f2d 100644 --- a/docs/reference/settings/ml-settings.asciidoc +++ b/docs/reference/settings/ml-settings.asciidoc @@ -19,6 +19,8 @@ at all. // end::ml-settings-description-tag[] +TIP: To control memory usage used by {ml} jobs, you can use the <>. + [discrete] [[general-ml-settings]] ==== General machine learning settings @@ -67,7 +69,7 @@ limitations as described <>. The inference cache exists in the JVM heap on each ingest node. The cache affords faster processing times for the `inference` processor. The value can be a static byte sized value (such as `2gb`) or a percentage of total allocated -heap. Defaults to `40%`. See also <>. +heap. Defaults to `40%`. See also <>. [[xpack-interference-model-ttl]] // tag::interference-model-ttl-tag[] @@ -249,11 +251,4 @@ nodes in your cluster, you shouldn't use this setting. + If this setting is `true` it also affects the default value for `xpack.ml.max_model_memory_limit`. In this case `xpack.ml.max_model_memory_limit` -defaults to the largest size that could be assigned in the current cluster. - -[discrete] -[[model-inference-circuit-breaker]] -==== {ml-cap} circuit breaker settings - -The relevant circuit breaker settings can be found in the <>. - +defaults to the largest size that could be assigned in the current cluster. \ No newline at end of file diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc index a284e563917c3..922f1bdba4d1f 100644 --- a/docs/reference/setup.asciidoc +++ b/docs/reference/setup.asciidoc @@ -27,6 +27,8 @@ the only resource-intensive application on the host or container. For example, you might run {metricbeat} alongside {es} for cluster statistics, but a resource-heavy {ls} deployment should be on its own host. 
+// alphabetized + include::run-elasticsearch-locally.asciidoc[] include::setup/install.asciidoc[] @@ -47,30 +49,28 @@ include::settings/ccr-settings.asciidoc[] include::modules/discovery/discovery-settings.asciidoc[] +include::settings/data-stream-lifecycle-settings.asciidoc[] + include::modules/indices/fielddata.asciidoc[] +include::modules/gateway.asciidoc[] + include::settings/health-diagnostic-settings.asciidoc[] include::settings/ilm-settings.asciidoc[] -include::settings/data-stream-lifecycle-settings.asciidoc[] - include::modules/indices/index_management.asciidoc[] include::modules/indices/recovery.asciidoc[] include::modules/indices/indexing_buffer.asciidoc[] -include::settings/license-settings.asciidoc[] - -include::modules/gateway.asciidoc[] +include::settings/inference-settings.asciidoc[] -include::setup/logging-config.asciidoc[] +include::settings/license-settings.asciidoc[] include::settings/ml-settings.asciidoc[] -include::settings/inference-settings.asciidoc[] - include::settings/monitoring-settings.asciidoc[] include::modules/node.asciidoc[] @@ -79,12 +79,12 @@ include::modules/network.asciidoc[] include::modules/indices/query_cache.asciidoc[] +include::{es-ref-dir}/path-settings-overview.asciidoc[] + include::modules/indices/search-settings.asciidoc[] include::settings/security-settings.asciidoc[] -include::modules/shard-ops.asciidoc[] - include::modules/indices/request_cache.asciidoc[] include::settings/snapshot-settings.asciidoc[] diff --git a/docs/reference/setup/add-nodes.asciidoc b/docs/reference/setup/add-nodes.asciidoc index ba749782c092f..941a3e6c40f79 100644 --- a/docs/reference/setup/add-nodes.asciidoc +++ b/docs/reference/setup/add-nodes.asciidoc @@ -48,7 +48,7 @@ For more information about discovery and shard allocation, refer to As nodes are added or removed Elasticsearch maintains an optimal level of fault tolerance by automatically updating the cluster's _voting configuration_, which -is the set of <> whose responses are counted +is the set of <> whose responses are counted when making decisions such as electing a new master or committing a new cluster state. diff --git a/docs/reference/setup/advanced-configuration.asciidoc b/docs/reference/setup/advanced-configuration.asciidoc index 2a7ccc56742de..ff80b51f0408b 100644 --- a/docs/reference/setup/advanced-configuration.asciidoc +++ b/docs/reference/setup/advanced-configuration.asciidoc @@ -1,13 +1,7 @@ [[advanced-configuration]] -=== Advanced configuration - -Modifying advanced settings is generally not recommended and could negatively -impact performance and stability. Using the {es}-provided defaults -is recommended in most circumstances. +=== Set JVM options [[set-jvm-options]] -==== Set JVM options - If needed, you can override the default JVM options by adding custom options files (preferred) or setting the `ES_JAVA_OPTS` environment variable. @@ -21,10 +15,15 @@ Where you put the JVM options files depends on the type of installation: * Docker: Bind mount custom JVM options files into `/usr/share/elasticsearch/config/jvm.options.d/`. +CAUTION: Setting your own JVM options is generally not recommended and could negatively +impact performance and stability. Using the {es}-provided defaults +is recommended in most circumstances. + +[[readiness-tcp-port]] NOTE: Do not modify the root `jvm.options` file. Use files in `jvm.options.d/` instead. [[jvm-options-syntax]] -===== JVM options syntax +==== JVM options syntax A JVM options file contains a line-delimited list of JVM arguments. 
Arguments are preceded by a dash (`-`). @@ -66,7 +65,7 @@ and ignored. Lines that aren't commented out and aren't recognized as valid JVM arguments are rejected and {es} will fail to start. [[jvm-options-env]] -===== Use environment variables to set JVM options +==== Use environment variables to set JVM options In production, use JVM options files to override the default settings. In testing and development environments, @@ -155,23 +154,11 @@ options. We do not recommend using `ES_JAVA_OPTS` in production. NOTE: If you are running {es} as a Windows service, you can change the heap size using the service manager. See <>. -[[readiness-tcp-port]] -===== Enable the Elasticsearch TCP readiness port - -preview::[] - -If configured, a node can open a TCP port when the node is in a ready state. A node is deemed -ready when it has successfully joined a cluster. In a single node configuration, the node is -said to be ready, when it's able to accept requests. - -To enable the readiness TCP port, use the `readiness.port` setting. The readiness service will bind to -all host addresses. - -If the node leaves the cluster, or the <> is used to mark the node -for shutdown, the readiness port is immediately closed. - -A successful connection to the readiness TCP port signals that the {es} node is ready. When a client -connects to the readiness port, the server simply terminates the socket connection. No data is sent back -to the client. If a client cannot connect to the readiness port, the node is not ready. +[[heap-dump-path-setting]] +include::important-settings/heap-dump-path.asciidoc[leveloffset=-1] +[[gc-logging]] +include::important-settings/gc-logging.asciidoc[leveloffset=-1] +[[error-file-path]] +include::important-settings/error-file.asciidoc[leveloffset=-1] \ No newline at end of file diff --git a/docs/reference/setup/important-settings.asciidoc b/docs/reference/setup/important-settings.asciidoc index 03c891af70743..b822ee9b3f903 100644 --- a/docs/reference/setup/important-settings.asciidoc +++ b/docs/reference/setup/important-settings.asciidoc @@ -19,10 +19,20 @@ of items which *must* be considered before using your cluster in production: Our {ess-trial}[{ecloud}] service configures these items automatically, making your cluster production-ready by default. +[[path-settings]] +[discrete] +==== Path settings + include::important-settings/path-settings.asciidoc[] +Elasticsearch offers a deprecated setting that allows you to specify multiple paths in `path.data`. +To learn about this setting, and how to migrate away from it, refer to <>. 
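Returning to the `ES_JAVA_OPTS` environment variable discussed above, a minimal sketch of a one-off override for testing or development (the heap sizes are illustrative):

[source,sh]
----
ES_JAVA_OPTS="-Xms4g -Xmx4g" ./bin/elasticsearch
----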
+ include::important-settings/cluster-name.asciidoc[] +[[node-name]] +[discrete] +==== Node name setting include::important-settings/node-name.asciidoc[] include::important-settings/network-host.asciidoc[] @@ -31,6 +41,7 @@ include::important-settings/discovery-settings.asciidoc[] include::important-settings/heap-size.asciidoc[] +[[heap-dump-path]] include::important-settings/heap-dump-path.asciidoc[] include::important-settings/gc-logging.asciidoc[] diff --git a/docs/reference/setup/important-settings/cluster-name.asciidoc b/docs/reference/setup/important-settings/cluster-name.asciidoc index 3f1516f21de1e..6d489eee76cf6 100644 --- a/docs/reference/setup/important-settings/cluster-name.asciidoc +++ b/docs/reference/setup/important-settings/cluster-name.asciidoc @@ -1,4 +1,3 @@ -[[cluster-name]] [discrete] ==== Cluster name setting diff --git a/docs/reference/setup/important-settings/error-file.asciidoc b/docs/reference/setup/important-settings/error-file.asciidoc index ca95ded78d53f..2f654002d51f8 100644 --- a/docs/reference/setup/important-settings/error-file.asciidoc +++ b/docs/reference/setup/important-settings/error-file.asciidoc @@ -1,4 +1,3 @@ -[[error-file-path]] [discrete] ==== JVM fatal error log setting diff --git a/docs/reference/setup/important-settings/gc-logging.asciidoc b/docs/reference/setup/important-settings/gc-logging.asciidoc index 3534e1335c9fd..873c85d58d914 100644 --- a/docs/reference/setup/important-settings/gc-logging.asciidoc +++ b/docs/reference/setup/important-settings/gc-logging.asciidoc @@ -1,4 +1,3 @@ -[[gc-logging]] [discrete] ==== GC logging settings @@ -20,9 +19,8 @@ To see further options not contained in the original JEP, see https://docs.oracle.com/en/java/javase/13/docs/specs/man/java.html#enable-logging-with-the-jvm-unified-logging-framework[Enable Logging with the JVM Unified Logging Framework]. -[[gc-logging-examples]] [discrete] -==== Examples +===== Examples Change the default GC log output location to `/opt/my-app/gc.log` by creating `$ES_HOME/config/jvm.options.d/gc.options` with some sample diff --git a/docs/reference/setup/important-settings/heap-dump-path.asciidoc b/docs/reference/setup/important-settings/heap-dump-path.asciidoc index 8f01379842a90..8b06ae752f360 100644 --- a/docs/reference/setup/important-settings/heap-dump-path.asciidoc +++ b/docs/reference/setup/important-settings/heap-dump-path.asciidoc @@ -1,4 +1,3 @@ -[[heap-dump-path]] [discrete] ==== JVM heap dump path setting diff --git a/docs/reference/setup/important-settings/node-name.asciidoc b/docs/reference/setup/important-settings/node-name.asciidoc index eda3052d119c9..f1260844a0549 100644 --- a/docs/reference/setup/important-settings/node-name.asciidoc +++ b/docs/reference/setup/important-settings/node-name.asciidoc @@ -1,7 +1,3 @@ -[[node-name]] -[discrete] -==== Node name setting - {es} uses `node.name` as a human-readable identifier for a particular instance of {es}. This name is included in the response of many APIs. The node name defaults to the hostname of the machine when diff --git a/docs/reference/setup/important-settings/path-settings.asciidoc b/docs/reference/setup/important-settings/path-settings.asciidoc index a0a444ca5090a..002e08e2dc746 100644 --- a/docs/reference/setup/important-settings/path-settings.asciidoc +++ b/docs/reference/setup/important-settings/path-settings.asciidoc @@ -1,7 +1,3 @@ -[[path-settings]] -[discrete] -==== Path settings - {es} writes the data you index to indices and data streams to a `data` directory. 
{es} writes its own application logs, which contain information about cluster health and operations, to a `logs` directory. @@ -20,113 +16,4 @@ Supported `path.data` and `path.logs` values vary by platform: include::{es-ref-dir}/tab-widgets/customize-data-log-path-widget.asciidoc[] -include::{es-ref-dir}/modules/node.asciidoc[tag=modules-node-data-path-warning-tag] - -[discrete] -==== Multiple data paths -deprecated::[7.13.0] - -If needed, you can specify multiple paths in `path.data`. {es} stores the node's -data across all provided paths but keeps each shard's data on the same path. - -{es} does not balance shards across a node's data paths. High disk -usage in a single path can trigger a <> for the entire node. If triggered, {es} will not add shards to -the node, even if the node’s other paths have available disk space. If you need -additional disk space, we recommend you add a new node rather than additional -data paths. - -include::{es-ref-dir}/tab-widgets/multi-data-path-widget.asciidoc[] - -[discrete] -[[mdp-migrate]] -==== Migrate from multiple data paths - -Support for multiple data paths was deprecated in 7.13 and will be removed -in a future release. - -As an alternative to multiple data paths, you can create a filesystem which -spans multiple disks with a hardware virtualisation layer such as RAID, or a -software virtualisation layer such as Logical Volume Manager (LVM) on Linux or -Storage Spaces on Windows. If you wish to use multiple data paths on a single -machine then you must run one node for each data path. - -If you currently use multiple data paths in a -{ref}/high-availability-cluster-design.html[highly available cluster] then you -can migrate to a setup that uses a single path for each node without downtime -using a process similar to a -{ref}/restart-cluster.html#restart-cluster-rolling[rolling restart]: shut each -node down in turn and replace it with one or more nodes each configured to use -a single data path. In more detail, for each node that currently has multiple -data paths you should follow the following process. In principle you can -perform this migration during a rolling upgrade to 8.0, but we recommend -migrating to a single-data-path setup before starting to upgrade. - -1. Take a snapshot to protect your data in case of disaster. - -2. Optionally, migrate the data away from the target node by using an -{ref}/modules-cluster.html#cluster-shard-allocation-filtering[allocation filter]: -+ -[source,console] --------------------------------------------------- -PUT _cluster/settings -{ - "persistent": { - "cluster.routing.allocation.exclude._name": "target-node-name" - } -} --------------------------------------------------- -+ -You can use the {ref}/cat-allocation.html[cat allocation API] to track progress -of this data migration. If some shards do not migrate then the -{ref}/cluster-allocation-explain.html[cluster allocation explain API] will help -you to determine why. - -3. Follow the steps in the -{ref}/restart-cluster.html#restart-cluster-rolling[rolling restart process] -up to and including shutting the target node down. - -4. Ensure your cluster health is `yellow` or `green`, so that there is a copy -of every shard assigned to at least one of the other nodes in your cluster. - -5. If applicable, remove the allocation filter applied in the earlier step. 
-+ -[source,console] --------------------------------------------------- -PUT _cluster/settings -{ - "persistent": { - "cluster.routing.allocation.exclude._name": null - } -} --------------------------------------------------- - -6. Discard the data held by the stopped node by deleting the contents of its -data paths. - -7. Reconfigure your storage. For instance, combine your disks into a single -filesystem using LVM or Storage Spaces. Ensure that your reconfigured storage -has sufficient space for the data that it will hold. - -8. Reconfigure your node by adjusting the `path.data` setting in its -`elasticsearch.yml` file. If needed, install more nodes each with their own -`path.data` setting pointing at a separate data path. - -9. Start the new nodes and follow the rest of the -{ref}/restart-cluster.html#restart-cluster-rolling[rolling restart process] for -them. - -10. Ensure your cluster health is `green`, so that every shard has been -assigned. - -You can alternatively add some number of single-data-path nodes to your -cluster, migrate all your data over to these new nodes using -{ref}/modules-cluster.html#cluster-shard-allocation-filtering[allocation filters], -and then remove the old nodes from the cluster. This approach will temporarily -double the size of your cluster so it will only work if you have the capacity to -expand your cluster like this. - -If you currently use multiple data paths but your cluster is not highly -available then you can migrate to a non-deprecated configuration by taking -a snapshot, creating a new cluster with the desired configuration and restoring -the snapshot into it. +include::{es-ref-dir}/modules/node.asciidoc[tag=modules-node-data-path-warning-tag] \ No newline at end of file diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 8694d7f5b46c6..f3576db0c786c 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -25,6 +25,21 @@ TIP: This setup doesn't run multiple {es} nodes or {kib} by default. To create a multi-node cluster with {kib}, use Docker Compose instead. See <>. +[[docker-wolfi-hardened-image]] +===== Hardened Docker images + +You can also use the hardened https://wolfi.dev/[Wolfi] image for additional security. +Using Wolfi images requires Docker version 20.10.10 or higher. + +To use the Wolfi image, append `-wolfi` to the image tag in the Docker command. + +For example: + +[source,sh,subs="attributes"] +---- +docker pull {docker-wolfi-image} +---- + ===== Start a single-node cluster . Install Docker. Visit https://docs.docker.com/get-docker/[Get Docker] to diff --git a/docs/reference/setup/logging-config.asciidoc b/docs/reference/setup/logging-config.asciidoc index e382bbdacb464..04e9ba3f0bef9 100644 --- a/docs/reference/setup/logging-config.asciidoc +++ b/docs/reference/setup/logging-config.asciidoc @@ -1,5 +1,5 @@ [[logging]] -=== Logging +== Elasticsearch application logging You can use {es}'s application logs to monitor your cluster and diagnose issues. If you run {es} as a service, the default location of the logs varies based on @@ -11,7 +11,7 @@ If you run {es} from the command line, {es} prints logs to the standard output (`stdout`). [discrete] -[[loggin-configuration]] +[[logging-configuration]] === Logging configuration IMPORTANT: Elastic strongly recommends using the Log4j 2 configuration that is shipped by default. @@ -304,6 +304,7 @@ The user ID is included in the `X-Opaque-ID` field in deprecation JSON logs. 
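As background for the `X-Opaque-ID` field mentioned above, callers can tag their requests with the `X-Opaque-Id` HTTP header so that any resulting deprecation log entries identify them. A sketch assuming a local cluster on the default port, with a hypothetical application name:

[source,sh]
----
curl -H "X-Opaque-Id: my-billing-app" "localhost:9200/my-index-000001/_search?pretty"
----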
Deprecation logs can be indexed into `.logs-deprecation.elasticsearch-default` data stream when the `cluster.deprecation_indexing.enabled` setting is set to true. +[discrete] ==== Deprecation logs throttling :es-rate-limiting-filter-java-doc: {elasticsearch-javadoc}/org/elasticsearch/common/logging/RateLimitingFilter.html Deprecation logs are deduplicated based on a deprecated feature key diff --git a/docs/reference/shard-request-cache.asciidoc b/docs/reference/shard-request-cache.asciidoc new file mode 100644 index 0000000000000..ec79dfb531bdb --- /dev/null +++ b/docs/reference/shard-request-cache.asciidoc @@ -0,0 +1,134 @@ +[[shard-request-cache]] +=== The shard request cache + +When a search request is run against an index or against many indices, each +involved shard executes the search locally and returns its local results to +the _coordinating node_, which combines these shard-level results into a +``global'' result set. + +The shard-level request cache module caches the local results on each shard. +This allows frequently used (and potentially heavy) search requests to return +results almost instantly. The requests cache is a very good fit for the logging +use case, where only the most recent index is being actively updated -- +results from older indices will be served directly from the cache. + +You can control the size and expiration of the cache at the node level using the <>. + +[IMPORTANT] +=================================== + +By default, the requests cache will only cache the results of search requests +where `size=0`, so it will not cache `hits`, +but it will cache `hits.total`, <>, and +<>. + +Most queries that use `now` (see <>) cannot be cached. + +Scripted queries that use non-deterministic API calls, such as +`Math.random()` or `new Date()`, are not cached. +=================================== + +[discrete] +==== Cache invalidation + +The cache is smart -- it keeps the same _near real-time_ promise as uncached +search. + +Cached results are invalidated automatically whenever the shard refreshes to +pick up changes to the documents or when you update the mapping. In other +words, you will always get the same results from the cache as you would for an +uncached search request. + +The longer the refresh interval, the longer that cached entries will remain +valid even if there are changes to the documents. If the cache is full, the +least recently used cache keys will be evicted. + +The cache can be expired manually with the <>: + +[source,console] +------------------------ +POST /my-index-000001,my-index-000002/_cache/clear?request=true +------------------------ +// TEST[s/^/PUT my-index-000001\nPUT my-index-000002\n/] + +[discrete] +==== Enabling and disabling caching + +The cache is enabled by default, but can be disabled when creating a new +index as follows: + +[source,console] +----------------------------- +PUT /my-index-000001 +{ + "settings": { + "index.requests.cache.enable": false + } +} +----------------------------- + +It can also be enabled or disabled dynamically on an existing index with the +<> API: + +[source,console] +----------------------------- +PUT /my-index-000001/_settings +{ "index.requests.cache.enable": true } +----------------------------- +// TEST[continued] + + +[discrete] +==== Enabling and disabling caching per request + +The `request_cache` query-string parameter can be used to enable or disable +caching on a *per-request* basis.
If set, it overrides the index-level setting: + +[source,console] +----------------------------- +GET /my-index-000001/_search?request_cache=true +{ + "size": 0, + "aggs": { + "popular_colors": { + "terms": { + "field": "colors" + } + } + } +} +----------------------------- +// TEST[continued] + +Requests where `size` is greater than 0 will not be cached even if the request cache is +enabled in the index settings. To cache these requests you will need to use the +query-string parameter detailed here. + +[discrete] +==== Cache key + +A hash of the whole JSON body is used as the cache key. This means that if the JSON +changes -- for instance if keys are output in a different order -- then the +cache key will not be recognised. + +TIP: Most JSON libraries support a _canonical_ mode which ensures that JSON +keys are always emitted in the same order. This canonical mode can be used in +the application to ensure that a request is always serialized in the same way. + +[discrete] +==== Monitoring cache usage + +The size of the cache (in bytes) and the number of evictions can be viewed +by index, with the <> API: + +[source,console] +------------------------ +GET /_stats/request_cache?human +------------------------ + +or by node with the <> API: + +[source,console] +------------------------ +GET /_nodes/stats/indices/request_cache?human +------------------------ diff --git a/docs/reference/shutdown/apis/shutdown-api.asciidoc b/docs/reference/shutdown/apis/shutdown-api.asciidoc index 24cbca720d160..b950cd3d19c56 100644 --- a/docs/reference/shutdown/apis/shutdown-api.asciidoc +++ b/docs/reference/shutdown/apis/shutdown-api.asciidoc @@ -4,6 +4,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-shutdown[Node lifecycle APIs]. +-- + You use the shutdown APIs to prepare nodes for temporary or permanent shutdown, monitor the shutdown status, and enable a previously shut-down node to resume normal operations. [discrete] diff --git a/docs/reference/shutdown/apis/shutdown-delete.asciidoc b/docs/reference/shutdown/apis/shutdown-delete.asciidoc index 4d7f30c3a1e48..225d88c63a016 100644 --- a/docs/reference/shutdown/apis/shutdown-delete.asciidoc +++ b/docs/reference/shutdown/apis/shutdown-delete.asciidoc @@ -3,6 +3,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-shutdown[Node lifecycle APIs]. +-- + Cancels shutdown preparations or clears a shutdown request so a node can resume normal operations. diff --git a/docs/reference/shutdown/apis/shutdown-get.asciidoc b/docs/reference/shutdown/apis/shutdown-get.asciidoc index 5feac28353ab5..b0097eb0caf7f 100644 --- a/docs/reference/shutdown/apis/shutdown-get.asciidoc +++ b/docs/reference/shutdown/apis/shutdown-get.asciidoc @@ -3,6 +3,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-shutdown[Node lifecycle APIs]. +-- + Retrieves the status of a node that's being prepared for shutdown. 
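A minimal sketch of the get shutdown status request; the node ID here mirrors the sample response shown below and is a placeholder:

[source,console]
----
GET _nodes/USpTGYaBSIKbgSUJR2Z9lg/shutdown
----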
[[get-shutdown-api-request]] @@ -72,6 +78,7 @@ including the status of shard migration, task migration, and plugin cleanup: "nodes": [ { "node_id": "USpTGYaBSIKbgSUJR2Z9lg", + "node_ephemeral_id": null, "type": "RESTART", "reason": "Demonstrating how the node shutdown API works", "shutdown_startedmillis": 1624406108685, diff --git a/docs/reference/shutdown/apis/shutdown-put.asciidoc b/docs/reference/shutdown/apis/shutdown-put.asciidoc index 344dd8fa36717..5eef4763a9c3a 100644 --- a/docs/reference/shutdown/apis/shutdown-put.asciidoc +++ b/docs/reference/shutdown/apis/shutdown-put.asciidoc @@ -3,6 +3,12 @@ NOTE: {cloud-only} +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-shutdown[Node lifecycle APIs]. +-- + Prepares a node to be shut down. [[put-shutdown-api-request]] diff --git a/docs/reference/slm/apis/slm-api.asciidoc b/docs/reference/slm/apis/slm-api.asciidoc index d061ff6b0aaf7..ee624a70b00f5 100644 --- a/docs/reference/slm/apis/slm-api.asciidoc +++ b/docs/reference/slm/apis/slm-api.asciidoc @@ -2,6 +2,12 @@ [[snapshot-lifecycle-management-api]] == {slm-cap} APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + You use the following APIs to set up policies to automatically take snapshots and control how long they are retained. diff --git a/docs/reference/slm/apis/slm-delete.asciidoc b/docs/reference/slm/apis/slm-delete.asciidoc index 650ee68e24fb7..9c63d7326421f 100644 --- a/docs/reference/slm/apis/slm-delete.asciidoc +++ b/docs/reference/slm/apis/slm-delete.asciidoc @@ -5,6 +5,12 @@ Delete policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + Deletes an existing snapshot lifecycle policy. [[slm-api-delete-lifecycle-request]] diff --git a/docs/reference/slm/apis/slm-execute-retention.asciidoc b/docs/reference/slm/apis/slm-execute-retention.asciidoc index 75bcdb3143668..ad8d7b15a43d7 100644 --- a/docs/reference/slm/apis/slm-execute-retention.asciidoc +++ b/docs/reference/slm/apis/slm-execute-retention.asciidoc @@ -5,6 +5,12 @@ Execute snapshot retention policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + Deletes any snapshots that are expired according to the policy's retention rules. [[slm-api-execute-retention-request]] diff --git a/docs/reference/slm/apis/slm-execute.asciidoc b/docs/reference/slm/apis/slm-execute.asciidoc index f3977d6aed2fd..9ea35602de4e2 100644 --- a/docs/reference/slm/apis/slm-execute.asciidoc +++ b/docs/reference/slm/apis/slm-execute.asciidoc @@ -5,6 +5,12 @@ Execute snapshot lifecycle policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time. diff --git a/docs/reference/slm/apis/slm-get-status.asciidoc b/docs/reference/slm/apis/slm-get-status.asciidoc index d4afbaddb1beb..181927ac35a1c 100644 --- a/docs/reference/slm/apis/slm-get-status.asciidoc +++ b/docs/reference/slm/apis/slm-get-status.asciidoc @@ -7,6 +7,12 @@ Get {slm} status ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + Retrieves the status of {slm} ({slm-init}). 
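A minimal sketch of the request:

[source,console]
----
GET _slm/status
----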
[[slm-api-get-status-request]] diff --git a/docs/reference/slm/apis/slm-get.asciidoc b/docs/reference/slm/apis/slm-get.asciidoc index f7c847d06dc4b..723d8b374b914 100644 --- a/docs/reference/slm/apis/slm-get.asciidoc +++ b/docs/reference/slm/apis/slm-get.asciidoc @@ -5,6 +5,12 @@ Get policy ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. diff --git a/docs/reference/slm/apis/slm-put.asciidoc b/docs/reference/slm/apis/slm-put.asciidoc index 51ad571ee12e7..a6ee29bfd1bc9 100644 --- a/docs/reference/slm/apis/slm-put.asciidoc +++ b/docs/reference/slm/apis/slm-put.asciidoc @@ -4,8 +4,13 @@ Create or update policy ++++ -Creates or updates a snapshot lifecycle policy. +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- +Creates or updates a snapshot lifecycle policy. [[slm-api-put-request]] ==== {api-request-title} diff --git a/docs/reference/slm/apis/slm-start.asciidoc b/docs/reference/slm/apis/slm-start.asciidoc index 9d9b8108cb57b..87c19ec600b61 100644 --- a/docs/reference/slm/apis/slm-start.asciidoc +++ b/docs/reference/slm/apis/slm-start.asciidoc @@ -1,12 +1,17 @@ [role="xpack"] [[slm-api-start]] === Start {slm} API - [subs="attributes"] ++++ Start {slm} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + Turns on {slm} ({slm-init}). [[slm-api-start-request]] diff --git a/docs/reference/slm/apis/slm-stats.asciidoc b/docs/reference/slm/apis/slm-stats.asciidoc index 340631e9c5601..d6b9cd960e45f 100644 --- a/docs/reference/slm/apis/slm-stats.asciidoc +++ b/docs/reference/slm/apis/slm-stats.asciidoc @@ -5,6 +5,12 @@ Get snapshot lifecycle stats ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + Returns global and policy-level statistics about actions taken by {slm}. [[slm-api-stats-request]] diff --git a/docs/reference/slm/apis/slm-stop.asciidoc b/docs/reference/slm/apis/slm-stop.asciidoc index 253abec7b4d11..2bfe9646bcffd 100644 --- a/docs/reference/slm/apis/slm-stop.asciidoc +++ b/docs/reference/slm/apis/slm-stop.asciidoc @@ -1,12 +1,17 @@ [role="xpack"] [[slm-api-stop]] === Stop {slm} API - [subs="attributes"] ++++ Stop {slm} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-slm[{slm-cap} APIs]. +-- + Turn off {slm} ({slm-init}). [[slm-api-stop-request]] diff --git a/docs/reference/snapshot-restore/apis/clean-up-repo-api.asciidoc b/docs/reference/snapshot-restore/apis/clean-up-repo-api.asciidoc index 249e192c0c587..dbb7541693492 100644 --- a/docs/reference/snapshot-restore/apis/clean-up-repo-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/clean-up-repo-api.asciidoc @@ -4,6 +4,12 @@ Clean up snapshot repository ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Triggers the review of a snapshot repository's contents and deletes any stale data that is not referenced by existing snapshots. See <>. 
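A minimal sketch of the cleanup request, using the hypothetical repository name `my_repository` that appears elsewhere on these pages:

[source,console]
----
POST /_snapshot/my_repository/_cleanup
----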
diff --git a/docs/reference/snapshot-restore/apis/clone-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/clone-snapshot-api.asciidoc index 590bc7e7410f5..cf432d807d9c5 100644 --- a/docs/reference/snapshot-restore/apis/clone-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/clone-snapshot-api.asciidoc @@ -4,6 +4,12 @@ Clone snapshot ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Clones part or all of a snapshot into a new snapshot. [source,console] diff --git a/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc index baa28bb7b0a53..a6c39abbda734 100644 --- a/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc @@ -4,6 +4,12 @@ Create snapshot ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + <> of a cluster or specified data streams and indices. diff --git a/docs/reference/snapshot-restore/apis/delete-repo-api.asciidoc b/docs/reference/snapshot-restore/apis/delete-repo-api.asciidoc index 4301fea642523..e0df427da745c 100644 --- a/docs/reference/snapshot-restore/apis/delete-repo-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/delete-repo-api.asciidoc @@ -4,6 +4,12 @@ Delete snapshot repository ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Unregisters one or more <>. When a repository is unregistered, {es} only removes the reference to the diff --git a/docs/reference/snapshot-restore/apis/delete-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/delete-snapshot-api.asciidoc index 8824977d660e4..74db60c1970c0 100644 --- a/docs/reference/snapshot-restore/apis/delete-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/delete-snapshot-api.asciidoc @@ -4,6 +4,12 @@ Delete snapshot ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Deletes a <>. //// diff --git a/docs/reference/snapshot-restore/apis/get-repo-api.asciidoc b/docs/reference/snapshot-restore/apis/get-repo-api.asciidoc index cf1b9813c519e..5f6b6485c5ee4 100644 --- a/docs/reference/snapshot-restore/apis/get-repo-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/get-repo-api.asciidoc @@ -4,6 +4,12 @@ Get snapshot repository ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Gets information about one or more registered <>. diff --git a/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc index 622e1ade024b0..f9eb6a27df039 100644 --- a/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc @@ -4,6 +4,12 @@ Get snapshot ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Retrieves information about one or more snapshots. 
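For example, a minimal request for a single snapshot (both names are illustrative placeholders):

[source,console]
----
GET /_snapshot/my_repository/my_snapshot
----

The snapshot argument also accepts comma-separated lists and wildcards, so `GET /_snapshot/my_repository/*` retrieves every snapshot in the repository.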
//// diff --git a/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc b/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc index e677408da3f25..dbbf547528f46 100644 --- a/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/get-snapshot-status-api.asciidoc @@ -4,6 +4,12 @@ Get snapshot status ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Retrieves a detailed description of the current state for each shard participating in the snapshot. Note that this API should only be used to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed, or you want to obtain information about one or more existing snapshots, use the <>. //// diff --git a/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc b/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc index 0d3b5586da869..55b61cc321ed3 100644 --- a/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc @@ -4,6 +4,12 @@ Create or update snapshot repository ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Registers or updates a <>. [source,console] diff --git a/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc b/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc index f18ef1ee6e826..ca46ba1fb2b57 100644 --- a/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/repo-analysis-api.asciidoc @@ -5,6 +5,12 @@ Repository analysis ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Analyzes a repository, reporting its performance characteristics and any incorrect behaviour found. diff --git a/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc index 9fe06d73f1a63..89cd0f96915b9 100644 --- a/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc @@ -4,6 +4,12 @@ Restore snapshot ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Restores a <> of a cluster or specified data streams and indices. //// @@ -69,7 +75,7 @@ POST /_snapshot/my_repository/my_snapshot/_restore // tag::restore-prereqs[] * You can only restore a snapshot to a running cluster with an elected -<>. The snapshot's repository must be +<>. The snapshot's repository must be <> and available to the cluster. * The snapshot and cluster versions must be compatible. See diff --git a/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc b/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc index b8bb6a2cd7d13..715687f02edea 100644 --- a/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc +++ b/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc @@ -1,6 +1,12 @@ [[snapshot-restore-apis]] == Snapshot and restore APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. 
+-- + You can use the following APIs to set up snapshot repositories, manage snapshot backups, and restore snapshots to a running cluster. diff --git a/docs/reference/snapshot-restore/apis/verify-repo-api.asciidoc b/docs/reference/snapshot-restore/apis/verify-repo-api.asciidoc index dd845663be8d7..333a12ff49ac0 100644 --- a/docs/reference/snapshot-restore/apis/verify-repo-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/verify-repo-api.asciidoc @@ -4,6 +4,12 @@ Verify snapshot repository ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Checks for common misconfigurations in a snapshot repository. See <>. diff --git a/docs/reference/snapshot-restore/apis/verify-repo-integrity-api.asciidoc b/docs/reference/snapshot-restore/apis/verify-repo-integrity-api.asciidoc index 99ae126b401f5..22a4051a546cd 100644 --- a/docs/reference/snapshot-restore/apis/verify-repo-integrity-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/verify-repo-integrity-api.asciidoc @@ -5,6 +5,12 @@ Verify repository integrity ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-snapshot[Snapshot and restore APIs]. +-- + Verifies the integrity of the contents of a snapshot repository. //// diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index 9b71fe9220385..446d3a409234d 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -350,7 +350,7 @@ include::repository-shared-settings.asciidoc[] will disable retries altogether. Note that if retries are enabled in the Azure client, each of these retries comprises that many client-level retries. -`get_register_retry_delay` +`get_register_retry_delay`:: (<>) Sets the time to wait before trying again if an attempt to read a <> fails. Defaults to `5s`. diff --git a/docs/reference/snapshot-restore/take-snapshot.asciidoc b/docs/reference/snapshot-restore/take-snapshot.asciidoc index 711fcfe4cc484..1ae2258c7da89 100644 --- a/docs/reference/snapshot-restore/take-snapshot.asciidoc +++ b/docs/reference/snapshot-restore/take-snapshot.asciidoc @@ -46,7 +46,7 @@ taking snapshots at different time intervals. include::register-repository.asciidoc[tag=kib-snapshot-prereqs] * You can only take a snapshot from a running cluster with an elected -<>. +<>. * A snapshot repository must be <> and available to the cluster. diff --git a/docs/reference/sql/apis/clear-sql-cursor-api.asciidoc b/docs/reference/sql/apis/clear-sql-cursor-api.asciidoc index 48663ca0d75aa..a16d31c4b12d7 100644 --- a/docs/reference/sql/apis/clear-sql-cursor-api.asciidoc +++ b/docs/reference/sql/apis/clear-sql-cursor-api.asciidoc @@ -5,6 +5,12 @@ Clear SQL cursor ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-sql[SQL APIs]. +-- + Clears an <>. //// diff --git a/docs/reference/sql/apis/delete-async-sql-search-api.asciidoc b/docs/reference/sql/apis/delete-async-sql-search-api.asciidoc index 1737a39401dba..e8e4ff68cef07 100644 --- a/docs/reference/sql/apis/delete-async-sql-search-api.asciidoc +++ b/docs/reference/sql/apis/delete-async-sql-search-api.asciidoc @@ -5,6 +5,12 @@ Delete async SQL search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-sql[SQL APIs]. +-- + Deletes an <> or a <>. 
If the search is still running, the API cancels it. diff --git a/docs/reference/sql/apis/get-async-sql-search-api.asciidoc b/docs/reference/sql/apis/get-async-sql-search-api.asciidoc index 8ae575176dd3a..10ca4c648ebaa 100644 --- a/docs/reference/sql/apis/get-async-sql-search-api.asciidoc +++ b/docs/reference/sql/apis/get-async-sql-search-api.asciidoc @@ -5,6 +5,12 @@ Get async SQL search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-sql[SQL APIs]. +-- + Returns results for an <> or a <>. diff --git a/docs/reference/sql/apis/get-async-sql-search-status-api.asciidoc b/docs/reference/sql/apis/get-async-sql-search-status-api.asciidoc index 61505bab7c454..7a0d68cd120cc 100644 --- a/docs/reference/sql/apis/get-async-sql-search-status-api.asciidoc +++ b/docs/reference/sql/apis/get-async-sql-search-status-api.asciidoc @@ -5,6 +5,12 @@ Get async SQL search status ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-sql[SQL APIs]. +-- + Returns the current status of an <> or a <>. diff --git a/docs/reference/sql/apis/sql-apis.asciidoc b/docs/reference/sql/apis/sql-apis.asciidoc index 08300522c3288..60f3c85c55d6c 100644 --- a/docs/reference/sql/apis/sql-apis.asciidoc +++ b/docs/reference/sql/apis/sql-apis.asciidoc @@ -2,6 +2,12 @@ [[sql-apis]] == SQL APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-sql[SQL APIs]. +-- + {es}'s SQL APIs let you run SQL queries on {es} indices and data streams. For an overview of {es}'s SQL features and related tutorials, see <>. diff --git a/docs/reference/sql/apis/sql-search-api.asciidoc b/docs/reference/sql/apis/sql-search-api.asciidoc index 118d7975aefd9..b1d002d343fb8 100644 --- a/docs/reference/sql/apis/sql-search-api.asciidoc +++ b/docs/reference/sql/apis/sql-search-api.asciidoc @@ -5,6 +5,12 @@ SQL search ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-sql[SQL APIs]. +-- + Returns results for an <>. [source,console] diff --git a/docs/reference/sql/apis/sql-translate-api.asciidoc b/docs/reference/sql/apis/sql-translate-api.asciidoc index 15e52d118800e..9183423eefd0f 100644 --- a/docs/reference/sql/apis/sql-translate-api.asciidoc +++ b/docs/reference/sql/apis/sql-translate-api.asciidoc @@ -5,6 +5,12 @@ SQL translate ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-sql[SQL APIs]. +-- + Translates an <> into a <> request containing <>. See <>. diff --git a/docs/reference/sql/language/indices.asciidoc b/docs/reference/sql/language/indices.asciidoc index 1dee7f0840ade..1912a020ab0be 100644 --- a/docs/reference/sql/language/indices.asciidoc +++ b/docs/reference/sql/language/indices.asciidoc @@ -100,7 +100,7 @@ requires the keyword `LIKE` for SQL `LIKE` pattern. [[sql-index-frozen]] === Frozen Indices -By default, {es-sql} doesn't search <>. To +By default, {es-sql} doesn't search <>. 
To search frozen indices, use one of the following features: dedicated configuration parameter:: diff --git a/docs/reference/synonyms/apis/delete-synonym-rule.asciidoc b/docs/reference/synonyms/apis/delete-synonym-rule.asciidoc index 74cbab8c0b4a2..11f0708bafcda 100644 --- a/docs/reference/synonyms/apis/delete-synonym-rule.asciidoc +++ b/docs/reference/synonyms/apis/delete-synonym-rule.asciidoc @@ -1,10 +1,15 @@ [[delete-synonym-rule]] === Delete synonym rule - ++++ Delete synonym rule ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-synonyms[Synonyms APIs]. +-- + Deletes an individual synonym rule from a synonyms set. [[delete-synonym-rule-request]] diff --git a/docs/reference/synonyms/apis/delete-synonyms-set.asciidoc b/docs/reference/synonyms/apis/delete-synonyms-set.asciidoc index 9ba33ff3a5c75..62162e5c45675 100644 --- a/docs/reference/synonyms/apis/delete-synonyms-set.asciidoc +++ b/docs/reference/synonyms/apis/delete-synonyms-set.asciidoc @@ -1,10 +1,15 @@ [[delete-synonyms-set]] === Delete synonyms set - ++++ Delete synonyms set ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-synonyms[Synonyms APIs]. +-- + Deletes a synonyms set. [[delete-synonyms-set-request]] diff --git a/docs/reference/synonyms/apis/get-synonym-rule.asciidoc b/docs/reference/synonyms/apis/get-synonym-rule.asciidoc index c6c35e0efecca..3f0ee3f173245 100644 --- a/docs/reference/synonyms/apis/get-synonym-rule.asciidoc +++ b/docs/reference/synonyms/apis/get-synonym-rule.asciidoc @@ -1,10 +1,15 @@ [[get-synonym-rule]] === Get synonym rule - ++++ Get synonym rule ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-synonyms[Synonyms APIs]. +-- + Retrieves a synonym rule from a synonyms set. [[get-synonym-rule-request]] diff --git a/docs/reference/synonyms/apis/get-synonyms-set.asciidoc b/docs/reference/synonyms/apis/get-synonyms-set.asciidoc index 70bb5fb69526d..1bb31081712e5 100644 --- a/docs/reference/synonyms/apis/get-synonyms-set.asciidoc +++ b/docs/reference/synonyms/apis/get-synonyms-set.asciidoc @@ -5,6 +5,12 @@ Get synonyms set ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-synonyms[Synonyms APIs]. +-- + Retrieves a synonyms set. [[get-synonyms-set-request]] diff --git a/docs/reference/synonyms/apis/list-synonyms-sets.asciidoc b/docs/reference/synonyms/apis/list-synonyms-sets.asciidoc index 705a24c809e99..33ef220036b7a 100644 --- a/docs/reference/synonyms/apis/list-synonyms-sets.asciidoc +++ b/docs/reference/synonyms/apis/list-synonyms-sets.asciidoc @@ -5,6 +5,12 @@ List synonyms sets ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-synonyms[Synonyms APIs]. +-- + Retrieves a summary of all defined synonyms sets. This API allows to retrieve the total number of synonyms sets defined. diff --git a/docs/reference/synonyms/apis/put-synonym-rule.asciidoc b/docs/reference/synonyms/apis/put-synonym-rule.asciidoc index de2865632d55e..5bb561f0f923f 100644 --- a/docs/reference/synonyms/apis/put-synonym-rule.asciidoc +++ b/docs/reference/synonyms/apis/put-synonym-rule.asciidoc @@ -5,6 +5,12 @@ Create or update synonym rule ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-synonyms[Synonyms APIs]. +-- + Creates or updates a synonym rule for a synonym set. 
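A minimal sketch, assuming an existing synonyms set (the set and rule IDs below are illustrative):

[source,console]
----
PUT _synonyms/my-synonyms-set/test-rule-1
{
  "synonyms": "hello, hi, howdy"
}
----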
[[put-synonym-rule-request]] diff --git a/docs/reference/synonyms/apis/put-synonyms-set.asciidoc b/docs/reference/synonyms/apis/put-synonyms-set.asciidoc index ca75885921456..3af85638d022d 100644 --- a/docs/reference/synonyms/apis/put-synonyms-set.asciidoc +++ b/docs/reference/synonyms/apis/put-synonyms-set.asciidoc @@ -5,6 +5,12 @@ Create or update synonyms set ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-synonyms[Synonyms APIs]. +-- + Creates or updates a synonyms set. NOTE: Synonyms sets are limited to a maximum of 10,000 synonym rules per set. diff --git a/docs/reference/synonyms/apis/synonyms-apis.asciidoc b/docs/reference/synonyms/apis/synonyms-apis.asciidoc index dbbc26c36d3df..95fc0aae8c14d 100644 --- a/docs/reference/synonyms/apis/synonyms-apis.asciidoc +++ b/docs/reference/synonyms/apis/synonyms-apis.asciidoc @@ -7,6 +7,12 @@ --- +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-synonyms[Synonyms APIs]. +-- + The synonyms management API provides a convenient way to define and manage synonyms in an internal system index. Related synonyms can be grouped in a "synonyms set". Create as many synonym sets as you need. diff --git a/docs/reference/text-structure/apis/find-field-structure.asciidoc b/docs/reference/text-structure/apis/find-field-structure.asciidoc index 4fa108e92d4cb..c4b289e6c30a9 100644 --- a/docs/reference/text-structure/apis/find-field-structure.asciidoc +++ b/docs/reference/text-structure/apis/find-field-structure.asciidoc @@ -2,6 +2,12 @@ [[find-field-structure]] = Find field structure API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-text_structure[Text structure APIs]. +-- + Finds the structure of a field in an Elasticsearch index. [discrete] diff --git a/docs/reference/text-structure/apis/find-message-structure.asciidoc b/docs/reference/text-structure/apis/find-message-structure.asciidoc index 6c1bf5089bed2..18b85069559e3 100644 --- a/docs/reference/text-structure/apis/find-message-structure.asciidoc +++ b/docs/reference/text-structure/apis/find-message-structure.asciidoc @@ -2,6 +2,12 @@ [[find-message-structure]] = Find messages structure API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-text_structure[Text structure APIs]. +-- + Finds the structure of a list of text messages. [discrete] diff --git a/docs/reference/text-structure/apis/find-structure.asciidoc b/docs/reference/text-structure/apis/find-structure.asciidoc index 361560bace4ed..fef0584222e13 100644 --- a/docs/reference/text-structure/apis/find-structure.asciidoc +++ b/docs/reference/text-structure/apis/find-structure.asciidoc @@ -2,6 +2,12 @@ [[find-structure]] = Find text structure API +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-text_structure[Text structure APIs]. +-- + Finds the structure of text. The text must contain data that is suitable to be ingested into the {stack}. 
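As a rough sketch, the endpoint accepts newline-delimited sample documents in the request body (the two log lines below are invented sample data):

[source,console]
----
POST _text_structure/find_structure
{"message": "2024-05-01T12:00:00Z INFO Starting service"}
{"message": "2024-05-01T12:00:07Z WARN Disk space low"}
----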
diff --git a/docs/reference/text-structure/apis/index.asciidoc b/docs/reference/text-structure/apis/index.asciidoc index 9f4af120690f7..68607ddf708be 100644 --- a/docs/reference/text-structure/apis/index.asciidoc +++ b/docs/reference/text-structure/apis/index.asciidoc @@ -2,6 +2,12 @@ [[text-structure-apis]] == Text structure APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-text_structure[Text structure APIs]. +-- + You can use the following APIs to find text structures: * <> diff --git a/docs/reference/text-structure/apis/test-grok-pattern.asciidoc b/docs/reference/text-structure/apis/test-grok-pattern.asciidoc index 4034a24cf0a19..7c1cfb41608e9 100644 --- a/docs/reference/text-structure/apis/test-grok-pattern.asciidoc +++ b/docs/reference/text-structure/apis/test-grok-pattern.asciidoc @@ -6,6 +6,12 @@ Test Grok pattern ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-text_structure[Text structure APIs]. +-- + Tests a Grok pattern on lines of text, see also <>. [discrete] diff --git a/docs/reference/transform/apis/delete-transform.asciidoc b/docs/reference/transform/apis/delete-transform.asciidoc index 111dda23690b6..0b386f946d6c3 100644 --- a/docs/reference/transform/apis/delete-transform.asciidoc +++ b/docs/reference/transform/apis/delete-transform.asciidoc @@ -7,6 +7,12 @@ Delete {transform} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Deletes an existing {transform}. [[delete-transform-request]] diff --git a/docs/reference/transform/apis/get-transform-stats.asciidoc b/docs/reference/transform/apis/get-transform-stats.asciidoc index 273b1d094979e..e8d12a994faa3 100644 --- a/docs/reference/transform/apis/get-transform-stats.asciidoc +++ b/docs/reference/transform/apis/get-transform-stats.asciidoc @@ -7,6 +7,12 @@ Get {transform} statistics ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Retrieves usage information for {transforms}. diff --git a/docs/reference/transform/apis/get-transform.asciidoc b/docs/reference/transform/apis/get-transform.asciidoc index ece59138e2893..3eabf4ba26988 100644 --- a/docs/reference/transform/apis/get-transform.asciidoc +++ b/docs/reference/transform/apis/get-transform.asciidoc @@ -6,6 +6,12 @@ Get {transforms} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Retrieves configuration information for {transforms}. [[get-transform-request]] diff --git a/docs/reference/transform/apis/preview-transform.asciidoc b/docs/reference/transform/apis/preview-transform.asciidoc index fa9ad0c0fc8f9..c280bef52e0ee 100644 --- a/docs/reference/transform/apis/preview-transform.asciidoc +++ b/docs/reference/transform/apis/preview-transform.asciidoc @@ -7,6 +7,12 @@ Preview {transform} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Previews a {transform}. 
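A minimal preview request might look like the following (the index and field names are placeholders based on the {kib} sample web logs, not taken from this page):

[source,console]
----
POST _transform/_preview
{
  "source": { "index": "kibana_sample_data_logs" },
  "pivot": {
    "group_by": {
      "clientip": { "terms": { "field": "clientip" } }
    },
    "aggregations": {
      "max_bytes": { "max": { "field": "bytes" } }
    }
  }
}
----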
[[preview-transform-request]] diff --git a/docs/reference/transform/apis/put-transform.asciidoc b/docs/reference/transform/apis/put-transform.asciidoc index ed2ceba0a7a59..fc9abc8c6470c 100644 --- a/docs/reference/transform/apis/put-transform.asciidoc +++ b/docs/reference/transform/apis/put-transform.asciidoc @@ -7,6 +7,12 @@ Create {transform} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Instantiates a {transform}. [[put-transform-request]] diff --git a/docs/reference/transform/apis/reset-transform.asciidoc b/docs/reference/transform/apis/reset-transform.asciidoc index 1194d3589275d..3d9fd5db180b2 100644 --- a/docs/reference/transform/apis/reset-transform.asciidoc +++ b/docs/reference/transform/apis/reset-transform.asciidoc @@ -8,6 +8,12 @@ Reset {transform} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Resets a {transform}. [[reset-transform-request]] diff --git a/docs/reference/transform/apis/schedule-now-transform.asciidoc b/docs/reference/transform/apis/schedule-now-transform.asciidoc index 7a276edf08819..3349e14b02caf 100644 --- a/docs/reference/transform/apis/schedule-now-transform.asciidoc +++ b/docs/reference/transform/apis/schedule-now-transform.asciidoc @@ -8,6 +8,12 @@ Schedule now {transform} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Instantly runs a {transform} to process data. [[schedule-now-transform-request]] diff --git a/docs/reference/transform/apis/start-transform.asciidoc b/docs/reference/transform/apis/start-transform.asciidoc index f4f99f0f3457a..4bcb951f4c6b3 100644 --- a/docs/reference/transform/apis/start-transform.asciidoc +++ b/docs/reference/transform/apis/start-transform.asciidoc @@ -7,6 +7,12 @@ Start {transform} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Starts a {transform}. [[start-transform-request]] diff --git a/docs/reference/transform/apis/stop-transform.asciidoc b/docs/reference/transform/apis/stop-transform.asciidoc index e99fcbd413eba..d87784e036ae4 100644 --- a/docs/reference/transform/apis/stop-transform.asciidoc +++ b/docs/reference/transform/apis/stop-transform.asciidoc @@ -7,6 +7,12 @@ Stop {transforms} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Stops one or more {transforms}. diff --git a/docs/reference/transform/apis/transform-apis.asciidoc b/docs/reference/transform/apis/transform-apis.asciidoc index 20e5960e5bb18..45131f0214f9d 100644 --- a/docs/reference/transform/apis/transform-apis.asciidoc +++ b/docs/reference/transform/apis/transform-apis.asciidoc @@ -2,6 +2,12 @@ [[transform-apis]] = {transform-cap} APIs +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. 
+-- + * <> * <> * <> diff --git a/docs/reference/transform/apis/update-transform.asciidoc b/docs/reference/transform/apis/update-transform.asciidoc index 1ac7d6d5410d2..c473ca0f83b8a 100644 --- a/docs/reference/transform/apis/update-transform.asciidoc +++ b/docs/reference/transform/apis/update-transform.asciidoc @@ -7,6 +7,12 @@ Update {transform} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Updates certain properties of a {transform}. [[update-transform-request]] diff --git a/docs/reference/transform/apis/upgrade-transforms.asciidoc b/docs/reference/transform/apis/upgrade-transforms.asciidoc index a1b01a6fd146a..826243938a9f4 100644 --- a/docs/reference/transform/apis/upgrade-transforms.asciidoc +++ b/docs/reference/transform/apis/upgrade-transforms.asciidoc @@ -7,6 +7,12 @@ Upgrade {transforms} ++++ +.New API reference +[sidebar] +-- +For the most up-to-date API details, refer to {api-es}/group/endpoint-transform[{transform-cap} APIs]. +-- + Upgrades all {transforms}. [[upgrade-transforms-request]] diff --git a/docs/reference/transform/painless-examples.asciidoc b/docs/reference/transform/painless-examples.asciidoc index 4b0802c79a340..3b4dd9bdb631d 100644 --- a/docs/reference/transform/painless-examples.asciidoc +++ b/docs/reference/transform/painless-examples.asciidoc @@ -8,8 +8,8 @@ IMPORTANT: The examples that use the `scripted_metric` aggregation are not supported on {es} Serverless. -These examples demonstrate how to use Painless in {transforms}. You can learn -more about the Painless scripting language in the +These examples demonstrate how to use Painless in {transforms}. You can learn +more about the Painless scripting language in the {painless}/painless-guide.html[Painless guide]. * <> @@ -20,24 +20,24 @@ more about the Painless scripting language in the * <> * <> -[NOTE] +[NOTE] -- -* While the context of the following examples is the {transform} use case, -the Painless scripts in the snippets below can be used in other {es} search +* While the context of the following examples is the {transform} use case, +the Painless scripts in the snippets below can be used in other {es} search aggregations, too. -* All the following examples use scripts, {transforms} cannot deduce mappings of -output fields when the fields are created by a script. {transforms-cap} don't -create any mappings in the destination index for these fields, which means they -get dynamically mapped. Create the destination index prior to starting the +* All the following examples use scripts; {transforms} cannot deduce mappings of +output fields when the fields are created by a script. {transforms-cap} don't +create any mappings in the destination index for these fields, which means they +get dynamically mapped. Create the destination index prior to starting the {transform} in case you want explicit mappings. -- [[painless-top-hits]] == Getting top hits by using scripted metric aggregation -This snippet shows how to find the latest document, in other words the document -with the latest timestamp. From a technical perspective, it helps to achieve -the function of a <> by using +This snippet shows how to find the latest document, in other words, the document +with the latest timestamp. From a technical perspective, it helps to achieve +the function of a <> by using scripted metric aggregation in a {transform}, which provides a metric output.
IMPORTANT: This example uses a `scripted_metric` aggregation which is not supported on {es} Serverless. @@ -45,12 +45,12 @@ IMPORTANT: This example uses a `scripted_metric` aggregation which is not suppor [source,js] -------------------------------------------------- "aggregations": { - "latest_doc": { + "latest_doc": { "scripted_metric": { "init_script": "state.timestamp_latest = 0L; state.last_doc = ''", <1> "map_script": """ <2> - def current_date = doc['@timestamp'].getValue().toInstant().toEpochMilli(); - if (current_date > state.timestamp_latest) + def current_date = doc['@timestamp'].getValue().toInstant().toEpochMilli(); + if (current_date > state.timestamp_latest) {state.timestamp_latest = current_date; state.last_doc = new HashMap(params['_source']);} """, @@ -59,7 +59,7 @@ IMPORTANT: This example uses a `scripted_metric` aggregation which is not suppor def last_doc = ''; def timestamp_latest = 0L; for (s in states) {if (s.timestamp_latest > (timestamp_latest)) - {timestamp_latest = s.timestamp_latest; last_doc = s.last_doc;}} + {timestamp_latest = s.timestamp_latest; last_doc = s.last_doc;}} return last_doc """ } @@ -68,23 +68,23 @@ IMPORTANT: This example uses a `scripted_metric` aggregation which is not suppor -------------------------------------------------- // NOTCONSOLE -<1> The `init_script` creates a long type `timestamp_latest` and a string type +<1> The `init_script` creates a long type `timestamp_latest` and a string type `last_doc` in the `state` object. -<2> The `map_script` defines `current_date` based on the timestamp of the -document, then compares `current_date` with `state.timestamp_latest`, finally -returns `state.last_doc` from the shard. By using `new HashMap(...)` you copy -the source document, this is important whenever you want to pass the full source +<2> The `map_script` defines `current_date` based on the timestamp of the +document, then compares `current_date` with `state.timestamp_latest`, and finally +returns `state.last_doc` from the shard. By using `new HashMap(...)` you copy +the source document; this is important whenever you want to pass the full source object from one phase to the next. <3> The `combine_script` returns `state` from each shard. -<4> The `reduce_script` iterates through the value of `s.timestamp_latest` -returned by each shard and returns the document with the latest timestamp -(`last_doc`). In the response, the top hit (in other words, the `latest_doc`) is +<4> The `reduce_script` iterates through the value of `s.timestamp_latest` +returned by each shard and returns the document with the latest timestamp +(`last_doc`). In the response, the top hit (in other words, the `latest_doc`) is nested below the `latest_doc` field. -Check the <> for detailed +Check the <> for a detailed explanation of the respective scripts.
-You can retrieve the last value in a similar way: +You can retrieve the last value in a similar way: [source,js] -------------------------------------------------- @@ -93,17 +93,17 @@ You can retrieve the last value in a similar way: "scripted_metric": { "init_script": "state.timestamp_latest = 0L; state.last_value = ''", "map_script": """ - def current_date = doc['@timestamp'].getValue().toInstant().toEpochMilli(); - if (current_date > state.timestamp_latest) + def current_date = doc['@timestamp'].getValue().toInstant().toEpochMilli(); + if (current_date > state.timestamp_latest) {state.timestamp_latest = current_date; state.last_value = params['_source']['value'];} """, "combine_script": "return state", "reduce_script": """ def last_value = ''; - def timestamp_latest = 0L; - for (s in states) {if (s.timestamp_latest > (timestamp_latest)) - {timestamp_latest = s.timestamp_latest; last_value = s.last_value;}} + def timestamp_latest = 0L; + for (s in states) {if (s.timestamp_latest > (timestamp_latest)) + {timestamp_latest = s.timestamp_latest; last_value = s.last_value;}} return last_value """ } @@ -117,10 +117,10 @@ You can retrieve the last value in a similar way: [[top-hits-stored-scripts]] === Getting top hits by using stored scripts -You can also use the power of -{ref}/create-stored-script-api.html[stored scripts] to get the latest value. -Stored scripts reduce compilation time, make searches faster, and are -updatable. +You can also use the power of +{ref}/create-stored-script-api.html[stored scripts] to get the latest value. +Stored scripts are updatable, enable collaboration, and avoid duplication across +queries. 1. Create the stored scripts: + @@ -202,7 +202,7 @@ POST _scripts/last-value-reduce } -------------------------------------------------- // NOTCONSOLE -<1> The parameter `field_with_last_value` can be set any field that you want the +<1> The parameter `field_with_last_value` can be set to any field that you want the latest value for. -- @@ -210,8 +210,8 @@ latest value for. [[painless-time-features]] == Getting time features by using aggregations -This snippet shows how to extract time based features by using Painless in a -{transform}. The snippet uses an index where `@timestamp` is defined as a `date` +This snippet shows how to extract time-based features by using Painless in a +{transform}. The snippet uses an index where `@timestamp` is defined as a `date` type field. [source,js] @@ -225,11 +225,11 @@ type field. return date.getHour(); <4> """ } - } + } }, "avg_month_of_year": { <5> "avg":{ - "script": { <6> + "script": { <6> "source": """ ZonedDateTime date = doc['@timestamp'].value; <7> return date.getMonthValue(); <8> @@ -255,9 +255,9 @@ type field. [[painless-group-by]] == Using Painless in `group_by` -It is possible to base the `group_by` property of a {transform} on the output of -a script. The following example uses the {kib} sample web logs dataset. The goal -here is to make the {transform} output easier to understand through normalizing +It is possible to base the `group_by` property of a {transform} on the output of +a script. The following example uses the {kib} sample web logs dataset. The goal +here is to make the {transform} output easier to understand through normalizing the value of the fields that the data is grouped by.
[source,console] @@ -274,12 +274,12 @@ POST _transform/_preview "agent": { "terms": { "script": { <2> - "source": """String agent = doc['agent.keyword'].value; - if (agent.contains("MSIE")) { + "source": """String agent = doc['agent.keyword'].value; + if (agent.contains("MSIE")) { return "internet explorer"; - } else if (agent.contains("AppleWebKit")) { - return "safari"; - } else if (agent.contains('Firefox')) { + } else if (agent.contains("AppleWebKit")) { + return "safari"; + } else if (agent.contains('Firefox')) { return "firefox"; } else { return agent }""", "lang": "painless" @@ -314,18 +314,18 @@ POST _transform/_preview "dest": { <4> "index": "pivot_logs" } -} +} -------------------------------------------------- // TEST[skip:setup kibana sample data] <1> Specifies the source index or indices. -<2> The script defines an `agent` string based on the `agent` field of the -documents, then iterates through the values. If an `agent` field contains -"MSIE", than the script returns "Internet Explorer". If it contains -`AppleWebKit`, it returns "safari". It returns "firefox" if the field value -contains "Firefox". Finally, in every other case, the value of the field is +<2> The script defines an `agent` string based on the `agent` field of the +documents, then iterates through the values. If an `agent` field contains +"MSIE", then the script returns "internet explorer". If it contains +`AppleWebKit`, it returns "safari". It returns "firefox" if the field value +contains "Firefox". Finally, in every other case, the value of the field is returned. -<3> The aggregations object contains filters that narrow down the results to +<3> The aggregations object contains filters that narrow down the results to documents that contain `200`, `404`, or `503` values in the `response` field. <4> Specifies the destination index of the {transform}. @@ -374,14 +374,14 @@ The API returns the following result: -------------------------------------------------- // NOTCONSOLE -You can see that the `agent` values are simplified so it is easier to interpret -them. The table below shows how normalization modifies the output of the +You can see that the `agent` values are simplified so it is easier to interpret +them. The table below shows how normalization modifies the output of the {transform} in our example compared to the non-normalized values. [width="50%"] |=== -| Non-normalized `agent` value | Normalized `agent` value +| Non-normalized `agent` value | Normalized `agent` value | "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)" | "internet explorer" | "Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.696.50 Safari/534.24" | "safari" @@ -393,9 +393,9 @@ them. The table below shows how normalization modifies the output of the [[painless-bucket-script]] == Getting duration by using bucket script -This example shows you how to get the duration of a session by client IP from a -data log by using -<>. +This example shows you how to get the duration of a session by client IP from a +data log by using +<>. The example uses the {kib} sample web logs dataset. [source,console] @@ -440,22 +440,22 @@ PUT _transform/data_log // TEST[skip:setup kibana sample data] <1> To define the length of the sessions, we use a bucket script. -<2> The bucket path is a map of script variables and their associated path to -the buckets you want to use for the variable.
In this particular case, `min` and +<2> The bucket path is a map of script variables and their associated path to +the buckets you want to use for the variable. In this particular case, `min` and `max` are variables mapped to `time_frame.gte.value` and `time_frame.lte.value`. -<3> Finally, the script substracts the start date of the session from the end +<3> Finally, the script subtracts the start date of the session from the end date, which results in the duration of the session. [[painless-count-http]] == Counting HTTP responses by using scripted metric aggregation -You can count the different HTTP response types in a web log data set by using -scripted metric aggregation as part of the {transform}. You can achieve a -similar function with filter aggregations, check the -{ref}/transform-examples.html#example-clientips[Finding suspicious client IPs] +You can count the different HTTP response types in a web log data set by using +scripted metric aggregation as part of the {transform}. You can achieve a +similar function with filter aggregations; check the +{ref}/transform-examples.html#example-clientips[Finding suspicious client IPs] example for details. -The example below assumes that the HTTP response codes are stored as keywords in +The example below assumes that the HTTP response codes are stored as keywords in the `response` field of the documents. IMPORTANT: This example uses a `scripted_metric` aggregation which is not supported on {es} Serverless. @@ -488,32 +488,32 @@ IMPORTANT: This example uses a `scripted_metric` aggregation which is not suppor """ } }, - ... + ... } -------------------------------------------------- // NOTCONSOLE <1> The `aggregations` object of the {transform} that contains all aggregations. <2> Object of the `scripted_metric` aggregation. -<3> This `scripted_metric` performs a distributed operation on the web log data +<3> This `scripted_metric` performs a distributed operation on the web log data to count specific types of HTTP responses (error, success, and other). -<4> The `init_script` creates a `responses` array in the `state` object with +<4> The `init_script` creates a `responses` array in the `state` object with three properties (`error`, `success`, `other`) with long data type. -<5> The `map_script` defines `code` based on the `response.keyword` value of the -document, then it counts the errors, successes, and other responses based on the +<5> The `map_script` defines `code` based on the `response.keyword` value of the +document, then it counts the errors, successes, and other responses based on the first digit of the responses. <6> The `combine_script` returns `state.responses` from each shard. -<7> The `reduce_script` creates a `counts` array with the `error`, `success`, -and `other` properties, then iterates through the value of `responses` returned -by each shard and assigns the different response types to the appropriate -properties of the `counts` object; error responses to the error counts, success -responses to the success counts, and other responses to the other counts. +<7> The `reduce_script` creates a `counts` array with the `error`, `success`, +and `other` properties, then iterates through the value of `responses` returned +by each shard and assigns the different response types to the appropriate +properties of the `counts` object; error responses to the error counts, success +responses to the success counts, and other responses to the other counts. Finally, it returns the `counts` array with the response counts.
[[painless-compare]] == Comparing indices by using scripted metric aggregations -This example shows how to compare the content of two indices by a {transform} +This example shows how to compare the content of two indices by a {transform} that uses a scripted metric aggregation. IMPORTANT: This example uses a `scripted_metric` aggregation which is not supported on {es} Serverless. @@ -570,19 +570,19 @@ POST _transform/_preview <2> The `dest` index contains the results of the comparison. <3> The `group_by` field needs to be a unique identifier for each document. <4> Object of the `scripted_metric` aggregation. -<5> The `map_script` defines `doc` in the state object. By using -`new HashMap(...)` you copy the source document, this is important whenever you +<5> The `map_script` defines `doc` in the state object. By using +`new HashMap(...)` you copy the source document; this is important whenever you want to pass the full source object from one phase to the next. <6> The `combine_script` returns `state` from each shard. -<7> The `reduce_script` checks if the size of the indices are equal. If they are -not equal, than it reports back a `count_mismatch`. Then it iterates through all -the values of the two indices and compare them. If the values are equal, then it +<7> The `reduce_script` checks whether the sizes of the indices are equal. If they are +not equal, then it reports back a `count_mismatch`. Then it iterates through all +the values of the two indices and compares them. If the values are equal, then it returns a `match`; otherwise, it returns a `mismatch`. [[painless-web-session]] == Getting web session details by using scripted metric aggregation -This example shows how to derive multiple features from a single transaction. +This example shows how to derive multiple features from a single transaction. Let's take a look at the example source document from the data: .Source document [source,js] @@ -628,8 +628,8 @@ Let's take a look at the example source document from the data: ===== -By using the `sessionid` as a group-by field, you are able to enumerate events -through the session and get more details of the session by using scripted metric +By using the `sessionid` as a group-by field, you are able to enumerate events +through the session and get more details of the session by using scripted metric aggregation. IMPORTANT: This example uses a `scripted_metric` aggregation which is not supported on {es} Serverless.
@@ -650,7 +650,7 @@ POST _transform/_preview } }, "aggregations": { <2> - "distinct_paths": { + "distinct_paths": { "cardinality": { "field": "apache.access.path" } @@ -665,21 +665,21 @@ POST _transform/_preview "init_script": "state.docs = []", <3> "map_script": """ <4> Map span = [ - '@timestamp':doc['@timestamp'].value, + '@timestamp':doc['@timestamp'].value, 'url':doc['apache.access.url'].value, 'referrer':doc['apache.access.referrer'].value - ]; + ]; state.docs.add(span) """, "combine_script": "return state.docs;", <5> "reduce_script": """ <6> - def all_docs = []; - for (s in states) { - for (span in s) { - all_docs.add(span); + def all_docs = []; + for (s in states) { + for (span in s) { + all_docs.add(span); } } - all_docs.sort((HashMap o1, HashMap o2)->o1['@timestamp'].toEpochMilli().compareTo(o2['@timestamp'].toEpochMilli())); + all_docs.sort((HashMap o1, HashMap o2)->o1['@timestamp'].toEpochMilli().compareTo(o2['@timestamp'].toEpochMilli())); def size = all_docs.size(); def min_time = all_docs[0]['@timestamp']; def max_time = all_docs[size-1]['@timestamp']; @@ -705,17 +705,17 @@ POST _transform/_preview // NOTCONSOLE <1> The data is grouped by `sessionid`. -<2> The aggregations counts the number of paths and enumerate the viewed pages +<2> The aggregations count the number of paths and enumerate the viewed pages during the session. <3> The `init_script` creates an array type `docs` in the `state` object. -<4> The `map_script` defines a `span` array with a timestamp, a URL, and a -referrer value which are based on the corresponding values of the document, then +<4> The `map_script` defines a `span` array with a timestamp, a URL, and a +referrer value which are based on the corresponding values of the document, then adds the value of the `span` array to the `docs` object. <5> The `combine_script` returns `state.docs` from each shard. -<6> The `reduce_script` defines various objects like `min_time`, `max_time`, and -`duration` based on the document fields, then declares a `ret` object, and -copies the source document by using `new HashMap ()`. Next, the script defines -`first_time`, `last_time`, `duration` and other fields inside the `ret` object +<6> The `reduce_script` defines various objects like `min_time`, `max_time`, and +`duration` based on the document fields, then declares a `ret` object, and +copies the source document by using `new HashMap ()`. Next, the script defines +`first_time`, `last_time`, `duration` and other fields inside the `ret` object based on the corresponding object defined earlier, and finally returns `ret`. The API call results in a similar response: diff --git a/docs/reference/transform/setup.asciidoc b/docs/reference/transform/setup.asciidoc index dab357546d93e..3171086e43d61 100644 --- a/docs/reference/transform/setup.asciidoc +++ b/docs/reference/transform/setup.asciidoc @@ -11,7 +11,7 @@ To use {transforms}, you must have: -* at least one <>, +* at least one <>, * management features visible in the {kib} space, and * security privileges that: + diff --git a/docs/reference/troubleshooting.asciidoc b/docs/reference/troubleshooting.asciidoc index ceff8619062c4..75cd0c1be49fc 100644 --- a/docs/reference/troubleshooting.asciidoc +++ b/docs/reference/troubleshooting.asciidoc @@ -6,6 +6,10 @@ This section provides a series of troubleshooting solutions aimed at helping users fix problems that an {es} deployment might encounter. +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster.
AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** + [discrete] [[troubleshooting-general]] === General diff --git a/docs/reference/troubleshooting/common-issues/circuit-breaker-errors.asciidoc b/docs/reference/troubleshooting/common-issues/circuit-breaker-errors.asciidoc index fbc6fe7b42a7f..8ebe962eaed80 100644 --- a/docs/reference/troubleshooting/common-issues/circuit-breaker-errors.asciidoc +++ b/docs/reference/troubleshooting/common-issues/circuit-breaker-errors.asciidoc @@ -9,6 +9,13 @@ By default, the <> triggers at 95% JVM memory usage. To prevent errors, we recommend taking steps to reduce memory pressure if usage consistently exceeds 85%. +See https://www.youtube.com/watch?v=k3wYlRVbMSw[this video] for a walkthrough +of diagnosing circuit breaker errors. + +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** + [discrete] [[diagnose-circuit-breaker-errors]] ==== Diagnose circuit breaker errors diff --git a/docs/reference/troubleshooting/common-issues/diagnose-unassigned-shards.asciidoc b/docs/reference/troubleshooting/common-issues/diagnose-unassigned-shards.asciidoc index e1ceefb92bbec..c6bbcd69b0555 100644 --- a/docs/reference/troubleshooting/common-issues/diagnose-unassigned-shards.asciidoc +++ b/docs/reference/troubleshooting/common-issues/diagnose-unassigned-shards.asciidoc @@ -11,3 +11,7 @@ include::{es-ref-dir}/tab-widgets/troubleshooting/data/diagnose-unassigned-shard See https://www.youtube.com/watch?v=v2mbeSd1vTQ[this video] for a walkthrough of monitoring allocation health. +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** + diff --git a/docs/reference/troubleshooting/common-issues/disk-usage-exceeded.asciidoc b/docs/reference/troubleshooting/common-issues/disk-usage-exceeded.asciidoc index 2222d09c26db2..a2342c449c88c 100644 --- a/docs/reference/troubleshooting/common-issues/disk-usage-exceeded.asciidoc +++ b/docs/reference/troubleshooting/common-issues/disk-usage-exceeded.asciidoc @@ -21,6 +21,10 @@ usage falls below the <>. To achieve this, {es} attempts to rebalance some of the affected node's shards to other nodes in the same data tier. +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. 
+**** + [[fix-watermark-errors-rebalance]] ==== Monitor rebalancing diff --git a/docs/reference/troubleshooting/common-issues/high-cpu-usage.asciidoc b/docs/reference/troubleshooting/common-issues/high-cpu-usage.asciidoc index 96a9a8f1e32b7..7d226a891251b 100644 --- a/docs/reference/troubleshooting/common-issues/high-cpu-usage.asciidoc +++ b/docs/reference/troubleshooting/common-issues/high-cpu-usage.asciidoc @@ -11,6 +11,10 @@ depleted, {es} will reject search requests until more threads are available. You might experience high CPU usage if a <>, and therefore the nodes assigned to that tier, is experiencing more traffic than other tiers. This imbalance in resource utilization is also known as <>. +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** + [discrete] [[diagnose-high-cpu-usage]] ==== Diagnose high CPU usage diff --git a/docs/reference/troubleshooting/common-issues/high-jvm-memory-pressure.asciidoc b/docs/reference/troubleshooting/common-issues/high-jvm-memory-pressure.asciidoc index 3469a0ca5bf42..842b7edda73ac 100644 --- a/docs/reference/troubleshooting/common-issues/high-jvm-memory-pressure.asciidoc +++ b/docs/reference/troubleshooting/common-issues/high-jvm-memory-pressure.asciidoc @@ -6,6 +6,10 @@ High JVM memory usage can degrade cluster performance and trigger taking steps to reduce memory pressure if a node's JVM memory usage consistently exceeds 85%. +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** + [discrete] [[diagnose-high-jvm-memory-pressure]] ==== Diagnose high JVM memory pressure diff --git a/docs/reference/troubleshooting/common-issues/hotspotting.asciidoc b/docs/reference/troubleshooting/common-issues/hotspotting.asciidoc index 344e36a999a34..53226dbfedb56 100644 --- a/docs/reference/troubleshooting/common-issues/hotspotting.asciidoc +++ b/docs/reference/troubleshooting/common-issues/hotspotting.asciidoc @@ -11,6 +11,10 @@ may occur in {es} when resource utilizations are unevenly distributed across ongoing significantly unique utilization may lead to cluster bottlenecks and should be reviewed. +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** + See link:https://www.youtube.com/watch?v=Q5ODJ5nIKAM[this video] for a walkthrough of troubleshooting a hot spotting issue. 
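As a quick first pass at spotting uneven utilization, the cat nodes API can sort nodes by CPU (a sketch; the column selection is illustrative):

[source,console]
----
GET _cat/nodes?v=true&s=cpu:desc&h=name,cpu,load_1m,heap.percent,disk.used_percent
----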
[discrete] diff --git a/docs/reference/troubleshooting/common-issues/red-yellow-cluster-status.asciidoc b/docs/reference/troubleshooting/common-issues/red-yellow-cluster-status.asciidoc index 4289242deb486..c07e92c058991 100644 --- a/docs/reference/troubleshooting/common-issues/red-yellow-cluster-status.asciidoc +++ b/docs/reference/troubleshooting/common-issues/red-yellow-cluster-status.asciidoc @@ -22,6 +22,10 @@ the remaining problems so management and cleanup activities can proceed. See https://www.youtube.com/watch?v=v2mbeSd1vTQ[this video] for a walkthrough of monitoring allocation health. +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** + [discrete] [[diagnose-cluster-status]] ==== Diagnose your cluster status diff --git a/docs/reference/troubleshooting/common-issues/rejected-requests.asciidoc b/docs/reference/troubleshooting/common-issues/rejected-requests.asciidoc index c863709775fcd..7b0e3eb51680e 100644 --- a/docs/reference/troubleshooting/common-issues/rejected-requests.asciidoc +++ b/docs/reference/troubleshooting/common-issues/rejected-requests.asciidoc @@ -12,6 +12,10 @@ thread pool returns a `TOO_MANY_REQUESTS` error message. * High <> that exceeds the <>. +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** + [discrete] [[check-rejected-tasks]] ==== Check rejected tasks @@ -32,6 +36,9 @@ correlating log as `EsRejectedExecutionException` with either These errors are often related to <>. +See https://www.youtube.com/watch?v=auZJRXoAVpI[this video] for a walkthrough +of troubleshooting threadpool rejections. + [discrete] [[check-circuit-breakers]] ==== Check circuit breakers @@ -47,6 +54,9 @@ GET /_nodes/stats/breaker These statistics are cumulative from node startup. For more information, see <>. +See https://www.youtube.com/watch?v=k3wYlRVbMSw[this video] for a walkthrough +of diagnosing circuit breaker errors. + [discrete] [[check-indexing-pressure]] ==== Check indexing pressure @@ -63,12 +73,15 @@ These stats are cumulative from node startup. Indexing pressure rejections appear as an `EsRejectedExecutionException`, and indicate that they were rejected due -to `coordinating_and_primary_bytes`, `coordinating`, `primary`, or `replica`. +to `combined_coordinating_and_primary`, `coordinating`, `primary`, or `replica`. These errors are often related to <>, <> sizing, or the ingest target's <>. +See https://www.youtube.com/watch?v=QuV8QqSfc0c[this video] for a walkthrough +of diagnosing indexing pressure rejections. + [discrete] [[prevent-rejected-requests]] ==== Prevent rejected requests @@ -77,4 +90,4 @@ These errors are often related to <>, If {es} regularly rejects requests and other tasks, your cluster likely has high CPU usage or high JVM memory pressure. For tips, see <> and -<>. \ No newline at end of file +<>. 
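To inspect the indexing pressure counters discussed above, one option is to filter the node stats response (a sketch using `filter_path`):

[source,console]
----
GET _nodes/stats?human&filter_path=nodes.*.indexing_pressure
----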
diff --git a/docs/reference/troubleshooting/common-issues/task-queue-backlog.asciidoc b/docs/reference/troubleshooting/common-issues/task-queue-backlog.asciidoc index 5aa6a0129c2d4..f233f22cb3fbe 100644 --- a/docs/reference/troubleshooting/common-issues/task-queue-backlog.asciidoc +++ b/docs/reference/troubleshooting/common-issues/task-queue-backlog.asciidoc @@ -1,103 +1,149 @@ [[task-queue-backlog]] -=== Task queue backlog +=== Backlogged task queue -A backlogged task queue can prevent tasks from completing and put the cluster -into an unhealthy state. Resource constraints, a large number of tasks being -triggered at once, and long running tasks can all contribute to a backlogged -task queue. +******************************* +*Product:* Elasticsearch + +*Deployment type:* Elastic Cloud Enterprise, Elastic Cloud Hosted, Elastic Cloud on Kubernetes, Elastic Self-Managed + +*Versions:* All +******************************* + +A backlogged task queue can prevent tasks from completing and lead to an +unhealthy cluster state. Contributing factors include resource constraints, +a large number of tasks triggered at once, and long-running tasks. [discrete] [[diagnose-task-queue-backlog]] -==== Diagnose a task queue backlog +==== Diagnose a backlogged task queue + +To identify the cause of the backlog, try these diagnostic actions. -**Check the thread pool status** +* <> +* <> +* <> +* <> + +[discrete] +[[diagnose-task-queue-thread-pool]] +===== Check the thread pool status A <> can result in <>. -Thread pool depletion might be restricted to a specific <>. If <> is occuring, one node might experience depletion faster than other nodes, leading to performance issues and a growing task backlog. - -You can use the <> to see the number of -active threads in each thread pool and how many tasks are queued, how many -have been rejected, and how many have completed. +Use the <> to monitor +active threads, queued tasks, rejections, and completed tasks: [source,console] ---- GET /_cat/thread_pool?v&s=t,n&h=type,name,node_name,active,queue,rejected,completed ---- -The `active` and `queue` statistics are instantaneous while the `rejected` and -`completed` statistics are cumulative from node startup. +* Look for high `active` and `queue` metrics, which indicate potential bottlenecks +and opportunities to <>. +* Determine whether thread pool issues are specific to a <>. +* Check whether a specific node's thread pool is depleting faster than others. This +might indicate <>. -**Inspect the hot threads on each node** +[discrete] +[[diagnose-task-queue-hot-thread]] +===== Inspect hot threads on each node -If a particular thread pool queue is backed up, you can periodically poll the -<> API to determine if the thread -has sufficient resources to progress and gauge how quickly it is progressing. +If a particular thread pool queue is backed up, periodically poll the +<> to gauge the thread's +progression and ensure it has sufficient resources: [source,console] ---- GET /_nodes/hot_threads ---- -**Look for long running node tasks** +Although the hot threads API response does not list the specific tasks running on a thread, +it provides a summary of the thread's activities. You can correlate a hot threads response +with a <> to identify any overlap with specific tasks. For +example, if the hot threads response indicates the thread is `performing a search query`, you can +<> using the task management API. 
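+
+For example, once the task list gives you the offending task's ID, you can cancel it with the task management API; the ID below is a placeholder:
+
+[source,console]
+----
+# substitute a real {node_id}:{task_number} value from the task list output
+POST _tasks/oTUltX4IQMOUUVeiohTt8A:464/_cancel
+----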
+ +[discrete] +[[diagnose-task-queue-long-running-node-tasks]] +===== Identify long-running node tasks -Long-running tasks can also cause a backlog. You can use the <> API to get information about the node tasks that are running. -Check the `running_time_in_nanos` to identify tasks that are taking an -excessive amount of time to complete. +Long-running tasks can also cause a backlog. Use the <> to check for excessive `running_time_in_nanos` values: [source,console] ---- GET /_tasks?pretty=true&human=true&detailed=true ---- -If a particular `action` is suspected, you can filter the tasks further. The most common long-running tasks are <>- or search-related. +You can filter on a specific `action`, such as <> or search-related tasks. +These tend to be long-running. -* Filter for <> actions: +* Filter on <> actions: + [source,console] ---- GET /_tasks?human&detailed&actions=indices:data/write/bulk ---- -* Filter for search actions: +* Filter on search actions: + [source,console] ---- GET /_tasks?human&detailed&actions=indices:data/write/search ---- -The API response may contain additional tasks columns, including `description` and `header`, which provides the task parameters, target, and requestor. You can use this information to perform further diagnosis. +Long-running tasks might need to be <>. -**Look for long running cluster tasks** +[discrete] +[[diagnose-task-queue-long-running-cluster-tasks]] +===== Look for long-running cluster tasks -A task backlog might also appear as a delay in synchronizing the cluster state. You -can use the <> to get information -about the pending cluster state sync tasks that are running. +Use the <> to identify delays +in cluster state synchronization: [source,console] ---- GET /_cluster/pending_tasks ---- -Check the `timeInQueue` to identify tasks that are taking an excessive amount -of time to complete. +Tasks with a high `timeInQueue` value are likely contributing to the backlog and might +need to be <>. [discrete] [[resolve-task-queue-backlog]] -==== Resolve a task queue backlog +==== Recommendations + +After identifying problematic threads and tasks, resolve the issue by increasing resources or canceling tasks. -**Increase available resources** +[discrete] +[[resolve-task-queue-backlog-resources]] +===== Increase available resources -If tasks are progressing slowly and the queue is backing up, -you might need to take steps to <>. +If tasks are progressing slowly, try <>. -In some cases, increasing the thread pool size might help. -For example, the `force_merge` thread pool defaults to a single thread. +In some cases, you might need to increase the thread pool size. For example, the `force_merge` thread pool defaults to a single thread. Increasing the size to 2 might help reduce a backlog of force merge requests. -**Cancel stuck tasks** +[discrete] +[[resolve-task-queue-backlog-stuck-tasks]] +===== Cancel stuck tasks + +If an active task's <> shows no progress, consider <>. + +[discrete] +[[resolve-task-queue-backlog-hotspotting]] +===== Address hot spotting + +If a specific node's thread pool is depleting faster than others, try addressing +uneven node resource utilization, also known as hot spotting. +For details on actions you can take, such as rebalancing shards, see <>. + +[discrete] +==== Resources + +Related symptoms: + +* <> +* <> +* <> -If you find the active task's hot thread isn't progressing and there's a backlog, -consider canceling the task. 
\ No newline at end of file +// TODO add link to standard Additional resources when that topic exists diff --git a/docs/reference/troubleshooting/data/increase-cluster-shard-limit.asciidoc b/docs/reference/troubleshooting/data/increase-cluster-shard-limit.asciidoc index 8b8703f9a9dc1..ddd0881385524 100644 --- a/docs/reference/troubleshooting/data/increase-cluster-shard-limit.asciidoc +++ b/docs/reference/troubleshooting/data/increase-cluster-shard-limit.asciidoc @@ -16,5 +16,8 @@ In order to fix this follow the next steps: include::{es-ref-dir}/tab-widgets/troubleshooting/data/increase-cluster-shard-limit-widget.asciidoc[] +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** diff --git a/docs/reference/troubleshooting/data/increase-shard-limit.asciidoc b/docs/reference/troubleshooting/data/increase-shard-limit.asciidoc index 121b5348ab36a..51bb9fda09fef 100644 --- a/docs/reference/troubleshooting/data/increase-shard-limit.asciidoc +++ b/docs/reference/troubleshooting/data/increase-shard-limit.asciidoc @@ -14,5 +14,9 @@ In order to fix this follow the next steps: include::{es-ref-dir}/tab-widgets/troubleshooting/data/total-shards-per-node-widget.asciidoc[] +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** + diff --git a/docs/reference/troubleshooting/data/increase-tier-capacity.asciidoc b/docs/reference/troubleshooting/data/increase-tier-capacity.asciidoc index 362a14c3874db..09d7fedb89e6a 100644 --- a/docs/reference/troubleshooting/data/increase-tier-capacity.asciidoc +++ b/docs/reference/troubleshooting/data/increase-tier-capacity.asciidoc @@ -17,5 +17,8 @@ In order to fix this follow the next steps: include::{es-ref-dir}/tab-widgets/troubleshooting/data/increase-tier-capacity-widget.asciidoc[] +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** diff --git a/docs/reference/troubleshooting/diagnostic.asciidoc b/docs/reference/troubleshooting/diagnostic.asciidoc index c6d46b9e94fc8..d806fa2b986c8 100644 --- a/docs/reference/troubleshooting/diagnostic.asciidoc +++ b/docs/reference/troubleshooting/diagnostic.asciidoc @@ -15,6 +15,10 @@ https://discuss.elastic.co[Elastic Discuss] to minimize turnaround time. See https://www.youtube.com/watch?v=Bb6SaqhqYHw[this video] for a walkthrough of capturing an {es} diagnostic. +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. 
For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** + [discrete] [[diagnostic-tool-requirements]] === Requirements diff --git a/docs/reference/troubleshooting/fix-common-cluster-issues.asciidoc b/docs/reference/troubleshooting/fix-common-cluster-issues.asciidoc index 47244ac17e99d..5aa90215d7ae3 100644 --- a/docs/reference/troubleshooting/fix-common-cluster-issues.asciidoc +++ b/docs/reference/troubleshooting/fix-common-cluster-issues.asciidoc @@ -3,6 +3,10 @@ This guide describes how to fix common errors and problems with {es} clusters. +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** + <>:: Fix watermark errors that occur when a data node is critically low on disk space and has reached the flood-stage disk usage watermark. diff --git a/docs/reference/troubleshooting/snapshot/repeated-snapshot-failures.asciidoc b/docs/reference/troubleshooting/snapshot/repeated-snapshot-failures.asciidoc index 2496781c0c8f4..7a7e606e2ca7d 100644 --- a/docs/reference/troubleshooting/snapshot/repeated-snapshot-failures.asciidoc +++ b/docs/reference/troubleshooting/snapshot/repeated-snapshot-failures.asciidoc @@ -14,5 +14,8 @@ information about the problem: include::{es-ref-dir}/tab-widgets/troubleshooting/snapshot/repeated-snapshot-failures-widget.asciidoc[] +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** diff --git a/docs/reference/troubleshooting/troubleshooting-shards-capacity.asciidoc b/docs/reference/troubleshooting/troubleshooting-shards-capacity.asciidoc index bc8fb7290f1ed..d9871855faa84 100644 --- a/docs/reference/troubleshooting/troubleshooting-shards-capacity.asciidoc +++ b/docs/reference/troubleshooting/troubleshooting-shards-capacity.asciidoc @@ -8,3 +8,7 @@ The current shards capacity of the cluster is available in the <>. include::{es-ref-dir}/tab-widgets/troubleshooting/troubleshooting-shards-capacity-widget.asciidoc[] + +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. 
+**** diff --git a/docs/reference/troubleshooting/troubleshooting-unbalanced-cluster.asciidoc b/docs/reference/troubleshooting/troubleshooting-unbalanced-cluster.asciidoc index a1d4f5df9c4f6..4ac3155c7fa8e 100644 --- a/docs/reference/troubleshooting/troubleshooting-unbalanced-cluster.asciidoc +++ b/docs/reference/troubleshooting/troubleshooting-unbalanced-cluster.asciidoc @@ -7,6 +7,10 @@ Elasticsearch balances shards across data tiers to achieve a good compromise bet * disk usage * write load (for indices in data streams) +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** + Elasticsearch does not take into account the amount or complexity of search queries when rebalancing shards. This is indirectly achieved by balancing shard count and disk usage. diff --git a/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc b/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc index e47b85aa99547..ab0faa3b6c710 100644 --- a/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc +++ b/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc @@ -17,6 +17,10 @@ logs. * The master may appear busy due to frequent cluster state updates. +**** +If you're using Elastic Cloud Hosted, then you can use AutoOps to monitor your cluster. AutoOps significantly simplifies cluster management with performance recommendations, resource utilization visibility, real-time issue detection and resolution paths. For more information, refer to https://www.elastic.co/guide/en/cloud/current/ec-autoops.html[Monitor with AutoOps]. +**** + To troubleshoot a cluster in this state, first ensure the cluster has a <>. Next, focus on the nodes unexpectedly leaving the cluster ahead of all other issues. It will not be diff --git a/docs/reference/upgrade/disable-shard-alloc.asciidoc b/docs/reference/upgrade/disable-shard-alloc.asciidoc index f69a673095257..4672ea65446b4 100644 --- a/docs/reference/upgrade/disable-shard-alloc.asciidoc +++ b/docs/reference/upgrade/disable-shard-alloc.asciidoc @@ -5,7 +5,7 @@ starting to replicate the shards on that node to other nodes in the cluster, which can involve a lot of I/O. Since the node is shortly going to be restarted, this I/O is unnecessary. 
You can avoid racing the clock by <> of replicas before -shutting down <>: +shutting down <>: [source,console] -------------------------------------------------- diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 33addef8aedd0..2c46c4642e56e 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -949,14 +949,9 @@ - - - - - - - - + + + diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 22286c90de3d1..e712035eabc7b 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=89d4e70e4e84e2d2dfbb63e4daa53e21b25017cc70c37e4eea31ee51fb15098a -distributionUrl=https\://services.gradle.org/distributions/gradle-8.11.1-all.zip +distributionSha256Sum=7ebdac923867a3cec0098302416d1e3c6c0c729fc4e2e05c10637a8af33a76c5 +distributionUrl=https\://services.gradle.org/distributions/gradle-8.12-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/gradlew b/gradlew index f5feea6d6b116..f3b75f3b0d4fa 100755 --- a/gradlew +++ b/gradlew @@ -86,8 +86,7 @@ done # shellcheck disable=SC2034 APP_BASE_NAME=${0##*/} # Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) -APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s -' "$PWD" ) || exit +APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s\n' "$PWD" ) || exit # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD=maximum diff --git a/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java b/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java index 9f10739486238..eaf4d0ad98ef5 100644 --- a/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java +++ b/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImpl.java @@ -66,10 +66,20 @@ public MethodVisitor visitMethod( private static final Type CLASS_TYPE = Type.getType(Class.class); - static MethodKey parseCheckerMethodSignature(String checkerMethodName, Type[] checkerMethodArgumentTypes) { - var classNameStartIndex = checkerMethodName.indexOf('$'); - var classNameEndIndex = checkerMethodName.lastIndexOf('$'); + static ParsedCheckerMethod parseCheckerMethodName(String checkerMethodName) { + boolean targetMethodIsStatic; + int classNameEndIndex = checkerMethodName.lastIndexOf("$$"); + int methodNameStartIndex; + if (classNameEndIndex == -1) { + targetMethodIsStatic = false; + classNameEndIndex = checkerMethodName.lastIndexOf('$'); + methodNameStartIndex = classNameEndIndex + 1; + } else { + targetMethodIsStatic = true; + methodNameStartIndex = classNameEndIndex + 2; + } + var classNameStartIndex = checkerMethodName.indexOf('$'); if (classNameStartIndex == -1 || classNameStartIndex >= classNameEndIndex) { throw new IllegalArgumentException( String.format( @@ -82,15 +92,22 @@ static MethodKey parseCheckerMethodSignature(String checkerMethodName, Type[] ch ); } - // No "className" (check$$methodName) -> method is instance, and we'll get the class from the actual typed argument - final boolean targetMethodIsStatic = classNameStartIndex + 1 != 
classNameEndIndex; // No "methodName" (check$package_ClassName$) -> method is ctor final boolean targetMethodIsCtor = classNameEndIndex + 1 == checkerMethodName.length(); - final String targetMethodName = targetMethodIsCtor ? "" : checkerMethodName.substring(classNameEndIndex + 1); + final String targetMethodName = targetMethodIsCtor ? "" : checkerMethodName.substring(methodNameStartIndex); + + final String targetClassName = checkerMethodName.substring(classNameStartIndex + 1, classNameEndIndex).replace('_', '/'); + if (targetClassName.isBlank()) { + throw new IllegalArgumentException(String.format(Locale.ROOT, "Checker method %s has no class name", checkerMethodName)); + } + return new ParsedCheckerMethod(targetClassName, targetMethodName, targetMethodIsStatic, targetMethodIsCtor); + } + + static MethodKey parseCheckerMethodSignature(String checkerMethodName, Type[] checkerMethodArgumentTypes) { + ParsedCheckerMethod checkerMethod = parseCheckerMethodName(checkerMethodName); - final String targetClassName; final List targetParameterTypes; - if (targetMethodIsStatic) { + if (checkerMethod.targetMethodIsStatic() || checkerMethod.targetMethodIsCtor()) { if (checkerMethodArgumentTypes.length < 1 || CLASS_TYPE.equals(checkerMethodArgumentTypes[0]) == false) { throw new IllegalArgumentException( String.format( @@ -101,7 +118,6 @@ static MethodKey parseCheckerMethodSignature(String checkerMethodName, Type[] ch ); } - targetClassName = checkerMethodName.substring(classNameStartIndex + 1, classNameEndIndex).replace('_', '/'); targetParameterTypes = Arrays.stream(checkerMethodArgumentTypes).skip(1).map(Type::getInternalName).toList(); } else { if (checkerMethodArgumentTypes.length < 2 @@ -117,10 +133,15 @@ static MethodKey parseCheckerMethodSignature(String checkerMethodName, Type[] ch ) ); } - var targetClassType = checkerMethodArgumentTypes[1]; - targetClassName = targetClassType.getInternalName(); targetParameterTypes = Arrays.stream(checkerMethodArgumentTypes).skip(2).map(Type::getInternalName).toList(); } - return new MethodKey(targetClassName, targetMethodName, targetParameterTypes); + return new MethodKey(checkerMethod.targetClassName(), checkerMethod.targetMethodName(), targetParameterTypes); } + + private record ParsedCheckerMethod( + String targetClassName, + String targetMethodName, + boolean targetMethodIsStatic, + boolean targetMethodIsCtor + ) {} } diff --git a/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java b/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java index 00efab829b2bb..06408941ac96e 100644 --- a/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java +++ b/libs/entitlement/asm-provider/src/main/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterImpl.java @@ -152,6 +152,7 @@ public MethodVisitor visitMethod(int access, String name, String descriptor, Str if (isAnnotationPresent == false) { boolean isStatic = (access & ACC_STATIC) != 0; boolean isCtor = "".equals(name); + boolean hasReceiver = (isStatic || isCtor) == false; var key = new MethodKey(className, name, Stream.of(Type.getArgumentTypes(descriptor)).map(Type::getInternalName).toList()); var instrumentationMethod = checkMethods.get(key); if (instrumentationMethod != null) { diff --git 
a/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests.java b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests.java index e3285cec8f883..ab0d96a8df96d 100644 --- a/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests.java +++ b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests.java @@ -32,17 +32,17 @@ public class InstrumentationServiceImplTests extends ESTestCase { static class TestTargetClass {} interface TestChecker { - void check$org_example_TestTargetClass$staticMethod(Class clazz, int arg0, String arg1, Object arg2); + void check$org_example_TestTargetClass$$staticMethod(Class clazz, int arg0, String arg1, Object arg2); - void check$$instanceMethodNoArgs(Class clazz, TestTargetClass that); + void check$org_example_TestTargetClass$instanceMethodNoArgs(Class clazz, TestTargetClass that); - void check$$instanceMethodWithArgs(Class clazz, TestTargetClass that, int x, int y); + void check$org_example_TestTargetClass$instanceMethodWithArgs(Class clazz, TestTargetClass that, int x, int y); } interface TestCheckerOverloads { - void check$org_example_TestTargetClass$staticMethodWithOverload(Class clazz, int x, int y); + void check$org_example_TestTargetClass$$staticMethodWithOverload(Class clazz, int x, int y); - void check$org_example_TestTargetClass$staticMethodWithOverload(Class clazz, int x, String y); + void check$org_example_TestTargetClass$$staticMethodWithOverload(Class clazz, int x, String y); } interface TestCheckerCtors { @@ -62,7 +62,7 @@ public void testInstrumentationTargetLookup() throws IOException { equalTo( new CheckMethod( "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestChecker", - "check$org_example_TestTargetClass$staticMethod", + "check$org_example_TestTargetClass$$staticMethod", List.of("Ljava/lang/Class;", "I", "Ljava/lang/String;", "Ljava/lang/Object;") ) ) @@ -71,17 +71,11 @@ public void testInstrumentationTargetLookup() throws IOException { assertThat( checkMethods, hasEntry( - equalTo( - new MethodKey( - "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestTargetClass", - "instanceMethodNoArgs", - List.of() - ) - ), + equalTo(new MethodKey("org/example/TestTargetClass", "instanceMethodNoArgs", List.of())), equalTo( new CheckMethod( "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestChecker", - "check$$instanceMethodNoArgs", + "check$org_example_TestTargetClass$instanceMethodNoArgs", List.of( "Ljava/lang/Class;", "Lorg/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestTargetClass;" @@ -93,17 +87,11 @@ public void testInstrumentationTargetLookup() throws IOException { assertThat( checkMethods, hasEntry( - equalTo( - new MethodKey( - "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestTargetClass", - "instanceMethodWithArgs", - List.of("I", "I") - ) - ), + equalTo(new MethodKey("org/example/TestTargetClass", "instanceMethodWithArgs", List.of("I", "I"))), equalTo( new CheckMethod( "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestChecker", - "check$$instanceMethodWithArgs", + "check$org_example_TestTargetClass$instanceMethodWithArgs", List.of( 
"Ljava/lang/Class;", "Lorg/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestTargetClass;", @@ -127,7 +115,7 @@ public void testInstrumentationTargetLookupWithOverloads() throws IOException { equalTo( new CheckMethod( "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestCheckerOverloads", - "check$org_example_TestTargetClass$staticMethodWithOverload", + "check$org_example_TestTargetClass$$staticMethodWithOverload", List.of("Ljava/lang/Class;", "I", "Ljava/lang/String;") ) ) @@ -140,7 +128,7 @@ public void testInstrumentationTargetLookupWithOverloads() throws IOException { equalTo( new CheckMethod( "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestCheckerOverloads", - "check$org_example_TestTargetClass$staticMethodWithOverload", + "check$org_example_TestTargetClass$$staticMethodWithOverload", List.of("Ljava/lang/Class;", "I", "I") ) ) @@ -182,7 +170,7 @@ public void testInstrumentationTargetLookupWithCtors() throws IOException { public void testParseCheckerMethodSignatureStaticMethod() { var methodKey = InstrumentationServiceImpl.parseCheckerMethodSignature( - "check$org_example_TestClass$staticMethod", + "check$org_example_TestClass$$staticMethod", new Type[] { Type.getType(Class.class) } ); @@ -191,7 +179,7 @@ public void testParseCheckerMethodSignatureStaticMethod() { public void testParseCheckerMethodSignatureStaticMethodWithArgs() { var methodKey = InstrumentationServiceImpl.parseCheckerMethodSignature( - "check$org_example_TestClass$staticMethod", + "check$org_example_TestClass$$staticMethod", new Type[] { Type.getType(Class.class), Type.getType("I"), Type.getType(String.class) } ); @@ -200,7 +188,7 @@ public void testParseCheckerMethodSignatureStaticMethodWithArgs() { public void testParseCheckerMethodSignatureStaticMethodInnerClass() { var methodKey = InstrumentationServiceImpl.parseCheckerMethodSignature( - "check$org_example_TestClass$InnerClass$staticMethod", + "check$org_example_TestClass$InnerClass$$staticMethod", new Type[] { Type.getType(Class.class) } ); @@ -225,94 +213,80 @@ public void testParseCheckerMethodSignatureCtorWithArgs() { assertThat(methodKey, equalTo(new MethodKey("org/example/TestClass", "", List.of("I", "java/lang/String")))); } - public void testParseCheckerMethodSignatureIncorrectName() { - var exception = assertThrows( - IllegalArgumentException.class, - () -> InstrumentationServiceImpl.parseCheckerMethodSignature("check$staticMethod", new Type[] { Type.getType(Class.class) }) - ); + public void testParseCheckerMethodSignatureOneDollarSign() { + assertParseCheckerMethodSignatureThrows("has incorrect name format", "check$method", Type.getType(Class.class)); + } - assertThat(exception.getMessage(), containsString("has incorrect name format")); + public void testParseCheckerMethodSignatureMissingClass() { + assertParseCheckerMethodSignatureThrows("has incorrect name format", "check$$staticMethod", Type.getType(Class.class)); + } + + public void testParseCheckerMethodSignatureBlankClass() { + assertParseCheckerMethodSignatureThrows("no class name", "check$$$staticMethod", Type.getType(Class.class)); } public void testParseCheckerMethodSignatureStaticMethodIncorrectArgumentCount() { - var exception = assertThrows( - IllegalArgumentException.class, - () -> InstrumentationServiceImpl.parseCheckerMethodSignature("check$ClassName$staticMethod", new Type[] {}) - ); - assertThat(exception.getMessage(), containsString("It must have a first argument of Class 
type")); + assertParseCheckerMethodSignatureThrows("It must have a first argument of Class type", "check$ClassName$staticMethod"); } public void testParseCheckerMethodSignatureStaticMethodIncorrectArgumentType() { - var exception = assertThrows( - IllegalArgumentException.class, - () -> InstrumentationServiceImpl.parseCheckerMethodSignature( - "check$ClassName$staticMethod", - new Type[] { Type.getType(String.class) } - ) + assertParseCheckerMethodSignatureThrows( + "It must have a first argument of Class type", + "check$ClassName$$staticMethod", + Type.getType(String.class) ); - assertThat(exception.getMessage(), containsString("It must have a first argument of Class type")); } public void testParseCheckerMethodSignatureInstanceMethod() { var methodKey = InstrumentationServiceImpl.parseCheckerMethodSignature( - "check$$instanceMethod", + "check$org_example_TestClass$instanceMethod", new Type[] { Type.getType(Class.class), Type.getType(TestTargetClass.class) } ); - assertThat( - methodKey, - equalTo( - new MethodKey( - "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestTargetClass", - "instanceMethod", - List.of() - ) - ) - ); + assertThat(methodKey, equalTo(new MethodKey("org/example/TestClass", "instanceMethod", List.of()))); } public void testParseCheckerMethodSignatureInstanceMethodWithArgs() { var methodKey = InstrumentationServiceImpl.parseCheckerMethodSignature( - "check$$instanceMethod", + "check$org_example_TestClass$instanceMethod", new Type[] { Type.getType(Class.class), Type.getType(TestTargetClass.class), Type.getType("I"), Type.getType(String.class) } ); - assertThat( - methodKey, - equalTo( - new MethodKey( - "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestTargetClass", - "instanceMethod", - List.of("I", "java/lang/String") - ) - ) - ); + assertThat(methodKey, equalTo(new MethodKey("org/example/TestClass", "instanceMethod", List.of("I", "java/lang/String")))); } public void testParseCheckerMethodSignatureInstanceMethodIncorrectArgumentTypes() { - var exception = assertThrows( - IllegalArgumentException.class, - () -> InstrumentationServiceImpl.parseCheckerMethodSignature("check$$instanceMethod", new Type[] { Type.getType(String.class) }) + assertParseCheckerMethodSignatureThrows( + "It must have a first argument of Class type", + "check$org_example_TestClass$instanceMethod", + Type.getType(String.class) ); - assertThat(exception.getMessage(), containsString("It must have a first argument of Class type")); } public void testParseCheckerMethodSignatureInstanceMethodIncorrectArgumentCount() { - var exception = assertThrows( - IllegalArgumentException.class, - () -> InstrumentationServiceImpl.parseCheckerMethodSignature("check$$instanceMethod", new Type[] { Type.getType(Class.class) }) + assertParseCheckerMethodSignatureThrows( + "a second argument of the class containing the method to instrument", + "check$org_example_TestClass$instanceMethod", + Type.getType(Class.class) ); - assertThat(exception.getMessage(), containsString("a second argument of the class containing the method to instrument")); } public void testParseCheckerMethodSignatureInstanceMethodIncorrectArgumentTypes2() { + assertParseCheckerMethodSignatureThrows( + "a second argument of the class containing the method to instrument", + "check$org_example_TestClass$instanceMethod", + Type.getType(Class.class), + Type.getType("I") + ); + } + + private static void assertParseCheckerMethodSignatureThrows(String messageText, String methodName, 
Type... methodArgs) { var exception = assertThrows( IllegalArgumentException.class, - () -> InstrumentationServiceImpl.parseCheckerMethodSignature( - "check$$instanceMethod", - new Type[] { Type.getType(Class.class), Type.getType("I") } - ) + () -> InstrumentationServiceImpl.parseCheckerMethodSignature(methodName, methodArgs) ); - assertThat(exception.getMessage(), containsString("a second argument of the class containing the method to instrument")); + + assertThat(exception.getMessage(), containsString(messageText)); } + } diff --git a/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java index e9af1d152dd35..5a6bf409f1ac4 100644 --- a/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java +++ b/libs/entitlement/asm-provider/src/test/java/org/elasticsearch/entitlement/instrumentation/impl/InstrumenterTests.java @@ -85,9 +85,7 @@ public interface Testable { * This is a placeholder for real class library methods. * Without the java agent, we can't instrument the real methods, so we instrument this instead. *

- * Methods of this class must have the same signature and the same static/virtual condition as the corresponding real method. - * They should assert that the arguments came through correctly. - * They must not throw {@link TestException}. + * The instrumented copy of this class will not extend this class, but it will implement {@link Testable}. */ public static class TestClassToInstrument implements Testable { @@ -108,31 +106,36 @@ public static void anotherStaticMethod(int arg) {} /** * Interface to test specific, "synthetic" cases (e.g. overloaded methods, overloaded constructors, etc.) that - * may be not present/may be difficult to find or not clear in the production EntitlementChecker interface + * may not be present, may be difficult to find, or may be unclear in the production EntitlementChecker interface. + *

+ * This interface isn't subject to the {@code check$} method naming conventions because it doesn't + * participate in the automated scan that configures the instrumenter based on the method names; + * instead, we configure the instrumenter minimally as needed for each test. */ public interface MockEntitlementChecker { - void checkSomeStaticMethod(Class clazz, int arg); + void checkSomeStaticMethod(Class callerClass, int arg); + + void checkSomeStaticMethodOverload(Class callerClass, int arg, String anotherArg); - void checkSomeStaticMethodOverload(Class clazz, int arg, String anotherArg); + void checkAnotherStaticMethod(Class callerClass, int arg); - void checkAnotherStaticMethod(Class clazz, int arg); + void checkSomeInstanceMethod(Class callerClass, Testable that, int arg, String anotherArg); - void checkSomeInstanceMethod(Class clazz, Testable that, int arg, String anotherArg); + void checkCtor(Class callerClass); - void checkCtor(Class clazz); + void checkCtorOverload(Class callerClass, int arg); - void checkCtorOverload(Class clazz, int arg); } public static class TestEntitlementCheckerHolder { - static TestEntitlementChecker checkerInstance = new TestEntitlementChecker(); + static MockEntitlementCheckerImpl checkerInstance = new MockEntitlementCheckerImpl(); public static MockEntitlementChecker instance() { return checkerInstance; } } - public static class TestEntitlementChecker implements MockEntitlementChecker { + public static class MockEntitlementCheckerImpl implements MockEntitlementChecker { /** * This allows us to test that the instrumentation is correct in both cases: * if the check throws, and if it doesn't. @@ -206,7 +209,7 @@ public void checkCtorOverload(Class callerClass, int arg) { @Before public void resetInstance() { - TestEntitlementCheckerHolder.checkerInstance = new TestEntitlementChecker(); + TestEntitlementCheckerHolder.checkerInstance = new MockEntitlementCheckerImpl(); } public void testStaticMethod() throws Exception { @@ -285,18 +288,16 @@ public void testConstructors() throws Exception { assertEquals(1, TestEntitlementCheckerHolder.checkerInstance.checkCtorIntCallCount); } - /** This test doesn't replace classToInstrument in-place but instead loads a separate - * class with the same class name plus a "_NEW" suffix (classToInstrument.class.getName() + "_NEW") - * that contains the instrumentation. Because of this, we need to configure the Transformer to use a - * MethodKey and instrumentationMethod with slightly different signatures (using the common interface - * Testable) which is not what would happen when it's run by the agent. + /** + * These tests don't replace classToInstrument in-place but instead load a separate class with the same class name. + * This requires a configuration slightly different from what we'd use in production. 
*/ private static InstrumenterImpl createInstrumenter(Map methods) throws NoSuchMethodException { Map checkMethods = new HashMap<>(); for (var entry : methods.entrySet()) { checkMethods.put(getMethodKey(entry.getValue()), getCheckMethod(entry.getKey(), entry.getValue())); } - String checkerClass = Type.getInternalName(InstrumenterTests.MockEntitlementChecker.class); + String checkerClass = Type.getInternalName(MockEntitlementChecker.class); String handleClass = Type.getInternalName(InstrumenterTests.TestEntitlementCheckerHolder.class); String getCheckerClassMethodDescriptor = Type.getMethodDescriptor(Type.getObjectType(checkerClass)); diff --git a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java index a6b8a31fc3894..69fc57973f68a 100644 --- a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java +++ b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java @@ -9,17 +9,73 @@ package org.elasticsearch.entitlement.bridge; +import java.io.InputStream; +import java.io.PrintStream; +import java.io.PrintWriter; +import java.net.ContentHandlerFactory; +import java.net.DatagramPacket; +import java.net.DatagramSocket; +import java.net.DatagramSocketImplFactory; +import java.net.FileNameMap; +import java.net.InetAddress; +import java.net.MulticastSocket; +import java.net.NetworkInterface; +import java.net.ProxySelector; +import java.net.ResponseCache; +import java.net.SocketAddress; +import java.net.SocketImplFactory; import java.net.URL; +import java.net.URLStreamHandler; import java.net.URLStreamHandlerFactory; +import java.util.List; +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSession; +import javax.net.ssl.SSLSocketFactory; + +@SuppressWarnings("unused") // Called from instrumentation code inserted by the Entitlements agent public interface EntitlementChecker { + //////////////////// + // // Exit the JVM process - void check$$exit(Class callerClass, Runtime runtime, int status); + // + + void check$java_lang_Runtime$exit(Class callerClass, Runtime runtime, int status); + + void check$java_lang_Runtime$halt(Class callerClass, Runtime runtime, int status); + + void check$java_lang_System$$exit(Class callerClass, int status); + + //////////////////// + // + // ClassLoader ctor + // + + void check$java_lang_ClassLoader$(Class callerClass); + + void check$java_lang_ClassLoader$(Class callerClass, ClassLoader parent); + + void check$java_lang_ClassLoader$(Class callerClass, String name, ClassLoader parent); + + //////////////////// + // + // SecureClassLoader ctor + // - void check$$halt(Class callerClass, Runtime runtime, int status); + void check$java_security_SecureClassLoader$(Class callerClass); + + void check$java_security_SecureClassLoader$(Class callerClass, ClassLoader parent); + + void check$java_security_SecureClassLoader$(Class callerClass, String name, ClassLoader parent); + + //////////////////// + // + // URLClassLoader constructors + // - // URLClassLoader ctor void check$java_net_URLClassLoader$(Class callerClass, URL[] urls); void check$java_net_URLClassLoader$(Class callerClass, URL[] urls, ClassLoader parent); @@ -29,4 +85,138 @@ public interface EntitlementChecker { void check$java_net_URLClassLoader$(Class callerClass, String name, URL[] urls, 
ClassLoader parent); void check$java_net_URLClassLoader$(Class callerClass, String name, URL[] urls, ClassLoader parent, URLStreamHandlerFactory factory); + + //////////////////// + // + // "setFactory" methods + // + + void check$javax_net_ssl_HttpsURLConnection$setSSLSocketFactory(Class callerClass, HttpsURLConnection conn, SSLSocketFactory sf); + + void check$javax_net_ssl_HttpsURLConnection$$setDefaultSSLSocketFactory(Class callerClass, SSLSocketFactory sf); + + void check$javax_net_ssl_HttpsURLConnection$$setDefaultHostnameVerifier(Class callerClass, HostnameVerifier hv); + + void check$javax_net_ssl_SSLContext$$setDefault(Class callerClass, SSLContext context); + + //////////////////// + // + // Process creation + // + + void check$java_lang_ProcessBuilder$start(Class callerClass, ProcessBuilder that); + + void check$java_lang_ProcessBuilder$$startPipeline(Class callerClass, List builders); + + //////////////////// + // + // JVM-wide state changes + // + + void check$java_lang_System$$setIn(Class callerClass, InputStream in); + + void check$java_lang_System$$setOut(Class callerClass, PrintStream out); + + void check$java_lang_System$$setErr(Class callerClass, PrintStream err); + + void check$java_lang_Runtime$addShutdownHook(Class callerClass, Runtime runtime, Thread hook); + + void check$java_lang_Runtime$removeShutdownHook(Class callerClass, Runtime runtime, Thread hook); + + void check$jdk_tools_jlink_internal_Jlink$(Class callerClass); + + void check$jdk_tools_jlink_internal_Main$$run(Class callerClass, PrintWriter out, PrintWriter err, String... args); + + void check$jdk_vm_ci_services_JVMCIServiceLocator$$getProviders(Class callerClass, Class service); + + void check$jdk_vm_ci_services_Services$$load(Class callerClass, Class service); + + void check$jdk_vm_ci_services_Services$$loadSingle(Class callerClass, Class service, boolean required); + + void check$com_sun_tools_jdi_VirtualMachineManagerImpl$$virtualMachineManager(Class callerClass); + + void check$java_lang_Thread$$setDefaultUncaughtExceptionHandler(Class callerClass, Thread.UncaughtExceptionHandler ueh); + + void check$java_util_spi_LocaleServiceProvider$(Class callerClass); + + void check$java_text_spi_BreakIteratorProvider$(Class callerClass); + + void check$java_text_spi_CollatorProvider$(Class callerClass); + + void check$java_text_spi_DateFormatProvider$(Class callerClass); + + void check$java_text_spi_DateFormatSymbolsProvider$(Class callerClass); + + void check$java_text_spi_DecimalFormatSymbolsProvider$(Class callerClass); + + void check$java_text_spi_NumberFormatProvider$(Class callerClass); + + void check$java_util_spi_CalendarDataProvider$(Class callerClass); + + void check$java_util_spi_CalendarNameProvider$(Class callerClass); + + void check$java_util_spi_CurrencyNameProvider$(Class callerClass); + + void check$java_util_spi_LocaleNameProvider$(Class callerClass); + + void check$java_util_spi_TimeZoneNameProvider$(Class callerClass); + + void check$java_util_logging_LogManager$(Class callerClass); + + void check$java_net_DatagramSocket$$setDatagramSocketImplFactory(Class callerClass, DatagramSocketImplFactory fac); + + void check$java_net_HttpURLConnection$$setFollowRedirects(Class callerClass, boolean set); + + void check$java_net_ServerSocket$$setSocketFactory(Class callerClass, SocketImplFactory fac); + + void check$java_net_Socket$$setSocketImplFactory(Class callerClass, SocketImplFactory fac); + + void check$java_net_URL$$setURLStreamHandlerFactory(Class callerClass, URLStreamHandlerFactory fac); + + 
void check$java_net_URLConnection$$setFileNameMap(Class callerClass, FileNameMap map); + + void check$java_net_URLConnection$$setContentHandlerFactory(Class callerClass, ContentHandlerFactory fac); + + //////////////////// + // + // Network access + // + void check$java_net_ProxySelector$$setDefault(Class callerClass, ProxySelector ps); + + void check$java_net_ResponseCache$$setDefault(Class callerClass, ResponseCache rc); + + void check$java_net_spi_InetAddressResolverProvider$(Class callerClass); + + void check$java_net_spi_URLStreamHandlerProvider$(Class callerClass); + + void check$java_net_URL$(Class callerClass, String protocol, String host, int port, String file, URLStreamHandler handler); + + void check$java_net_URL$(Class callerClass, URL context, String spec, URLStreamHandler handler); + + // The only implementation of SSLSession#getSessionContext(); unfortunately it's an interface, so we need to check the implementation + void check$sun_security_ssl_SSLSessionImpl$getSessionContext(Class callerClass, SSLSession sslSession); + + void check$java_net_DatagramSocket$bind(Class callerClass, DatagramSocket that, SocketAddress addr); + + void check$java_net_DatagramSocket$connect(Class callerClass, DatagramSocket that, InetAddress addr); + + void check$java_net_DatagramSocket$connect(Class callerClass, DatagramSocket that, SocketAddress addr); + + void check$java_net_DatagramSocket$send(Class callerClass, DatagramSocket that, DatagramPacket p); + + void check$java_net_DatagramSocket$receive(Class callerClass, DatagramSocket that, DatagramPacket p); + + void check$java_net_DatagramSocket$joinGroup(Class callerClass, DatagramSocket that, SocketAddress addr, NetworkInterface ni); + + void check$java_net_DatagramSocket$leaveGroup(Class callerClass, DatagramSocket that, SocketAddress addr, NetworkInterface ni); + + void check$java_net_MulticastSocket$joinGroup(Class callerClass, MulticastSocket that, InetAddress addr); + + void check$java_net_MulticastSocket$joinGroup(Class callerClass, MulticastSocket that, SocketAddress addr, NetworkInterface ni); + + void check$java_net_MulticastSocket$leaveGroup(Class callerClass, MulticastSocket that, InetAddress addr); + + void check$java_net_MulticastSocket$leaveGroup(Class callerClass, MulticastSocket that, SocketAddress addr, NetworkInterface ni); + + void check$java_net_MulticastSocket$send(Class callerClass, MulticastSocket that, DatagramPacket p, byte ttl); } diff --git a/libs/entitlement/qa/common/src/main/java/module-info.java b/libs/entitlement/qa/common/src/main/java/module-info.java index 2dd37e3174e08..211b7041e97ea 100644 --- a/libs/entitlement/qa/common/src/main/java/module-info.java +++ b/libs/entitlement/qa/common/src/main/java/module-info.java @@ -12,5 +12,8 @@ requires org.elasticsearch.base; requires org.elasticsearch.logging; + // Modules we'll attempt to use in order to exercise entitlements + requires java.logging; + exports org.elasticsearch.entitlement.qa.common; } diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java new file mode 100644 index 0000000000000..fae873123528d --- /dev/null +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java @@ -0,0 +1,410 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.qa.common; + +import java.io.IOException; +import java.net.DatagramPacket; +import java.net.DatagramSocket; +import java.net.DatagramSocketImpl; +import java.net.InetAddress; +import java.net.NetworkInterface; +import java.net.Socket; +import java.net.SocketAddress; +import java.net.SocketException; +import java.security.cert.Certificate; +import java.text.BreakIterator; +import java.text.Collator; +import java.text.DateFormat; +import java.text.DateFormatSymbols; +import java.text.DecimalFormatSymbols; +import java.text.NumberFormat; +import java.text.spi.BreakIteratorProvider; +import java.text.spi.CollatorProvider; +import java.text.spi.DateFormatProvider; +import java.text.spi.DateFormatSymbolsProvider; +import java.text.spi.DecimalFormatSymbolsProvider; +import java.text.spi.NumberFormatProvider; +import java.util.Locale; +import java.util.Map; +import java.util.spi.CalendarDataProvider; +import java.util.spi.CalendarNameProvider; +import java.util.spi.CurrencyNameProvider; +import java.util.spi.LocaleNameProvider; +import java.util.spi.LocaleServiceProvider; +import java.util.spi.TimeZoneNameProvider; + +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLSocketFactory; + +/** + * A collection of concrete subclasses that we can instantiate but that don't actually work. + *

+ * A bit like Mockito but way more painful. + */ +class DummyImplementations { + + static class DummyLocaleServiceProvider extends LocaleServiceProvider { + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyBreakIteratorProvider extends BreakIteratorProvider { + + @Override + public BreakIterator getWordInstance(Locale locale) { + throw unexpected(); + } + + @Override + public BreakIterator getLineInstance(Locale locale) { + throw unexpected(); + } + + @Override + public BreakIterator getCharacterInstance(Locale locale) { + throw unexpected(); + } + + @Override + public BreakIterator getSentenceInstance(Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyCollatorProvider extends CollatorProvider { + + @Override + public Collator getInstance(Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyDateFormatProvider extends DateFormatProvider { + + @Override + public DateFormat getTimeInstance(int style, Locale locale) { + throw unexpected(); + } + + @Override + public DateFormat getDateInstance(int style, Locale locale) { + throw unexpected(); + } + + @Override + public DateFormat getDateTimeInstance(int dateStyle, int timeStyle, Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyDateFormatSymbolsProvider extends DateFormatSymbolsProvider { + + @Override + public DateFormatSymbols getInstance(Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyDecimalFormatSymbolsProvider extends DecimalFormatSymbolsProvider { + + @Override + public DecimalFormatSymbols getInstance(Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyNumberFormatProvider extends NumberFormatProvider { + + @Override + public NumberFormat getCurrencyInstance(Locale locale) { + throw unexpected(); + } + + @Override + public NumberFormat getIntegerInstance(Locale locale) { + throw unexpected(); + } + + @Override + public NumberFormat getNumberInstance(Locale locale) { + throw unexpected(); + } + + @Override + public NumberFormat getPercentInstance(Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyCalendarDataProvider extends CalendarDataProvider { + + @Override + public int getFirstDayOfWeek(Locale locale) { + throw unexpected(); + } + + @Override + public int getMinimalDaysInFirstWeek(Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyCalendarNameProvider extends CalendarNameProvider { + + @Override + public String getDisplayName(String calendarType, int field, int value, int style, Locale locale) { + throw unexpected(); + } + + @Override + public Map getDisplayNames(String calendarType, int field, int style, Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyCurrencyNameProvider extends CurrencyNameProvider { + + @Override + public String getSymbol(String currencyCode, Locale locale) { + throw 
unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyLocaleNameProvider extends LocaleNameProvider { + + @Override + public String getDisplayLanguage(String languageCode, Locale locale) { + throw unexpected(); + } + + @Override + public String getDisplayCountry(String countryCode, Locale locale) { + throw unexpected(); + } + + @Override + public String getDisplayVariant(String variant, Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyTimeZoneNameProvider extends TimeZoneNameProvider { + + @Override + public String getDisplayName(String ID, boolean daylight, int style, Locale locale) { + throw unexpected(); + } + + @Override + public Locale[] getAvailableLocales() { + throw unexpected(); + } + } + + static class DummyHttpsURLConnection extends HttpsURLConnection { + DummyHttpsURLConnection() { + super(null); + } + + @Override + public void connect() { + throw unexpected(); + } + + @Override + public void disconnect() { + throw unexpected(); + } + + @Override + public boolean usingProxy() { + throw unexpected(); + } + + @Override + public String getCipherSuite() { + throw unexpected(); + } + + @Override + public Certificate[] getLocalCertificates() { + throw unexpected(); + } + + @Override + public Certificate[] getServerCertificates() { + throw unexpected(); + } + } + + static class DummySSLSocketFactory extends SSLSocketFactory { + @Override + public Socket createSocket(String host, int port) { + throw unexpected(); + } + + @Override + public Socket createSocket(String host, int port, InetAddress localHost, int localPort) { + throw unexpected(); + } + + @Override + public Socket createSocket(InetAddress host, int port) { + throw unexpected(); + } + + @Override + public Socket createSocket(InetAddress address, int port, InetAddress localAddress, int localPort) { + throw unexpected(); + } + + @Override + public String[] getDefaultCipherSuites() { + throw unexpected(); + } + + @Override + public String[] getSupportedCipherSuites() { + throw unexpected(); + } + + @Override + public Socket createSocket(Socket s, String host, int port, boolean autoClose) { + throw unexpected(); + } + } + + static class DummyDatagramSocket extends DatagramSocket { + DummyDatagramSocket() throws SocketException { + super(new DatagramSocketImpl() { + @Override + protected void create() throws SocketException {} + + @Override + protected void bind(int lport, InetAddress laddr) throws SocketException {} + + @Override + protected void send(DatagramPacket p) throws IOException {} + + @Override + protected int peek(InetAddress i) throws IOException { + return 0; + } + + @Override + protected int peekData(DatagramPacket p) throws IOException { + return 0; + } + + @Override + protected void receive(DatagramPacket p) throws IOException {} + + @Override + protected void setTTL(byte ttl) throws IOException {} + + @Override + protected byte getTTL() throws IOException { + return 0; + } + + @Override + protected void setTimeToLive(int ttl) throws IOException {} + + @Override + protected int getTimeToLive() throws IOException { + return 0; + } + + @Override + protected void join(InetAddress inetaddr) throws IOException {} + + @Override + protected void leave(InetAddress inetaddr) throws IOException {} + + @Override + protected void joinGroup(SocketAddress mcastaddr, NetworkInterface netIf) throws IOException {} + + @Override + protected void 
leaveGroup(SocketAddress mcastaddr, NetworkInterface netIf) throws IOException {} + + @Override + protected void close() {} + + @Override + public void setOption(int optID, Object value) throws SocketException {} + + @Override + public Object getOption(int optID) throws SocketException { + return null; + } + + @Override + protected void connect(InetAddress address, int port) throws SocketException {} + }); + } + } + + private static RuntimeException unexpected() { + return new IllegalStateException("This method isn't supposed to be called"); + } +} diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java index 1ac4a7506eacb..3a5480f468528 100644 --- a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java @@ -11,7 +11,20 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; +import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyBreakIteratorProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyCalendarDataProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyCalendarNameProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyCollatorProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyCurrencyNameProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyDateFormatProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyDateFormatSymbolsProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyDecimalFormatSymbolsProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyLocaleNameProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyLocaleServiceProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyNumberFormatProvider; +import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyTimeZoneNameProvider; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.rest.BaseRestHandler; @@ -20,56 +33,389 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; -import java.io.UncheckedIOException; +import java.net.DatagramPacket; +import java.net.DatagramSocket; +import java.net.HttpURLConnection; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.MalformedURLException; +import java.net.NetworkInterface; +import java.net.ProxySelector; +import java.net.ResponseCache; +import java.net.ServerSocket; +import java.net.Socket; +import java.net.SocketException; import java.net.URL; import java.net.URLClassLoader; +import java.net.URLConnection; +import java.net.URLStreamHandler; +import java.net.spi.InetAddressResolver; +import java.net.spi.InetAddressResolverProvider; +import java.net.spi.URLStreamHandlerProvider; +import java.security.NoSuchAlgorithmException; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import 
javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSession; +import javax.net.ssl.SSLSocket; +import javax.net.ssl.SSLSocketFactory; + import static java.util.Map.entry; +import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction.CheckAction.alwaysDenied; +import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction.CheckAction.deniedToPlugins; +import static org.elasticsearch.entitlement.qa.common.RestEntitlementsCheckAction.CheckAction.forPlugins; import static org.elasticsearch.rest.RestRequest.Method.GET; +@SuppressWarnings("unused") public class RestEntitlementsCheckAction extends BaseRestHandler { private static final Logger logger = LogManager.getLogger(RestEntitlementsCheckAction.class); + public static final Thread NO_OP_SHUTDOWN_HOOK = new Thread(() -> {}, "Shutdown hook for testing"); private final String prefix; - private record CheckAction(Runnable action, boolean isServerOnly) { - - static CheckAction serverOnly(Runnable action) { + record CheckAction(CheckedRunnable action, boolean isAlwaysDeniedToPlugins) { + /** + * These cannot be granted to plugins, so our test plugins cannot test the "allowed" case. + * Used both for always-denied entitlements as well as those granted only to the server itself. + */ + static CheckAction deniedToPlugins(CheckedRunnable action) { return new CheckAction(action, true); } - static CheckAction serverAndPlugin(Runnable action) { + static CheckAction forPlugins(CheckedRunnable action) { return new CheckAction(action, false); } + + static CheckAction alwaysDenied(CheckedRunnable action) { + return new CheckAction(action, true); + } } private static final Map checkActions = Map.ofEntries( - entry("runtime_exit", CheckAction.serverOnly(RestEntitlementsCheckAction::runtimeExit)), - entry("runtime_halt", CheckAction.serverOnly(RestEntitlementsCheckAction::runtimeHalt)), - entry("create_classloader", CheckAction.serverAndPlugin(RestEntitlementsCheckAction::createClassLoader)) + entry("runtime_exit", deniedToPlugins(RestEntitlementsCheckAction::runtimeExit)), + entry("runtime_halt", deniedToPlugins(RestEntitlementsCheckAction::runtimeHalt)), + entry("system_exit", deniedToPlugins(RestEntitlementsCheckAction::systemExit)), + entry("create_classloader", forPlugins(RestEntitlementsCheckAction::createClassLoader)), + entry("processBuilder_start", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_start)), + entry("processBuilder_startPipeline", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_startPipeline)), + entry("set_https_connection_properties", forPlugins(RestEntitlementsCheckAction::setHttpsConnectionProperties)), + entry("set_default_ssl_socket_factory", alwaysDenied(RestEntitlementsCheckAction::setDefaultSSLSocketFactory)), + entry("set_default_hostname_verifier", alwaysDenied(RestEntitlementsCheckAction::setDefaultHostnameVerifier)), + entry("set_default_ssl_context", alwaysDenied(RestEntitlementsCheckAction::setDefaultSSLContext)), + entry("system_setIn", alwaysDenied(RestEntitlementsCheckAction::system$$setIn)), + entry("system_setOut", alwaysDenied(RestEntitlementsCheckAction::system$$setOut)), + entry("system_setErr", alwaysDenied(RestEntitlementsCheckAction::system$$setErr)), + entry("runtime_addShutdownHook", alwaysDenied(RestEntitlementsCheckAction::runtime$addShutdownHook)), + entry("runtime_removeShutdownHook", alwaysDenied(RestEntitlementsCheckAction::runtime$$removeShutdownHook)), + entry( + 
"thread_setDefaultUncaughtExceptionHandler", + alwaysDenied(RestEntitlementsCheckAction::thread$$setDefaultUncaughtExceptionHandler) + ), + entry("localeServiceProvider", alwaysDenied(RestEntitlementsCheckAction::localeServiceProvider$)), + entry("breakIteratorProvider", alwaysDenied(RestEntitlementsCheckAction::breakIteratorProvider$)), + entry("collatorProvider", alwaysDenied(RestEntitlementsCheckAction::collatorProvider$)), + entry("dateFormatProvider", alwaysDenied(RestEntitlementsCheckAction::dateFormatProvider$)), + entry("dateFormatSymbolsProvider", alwaysDenied(RestEntitlementsCheckAction::dateFormatSymbolsProvider$)), + entry("decimalFormatSymbolsProvider", alwaysDenied(RestEntitlementsCheckAction::decimalFormatSymbolsProvider$)), + entry("numberFormatProvider", alwaysDenied(RestEntitlementsCheckAction::numberFormatProvider$)), + entry("calendarDataProvider", alwaysDenied(RestEntitlementsCheckAction::calendarDataProvider$)), + entry("calendarNameProvider", alwaysDenied(RestEntitlementsCheckAction::calendarNameProvider$)), + entry("currencyNameProvider", alwaysDenied(RestEntitlementsCheckAction::currencyNameProvider$)), + entry("localeNameProvider", alwaysDenied(RestEntitlementsCheckAction::localeNameProvider$)), + entry("timeZoneNameProvider", alwaysDenied(RestEntitlementsCheckAction::timeZoneNameProvider$)), + entry("logManager", alwaysDenied(RestEntitlementsCheckAction::logManager$)), + + // This group is a bit nasty: if entitlements don't prevent these, then networking is + // irreparably borked for the remainder of the test run. + entry( + "datagramSocket_setDatagramSocketImplFactory", + alwaysDenied(RestEntitlementsCheckAction::datagramSocket$$setDatagramSocketImplFactory) + ), + entry("httpURLConnection_setFollowRedirects", alwaysDenied(RestEntitlementsCheckAction::httpURLConnection$$setFollowRedirects)), + entry("serverSocket_setSocketFactory", alwaysDenied(RestEntitlementsCheckAction::serverSocket$$setSocketFactory)), + entry("socket_setSocketImplFactory", alwaysDenied(RestEntitlementsCheckAction::socket$$setSocketImplFactory)), + entry("url_setURLStreamHandlerFactory", alwaysDenied(RestEntitlementsCheckAction::url$$setURLStreamHandlerFactory)), + entry("urlConnection_setFileNameMap", alwaysDenied(RestEntitlementsCheckAction::urlConnection$$setFileNameMap)), + entry("urlConnection_setContentHandlerFactory", alwaysDenied(RestEntitlementsCheckAction::urlConnection$$setContentHandlerFactory)), + + entry("proxySelector_setDefault", alwaysDenied(RestEntitlementsCheckAction::setDefaultProxySelector)), + entry("responseCache_setDefault", alwaysDenied(RestEntitlementsCheckAction::setDefaultResponseCache)), + entry("createInetAddressResolverProvider", alwaysDenied(RestEntitlementsCheckAction::createInetAddressResolverProvider)), + entry("createURLStreamHandlerProvider", alwaysDenied(RestEntitlementsCheckAction::createURLStreamHandlerProvider)), + entry("createURLWithURLStreamHandler", alwaysDenied(RestEntitlementsCheckAction::createURLWithURLStreamHandler)), + entry("createURLWithURLStreamHandler2", alwaysDenied(RestEntitlementsCheckAction::createURLWithURLStreamHandler2)), + entry("sslSessionImpl_getSessionContext", alwaysDenied(RestEntitlementsCheckAction::sslSessionImplGetSessionContext)), + entry("datagram_socket_bind", forPlugins(RestEntitlementsCheckAction::bindDatagramSocket)), + entry("datagram_socket_connect", forPlugins(RestEntitlementsCheckAction::connectDatagramSocket)), + entry("datagram_socket_send", forPlugins(RestEntitlementsCheckAction::sendDatagramSocket)), + 
entry("datagram_socket_receive", forPlugins(RestEntitlementsCheckAction::receiveDatagramSocket)), + entry("datagram_socket_join_group", forPlugins(RestEntitlementsCheckAction::joinGroupDatagramSocket)), + entry("datagram_socket_leave_group", forPlugins(RestEntitlementsCheckAction::leaveGroupDatagramSocket)) ); + private static void createURLStreamHandlerProvider() { + var x = new URLStreamHandlerProvider() { + @Override + public URLStreamHandler createURLStreamHandler(String protocol) { + return null; + } + }; + } + + private static void sslSessionImplGetSessionContext() throws IOException { + SSLSocketFactory factory = HttpsURLConnection.getDefaultSSLSocketFactory(); + try (SSLSocket socket = (SSLSocket) factory.createSocket()) { + SSLSession session = socket.getSession(); + + session.getSessionContext(); + } + } + + @SuppressWarnings("deprecation") + private static void createURLWithURLStreamHandler() throws MalformedURLException { + var x = new URL("http", "host", 1234, "file", new URLStreamHandler() { + @Override + protected URLConnection openConnection(URL u) { + return null; + } + }); + } + + @SuppressWarnings("deprecation") + private static void createURLWithURLStreamHandler2() throws MalformedURLException { + var x = new URL(null, "spec", new URLStreamHandler() { + @Override + protected URLConnection openConnection(URL u) { + return null; + } + }); + } + + private static void createInetAddressResolverProvider() { + var x = new InetAddressResolverProvider() { + @Override + public InetAddressResolver get(Configuration configuration) { + return null; + } + + @Override + public String name() { + return "TEST"; + } + }; + } + + private static void setDefaultResponseCache() { + ResponseCache.setDefault(null); + } + + private static void setDefaultProxySelector() { + ProxySelector.setDefault(null); + } + + private static void setDefaultSSLContext() throws NoSuchAlgorithmException { + SSLContext.setDefault(SSLContext.getDefault()); + } + + private static void setDefaultHostnameVerifier() { + HttpsURLConnection.setDefaultHostnameVerifier((hostname, session) -> false); + } + + private static void setDefaultSSLSocketFactory() { + HttpsURLConnection.setDefaultSSLSocketFactory(new DummyImplementations.DummySSLSocketFactory()); + } + @SuppressForbidden(reason = "Specifically testing Runtime.exit") private static void runtimeExit() { - logger.info("Calling Runtime.exit;"); Runtime.getRuntime().exit(123); } @SuppressForbidden(reason = "Specifically testing Runtime.halt") private static void runtimeHalt() { - logger.info("Calling Runtime.halt;"); Runtime.getRuntime().halt(123); } - private static void createClassLoader() { - logger.info("Calling new URLClassLoader"); + @SuppressForbidden(reason = "Specifically testing System.exit") + private static void systemExit() { + System.exit(123); + } + + private static void createClassLoader() throws IOException { try (var classLoader = new URLClassLoader("test", new URL[0], RestEntitlementsCheckAction.class.getClassLoader())) { logger.info("Created URLClassLoader [{}]", classLoader.getName()); - } catch (IOException e) { - throw new UncheckedIOException(e); + } + } + + private static void processBuilder_start() throws IOException { + new ProcessBuilder("").start(); + } + + private static void processBuilder_startPipeline() throws IOException { + ProcessBuilder.startPipeline(List.of()); + } + + private static void setHttpsConnectionProperties() { + new DummyImplementations.DummyHttpsURLConnection().setSSLSocketFactory(new 
DummyImplementations.DummySSLSocketFactory()); + } + + private static void system$$setIn() { + System.setIn(System.in); + } + + @SuppressForbidden(reason = "This should be a no-op so we don't interfere with system streams") + private static void system$$setOut() { + System.setOut(System.out); + } + + @SuppressForbidden(reason = "This should be a no-op so we don't interfere with system streams") + private static void system$$setErr() { + System.setErr(System.err); + } + + private static void runtime$addShutdownHook() { + Runtime.getRuntime().addShutdownHook(NO_OP_SHUTDOWN_HOOK); + } + + private static void runtime$$removeShutdownHook() { + Runtime.getRuntime().removeShutdownHook(NO_OP_SHUTDOWN_HOOK); + } + + private static void thread$$setDefaultUncaughtExceptionHandler() { + Thread.setDefaultUncaughtExceptionHandler(Thread.getDefaultUncaughtExceptionHandler()); + } + + private static void localeServiceProvider$() { + new DummyLocaleServiceProvider(); + } + + private static void breakIteratorProvider$() { + new DummyBreakIteratorProvider(); + } + + private static void collatorProvider$() { + new DummyCollatorProvider(); + } + + private static void dateFormatProvider$() { + new DummyDateFormatProvider(); + } + + private static void dateFormatSymbolsProvider$() { + new DummyDateFormatSymbolsProvider(); + } + + private static void decimalFormatSymbolsProvider$() { + new DummyDecimalFormatSymbolsProvider(); + } + + private static void numberFormatProvider$() { + new DummyNumberFormatProvider(); + } + + private static void calendarDataProvider$() { + new DummyCalendarDataProvider(); + } + + private static void calendarNameProvider$() { + new DummyCalendarNameProvider(); + } + + private static void currencyNameProvider$() { + new DummyCurrencyNameProvider(); + } + + private static void localeNameProvider$() { + new DummyLocaleNameProvider(); + } + + private static void timeZoneNameProvider$() { + new DummyTimeZoneNameProvider(); + } + + private static void logManager$() { + new java.util.logging.LogManager() { + }; + } + + @SuppressWarnings("deprecation") + @SuppressForbidden(reason = "We're required to prevent calls to this forbidden API") + private static void datagramSocket$$setDatagramSocketImplFactory() throws IOException { + DatagramSocket.setDatagramSocketImplFactory(() -> { throw new IllegalStateException(); }); + } + + private static void httpURLConnection$$setFollowRedirects() { + HttpURLConnection.setFollowRedirects(HttpURLConnection.getFollowRedirects()); + } + + @SuppressWarnings("deprecation") + @SuppressForbidden(reason = "We're required to prevent calls to this forbidden API") + private static void serverSocket$$setSocketFactory() throws IOException { + ServerSocket.setSocketFactory(() -> { throw new IllegalStateException(); }); + } + + @SuppressWarnings("deprecation") + @SuppressForbidden(reason = "We're required to prevent calls to this forbidden API") + private static void socket$$setSocketImplFactory() throws IOException { + Socket.setSocketImplFactory(() -> { throw new IllegalStateException(); }); + } + + private static void url$$setURLStreamHandlerFactory() { + URL.setURLStreamHandlerFactory(__ -> { throw new IllegalStateException(); }); + } + + private static void urlConnection$$setFileNameMap() { + URLConnection.setFileNameMap(__ -> { throw new IllegalStateException(); }); + } + + private static void urlConnection$$setContentHandlerFactory() { + URLConnection.setContentHandlerFactory(__ -> { throw new IllegalStateException(); }); + } + + private static void 
bindDatagramSocket() throws SocketException { + try (var socket = new DatagramSocket(null)) { + socket.bind(null); + } + } + + @SuppressForbidden(reason = "testing entitlements") + private static void connectDatagramSocket() throws SocketException { + try (var socket = new DummyImplementations.DummyDatagramSocket()) { + socket.connect(new InetSocketAddress(1234)); + } + } + + private static void joinGroupDatagramSocket() throws IOException { + try (var socket = new DummyImplementations.DummyDatagramSocket()) { + socket.joinGroup( + new InetSocketAddress(InetAddress.getByAddress(new byte[] { (byte) 230, 0, 0, 1 }), 1234), + NetworkInterface.getByIndex(0) + ); + } + } + + private static void leaveGroupDatagramSocket() throws IOException { + try (var socket = new DummyImplementations.DummyDatagramSocket()) { + socket.leaveGroup( + new InetSocketAddress(InetAddress.getByAddress(new byte[] { (byte) 230, 0, 0, 1 }), 1234), + NetworkInterface.getByIndex(0) + ); + } + } + + @SuppressForbidden(reason = "testing entitlements") + private static void sendDatagramSocket() throws IOException { + try (var socket = new DummyImplementations.DummyDatagramSocket()) { + socket.send(new DatagramPacket(new byte[] { 0 }, 1, InetAddress.getLocalHost(), 1234)); + } + } + + @SuppressForbidden(reason = "testing entitlements") + private static void receiveDatagramSocket() throws IOException { + try (var socket = new DummyImplementations.DummyDatagramSocket()) { + socket.receive(new DatagramPacket(new byte[1], 1, InetAddress.getLocalHost(), 1234)); } } @@ -77,10 +423,10 @@ public RestEntitlementsCheckAction(String prefix) { this.prefix = prefix; } - public static Set getServerAndPluginsCheckActions() { + public static Set getCheckActionsAllowedInPlugins() { return checkActions.entrySet() .stream() - .filter(kv -> kv.getValue().isServerOnly() == false) + .filter(kv -> kv.getValue().isAlwaysDeniedToPlugins() == false) .map(Map.Entry::getKey) .collect(Collectors.toSet()); } @@ -112,6 +458,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli } return channel -> { + logger.info("Calling check action [{}]", actionName); checkAction.action().run(); channel.sendResponse(new RestResponse(RestStatus.OK, Strings.format("Succesfully executed action [%s]", actionName))); }; diff --git a/libs/entitlement/qa/entitlement-allowed-nonmodular/build.gradle b/libs/entitlement/qa/entitlement-allowed-nonmodular/build.gradle index 7b3015a5ab831..316b61e15707e 100644 --- a/libs/entitlement/qa/entitlement-allowed-nonmodular/build.gradle +++ b/libs/entitlement/qa/entitlement-allowed-nonmodular/build.gradle @@ -10,9 +10,9 @@ apply plugin: 'elasticsearch.base-internal-es-plugin' esplugin { - name 'entitlement-allowed-nonmodular' - description 'A non-modular test module that invokes entitlement checks that are supposed to be granted' - classname 'org.elasticsearch.entitlement.qa.nonmodular.EntitlementAllowedNonModularPlugin' + name = 'entitlement-allowed-nonmodular' + description = 'A non-modular test module that invokes entitlement checks that are supposed to be granted' + classname = 'org.elasticsearch.entitlement.qa.nonmodular.EntitlementAllowedNonModularPlugin' } dependencies { diff --git a/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java index 
d65981c30f0be..82146e6a87759 100644 --- a/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java +++ b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementAllowedNonModularPlugin.java @@ -27,7 +27,6 @@ import java.util.function.Supplier; public class EntitlementAllowedNonModularPlugin extends Plugin implements ActionPlugin { - @Override public List getRestHandlers( final Settings settings, diff --git a/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml index 45d4e57f66521..05a94f09264a8 100644 --- a/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml +++ b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml @@ -1,2 +1,8 @@ ALL-UNNAMED: - create_class_loader + - set_https_connection_properties + - network: + actions: + - listen + - accept + - connect diff --git a/libs/entitlement/qa/entitlement-allowed/build.gradle b/libs/entitlement/qa/entitlement-allowed/build.gradle index 6090d658d2081..b9518f8d65fb5 100644 --- a/libs/entitlement/qa/entitlement-allowed/build.gradle +++ b/libs/entitlement/qa/entitlement-allowed/build.gradle @@ -10,9 +10,9 @@ apply plugin: 'elasticsearch.base-internal-es-plugin' esplugin { - name 'entitlement-allowed' - description 'A test module that invokes entitlement checks that are supposed to be granted' - classname 'org.elasticsearch.entitlement.qa.EntitlementAllowedPlugin' + name = 'entitlement-allowed' + description = 'A test module that invokes entitlement checks that are supposed to be granted' + classname = 'org.elasticsearch.entitlement.qa.EntitlementAllowedPlugin' } dependencies { diff --git a/libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java b/libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java index d81e23e311be1..8649daf272e70 100644 --- a/libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java +++ b/libs/entitlement/qa/entitlement-allowed/src/main/java/org/elasticsearch/entitlement/qa/EntitlementAllowedPlugin.java @@ -27,7 +27,6 @@ import java.util.function.Supplier; public class EntitlementAllowedPlugin extends Plugin implements ActionPlugin { - @Override public List getRestHandlers( final Settings settings, diff --git a/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml b/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml index 7b5e848f414b2..0d2c66c2daa2c 100644 --- a/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml +++ b/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml @@ -1,2 +1,8 @@ org.elasticsearch.entitlement.qa.common: - create_class_loader + - set_https_connection_properties + - network: + actions: + - listen + - accept + - connect diff --git a/libs/entitlement/qa/entitlement-denied-nonmodular/build.gradle b/libs/entitlement/qa/entitlement-denied-nonmodular/build.gradle index bddd6c83c7cc4..6a88dd66eaf75 100644 --- a/libs/entitlement/qa/entitlement-denied-nonmodular/build.gradle +++ 
b/libs/entitlement/qa/entitlement-denied-nonmodular/build.gradle @@ -10,9 +10,9 @@ apply plugin: 'elasticsearch.base-internal-es-plugin' esplugin { - name 'entitlement-denied-nonmodular' - description 'A non-modular test module that invokes non-granted entitlement and triggers exceptions' - classname 'org.elasticsearch.entitlement.qa.nonmodular.EntitlementDeniedNonModularPlugin' + name = 'entitlement-denied-nonmodular' + description = 'A non-modular test module that invokes non-granted entitlement and triggers exceptions' + classname = 'org.elasticsearch.entitlement.qa.nonmodular.EntitlementDeniedNonModularPlugin' } dependencies { diff --git a/libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java b/libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java index 0f908d84260fb..7ca89c735a602 100644 --- a/libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java +++ b/libs/entitlement/qa/entitlement-denied-nonmodular/src/main/java/org/elasticsearch/entitlement/qa/nonmodular/EntitlementDeniedNonModularPlugin.java @@ -27,7 +27,6 @@ import java.util.function.Supplier; public class EntitlementDeniedNonModularPlugin extends Plugin implements ActionPlugin { - @Override public List getRestHandlers( final Settings settings, diff --git a/libs/entitlement/qa/entitlement-denied/build.gradle b/libs/entitlement/qa/entitlement-denied/build.gradle index cc269135c5bf5..9d1872563b8c5 100644 --- a/libs/entitlement/qa/entitlement-denied/build.gradle +++ b/libs/entitlement/qa/entitlement-denied/build.gradle @@ -10,9 +10,9 @@ apply plugin: 'elasticsearch.base-internal-es-plugin' esplugin { - name 'entitlement-denied' - description 'A test module that invokes non-granted entitlement and triggers exceptions' - classname 'org.elasticsearch.entitlement.qa.EntitlementDeniedPlugin' + name = 'entitlement-denied' + description = 'A test module that invokes non-granted entitlement and triggers exceptions' + classname = 'org.elasticsearch.entitlement.qa.EntitlementDeniedPlugin' } dependencies { diff --git a/libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java b/libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java index 0ed27e2e576e7..2a2fd35d47cf3 100644 --- a/libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java +++ b/libs/entitlement/qa/entitlement-denied/src/main/java/org/elasticsearch/entitlement/qa/EntitlementDeniedPlugin.java @@ -27,7 +27,6 @@ import java.util.function.Supplier; public class EntitlementDeniedPlugin extends Plugin implements ActionPlugin { - @Override public List getRestHandlers( final Settings settings, diff --git a/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java b/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java index 2fd4472f5cc65..c38e8b3f35efb 100644 --- a/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java +++ b/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsAllowedIT.java @@ -46,7 +46,7 @@ public EntitlementsAllowedIT(@Name("pathPrefix") String 
pathPrefix, @Name("actio public static Iterable data() { return Stream.of("allowed", "allowed_nonmodular") .flatMap( - path -> RestEntitlementsCheckAction.getServerAndPluginsCheckActions().stream().map(action -> new Object[] { path, action }) + path -> RestEntitlementsCheckAction.getCheckActionsAllowedInPlugins().stream().map(action -> new Object[] { path, action }) ) .toList(); } diff --git a/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsDeniedIT.java b/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsDeniedIT.java index 9f55a7c9e894d..e2e5a3c4c61e6 100644 --- a/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsDeniedIT.java +++ b/libs/entitlement/qa/src/javaRestTest/java/org/elasticsearch/entitlement/qa/EntitlementsDeniedIT.java @@ -31,6 +31,8 @@ public class EntitlementsDeniedIT extends ESRestTestCase { .plugin("entitlement-denied-nonmodular") .systemProperty("es.entitlements.enabled", "true") .setting("xpack.security.enabled", "false") + // Logs in libs/entitlement/qa/build/test-results/javaRestTest/TEST-org.elasticsearch.entitlement.qa.EntitlementsDeniedIT.xml + // .setting("logger.org.elasticsearch.entitlement", "DEBUG") .build(); @Override diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java index 2abfb11964a93..257d130302580 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/bootstrap/EntitlementBootstrap.java @@ -16,6 +16,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.entitlement.initialization.EntitlementInitialization; +import org.elasticsearch.entitlement.runtime.api.NotEntitledException; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -23,14 +24,24 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.Collection; -import java.util.Objects; import java.util.function.Function; +import static java.util.Objects.requireNonNull; + public class EntitlementBootstrap { - public record PluginData(Path pluginPath, boolean isModular, boolean isExternalPlugin) {} + public record BootstrapArgs(Collection pluginData, Function, String> pluginResolver) { + public BootstrapArgs { + requireNonNull(pluginData); + requireNonNull(pluginResolver); + } + } - public record BootstrapArgs(Collection pluginData, Function, String> pluginResolver) {} + public record PluginData(Path pluginPath, boolean isModular, boolean isExternalPlugin) { + public PluginData { + requireNonNull(pluginPath); + } + } private static BootstrapArgs bootstrapArgs; @@ -50,9 +61,10 @@ public static void bootstrap(Collection pluginData, Function + * + * This serves two purposes: + * + *

+ * <ol>
+ * <li>
+ * a smoke test to make sure the entitlements system is not completely broken, and
+ * </li>
+ * <li>
+ * an early test of certain important operations so they don't fail later on at an awkward time.
+ * </li>
+ * </ol>
+ * + * @throws IllegalStateException if the entitlements system can't prevent an unauthorized action of our choosing + */ + private static void selfTest() { + ensureCannotStartProcess(); + ensureCanCreateTempFile(); + } + + private static void ensureCannotStartProcess() { + try { + // The command doesn't matter; it doesn't even need to exist + new ProcessBuilder("").start(); + } catch (NotEntitledException e) { + logger.debug("Success: Entitlement protection correctly prevented process creation"); + return; + } catch (IOException e) { + throw new IllegalStateException("Failed entitlement protection self-test", e); + } + throw new IllegalStateException("Entitlement protection self-test was incorrectly permitted"); + } + + /** + * Originally {@code Security.selfTest}. + */ + @SuppressForbidden(reason = "accesses jvm default tempdir as a self-test") + private static void ensureCanCreateTempFile() { + try { + Path p = Files.createTempFile(null, null); + p.toFile().deleteOnExit(); + + // Make an effort to clean up the file immediately; also, deleteOnExit leaves the file if the JVM exits abnormally. + try { + Files.delete(p); + } catch (IOException ignored) { + // Can be caused by virus scanner + } + } catch (NotEntitledException e) { + throw new IllegalStateException("Entitlement protection self-test was incorrectly forbidden", e); + } catch (Exception e) { + throw new IllegalStateException("Unable to perform entitlement protection self-test", e); + } + logger.debug("Success: Entitlement protection correctly permitted temp file creation"); + } + private static final Logger logger = LogManager.getLogger(EntitlementBootstrap.class); } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java index 9118f67cdc145..ba5ccbafa70ae 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java @@ -9,6 +9,7 @@ package org.elasticsearch.entitlement.initialization; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.internal.provider.ProviderLocator; import org.elasticsearch.entitlement.bootstrap.EntitlementBootstrap; import org.elasticsearch.entitlement.bridge.EntitlementChecker; @@ -19,6 +20,7 @@ import org.elasticsearch.entitlement.instrumentation.Transformer; import org.elasticsearch.entitlement.runtime.api.ElasticsearchEntitlementChecker; import org.elasticsearch.entitlement.runtime.policy.CreateClassLoaderEntitlement; +import org.elasticsearch.entitlement.runtime.policy.Entitlement; import org.elasticsearch.entitlement.runtime.policy.ExitVMEntitlement; import org.elasticsearch.entitlement.runtime.policy.Policy; import org.elasticsearch.entitlement.runtime.policy.PolicyManager; @@ -34,6 +36,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; @@ -53,6 +56,7 @@ public class EntitlementInitialization { private static final String POLICY_FILE_NAME = "entitlement-policy.yaml"; + private static final Module ENTITLEMENTS_MODULE = PolicyManager.class.getModule(); private static ElasticsearchEntitlementChecker manager; @@ -71,17 +75,17 @@ public static void initialize(Instrumentation inst) throws 
Exception { Instrumenter instrumenter = INSTRUMENTER_FACTORY.newInstrumenter(EntitlementChecker.class, checkMethods); inst.addTransformer(new Transformer(instrumenter, classesToTransform), true); - // TODO: should we limit this array somehow? - var classesToRetransform = classesToTransform.stream().map(EntitlementInitialization::internalNameToClass).toArray(Class[]::new); - inst.retransformClasses(classesToRetransform); + inst.retransformClasses(findClassesToRetransform(inst.getAllLoadedClasses(), classesToTransform)); } - private static Class internalNameToClass(String internalName) { - try { - return Class.forName(internalName.replace('/', '.'), false, ClassLoader.getPlatformClassLoader()); - } catch (ClassNotFoundException e) { - throw new RuntimeException(e); + private static Class[] findClassesToRetransform(Class[] loadedClasses, Set classesToTransform) { + List> retransform = new ArrayList<>(); + for (Class loadedClass : loadedClasses) { + if (classesToTransform.contains(loadedClass.getName().replace(".", "/"))) { + retransform.add(loadedClass); + } } + return retransform.toArray(new Class[0]); } private static PolicyManager createPolicyManager() throws IOException { @@ -90,9 +94,17 @@ private static PolicyManager createPolicyManager() throws IOException { // TODO(ES-10031): Decide what goes in the elasticsearch default policy and extend it var serverPolicy = new Policy( "server", - List.of(new Scope("org.elasticsearch.server", List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement()))) + List.of( + new Scope("org.elasticsearch.base", List.of(new CreateClassLoaderEntitlement())), + new Scope("org.elasticsearch.xcontent", List.of(new CreateClassLoaderEntitlement())), + new Scope("org.elasticsearch.server", List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement())) + ) ); - return new PolicyManager(serverPolicy, pluginPolicies, EntitlementBootstrap.bootstrapArgs().pluginResolver()); + // agents run without a module, so this is a special hack for the apm agent + // this should be removed once https://github.com/elastic/elasticsearch/issues/109335 is completed + List agentEntitlements = List.of(new CreateClassLoaderEntitlement()); + var resolver = EntitlementBootstrap.bootstrapArgs().pluginResolver(); + return new PolicyManager(serverPolicy, agentEntitlements, pluginPolicies, resolver, ENTITLEMENTS_MODULE); } private static Map createPluginPolicies(Collection pluginData) throws IOException { @@ -116,9 +128,17 @@ private static Policy loadPluginPolicy(Path pluginRoot, boolean isModular, Strin final Policy policy = parsePolicyIfExists(pluginName, policyFile, isExternalPlugin); // TODO: should this check actually be part of the parser? 
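(An illustrative aside on the module-name check that follows: the plugin and module names here are hypothetical, with the error message format taken from the lines below.)

```java
import java.util.List;

import org.elasticsearch.entitlement.runtime.policy.CreateClassLoaderEntitlement;
import org.elasticsearch.entitlement.runtime.policy.Policy;
import org.elasticsearch.entitlement.runtime.policy.Scope;

class PolicyValidationExample {
    static Policy misnamedScope() {
        // Hypothetical: the scope names a module the plugin does not actually contain.
        return new Policy(
            "my-plugin",
            List.of(new Scope("org.example.wrong_module", List.of(new CreateClassLoaderEntitlement())))
        );
    }
    // If the plugin's module set is {org.example.plugin}, loadPluginPolicy rejects this with:
    //   Invalid module name in policy: plugin [my-plugin] does not have module
    //   [org.example.wrong_module]; available modules [org.example.plugin]; policy file [...]
}
```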
- for (Scope scope : policy.scopes) { - if (moduleNames.contains(scope.name) == false) { - throw new IllegalStateException("policy [" + policyFile + "] contains invalid module [" + scope.name + "]"); + for (Scope scope : policy.scopes()) { + if (moduleNames.contains(scope.moduleName()) == false) { + throw new IllegalStateException( + Strings.format( + "Invalid module name in policy: plugin [%s] does not have module [%s]; available modules [%s]; policy file [%s]", + pluginName, + scope.moduleName(), + String.join(", ", moduleNames), + policyFile + ) + ); } } return policy; diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/instrumentation/MethodKey.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/instrumentation/MethodKey.java index 256a4d709d9dc..caaa10bec52dd 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/instrumentation/MethodKey.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/instrumentation/MethodKey.java @@ -16,6 +16,6 @@ * * @param className the "internal name" of the class: includes the package info, but with periods replaced by slashes * @param methodName the method name - * @param parameterTypes a list of "internal names" for the parameter types + * @param parameterTypes a list of "internal names" for the parameter types that appear in the method's descriptor (not the receiver) */ public record MethodKey(String className, String methodName, List parameterTypes) {} diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java index a5ca0543ad15a..dd39ec3c5fe43 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java @@ -10,10 +10,34 @@ package org.elasticsearch.entitlement.runtime.api; import org.elasticsearch.entitlement.bridge.EntitlementChecker; +import org.elasticsearch.entitlement.runtime.policy.NetworkEntitlement; import org.elasticsearch.entitlement.runtime.policy.PolicyManager; +import java.io.InputStream; +import java.io.PrintStream; +import java.io.PrintWriter; +import java.net.ContentHandlerFactory; +import java.net.DatagramPacket; +import java.net.DatagramSocket; +import java.net.DatagramSocketImplFactory; +import java.net.FileNameMap; +import java.net.InetAddress; +import java.net.MulticastSocket; +import java.net.NetworkInterface; +import java.net.ProxySelector; +import java.net.ResponseCache; +import java.net.SocketAddress; +import java.net.SocketImplFactory; import java.net.URL; +import java.net.URLStreamHandler; import java.net.URLStreamHandlerFactory; +import java.util.List; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSession; +import javax.net.ssl.SSLSocketFactory; /** * Implementation of the {@link EntitlementChecker} interface, providing additional @@ -21,6 +45,7 @@ * The trampoline module loads this object via SPI. 
*/ public class ElasticsearchEntitlementChecker implements EntitlementChecker { + private final PolicyManager policyManager; public ElasticsearchEntitlementChecker(PolicyManager policyManager) { @@ -28,15 +53,50 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) { } @Override - public void check$$exit(Class callerClass, Runtime runtime, int status) { + public void check$java_lang_Runtime$exit(Class callerClass, Runtime runtime, int status) { policyManager.checkExitVM(callerClass); } @Override - public void check$$halt(Class callerClass, Runtime runtime, int status) { + public void check$java_lang_Runtime$halt(Class callerClass, Runtime runtime, int status) { policyManager.checkExitVM(callerClass); } + @Override + public void check$java_lang_System$$exit(Class callerClass, int status) { + policyManager.checkExitVM(callerClass); + } + + @Override + public void check$java_lang_ClassLoader$(Class callerClass) { + policyManager.checkCreateClassLoader(callerClass); + } + + @Override + public void check$java_lang_ClassLoader$(Class callerClass, ClassLoader parent) { + policyManager.checkCreateClassLoader(callerClass); + } + + @Override + public void check$java_lang_ClassLoader$(Class callerClass, String name, ClassLoader parent) { + policyManager.checkCreateClassLoader(callerClass); + } + + @Override + public void check$java_security_SecureClassLoader$(Class callerClass) { + policyManager.checkCreateClassLoader(callerClass); + } + + @Override + public void check$java_security_SecureClassLoader$(Class callerClass, ClassLoader parent) { + policyManager.checkCreateClassLoader(callerClass); + } + + @Override + public void check$java_security_SecureClassLoader$(Class callerClass, String name, ClassLoader parent) { + policyManager.checkCreateClassLoader(callerClass); + } + @Override public void check$java_net_URLClassLoader$(Class callerClass, URL[] urls) { policyManager.checkCreateClassLoader(callerClass); @@ -67,4 +127,297 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) { ) { policyManager.checkCreateClassLoader(callerClass); } + + @Override + public void check$java_lang_ProcessBuilder$start(Class callerClass, ProcessBuilder processBuilder) { + policyManager.checkStartProcess(callerClass); + } + + @Override + public void check$java_lang_ProcessBuilder$$startPipeline(Class callerClass, List builders) { + policyManager.checkStartProcess(callerClass); + } + + @Override + public void check$java_lang_System$$setIn(Class callerClass, InputStream in) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_lang_System$$setOut(Class callerClass, PrintStream out) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_lang_System$$setErr(Class callerClass, PrintStream err) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_lang_Runtime$addShutdownHook(Class callerClass, Runtime runtime, Thread hook) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_lang_Runtime$removeShutdownHook(Class callerClass, Runtime runtime, Thread hook) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$jdk_tools_jlink_internal_Jlink$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$jdk_tools_jlink_internal_Main$$run(Class callerClass, PrintWriter out, PrintWriter err, String... 
args) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$jdk_vm_ci_services_JVMCIServiceLocator$$getProviders(Class callerClass, Class service) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$jdk_vm_ci_services_Services$$load(Class callerClass, Class service) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$jdk_vm_ci_services_Services$$loadSingle(Class callerClass, Class service, boolean required) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$com_sun_tools_jdi_VirtualMachineManagerImpl$$virtualMachineManager(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_lang_Thread$$setDefaultUncaughtExceptionHandler(Class callerClass, Thread.UncaughtExceptionHandler ueh) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_util_spi_LocaleServiceProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_text_spi_BreakIteratorProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_text_spi_CollatorProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_text_spi_DateFormatProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_text_spi_DateFormatSymbolsProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_text_spi_DecimalFormatSymbolsProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_text_spi_NumberFormatProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_util_spi_CalendarDataProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_util_spi_CalendarNameProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_util_spi_CurrencyNameProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_util_spi_LocaleNameProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_util_spi_TimeZoneNameProvider$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_util_logging_LogManager$(Class callerClass) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_net_DatagramSocket$$setDatagramSocketImplFactory(Class callerClass, DatagramSocketImplFactory fac) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_net_HttpURLConnection$$setFollowRedirects(Class callerClass, boolean set) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_net_ServerSocket$$setSocketFactory(Class callerClass, SocketImplFactory fac) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void 
check$java_net_Socket$$setSocketImplFactory(Class callerClass, SocketImplFactory fac) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_net_URL$$setURLStreamHandlerFactory(Class callerClass, URLStreamHandlerFactory fac) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_net_URLConnection$$setFileNameMap(Class callerClass, FileNameMap map) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_net_URLConnection$$setContentHandlerFactory(Class callerClass, ContentHandlerFactory fac) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$javax_net_ssl_HttpsURLConnection$setSSLSocketFactory( + Class callerClass, + HttpsURLConnection connection, + SSLSocketFactory sf + ) { + policyManager.checkSetHttpsConnectionProperties(callerClass); + } + + @Override + public void check$javax_net_ssl_HttpsURLConnection$$setDefaultSSLSocketFactory(Class callerClass, SSLSocketFactory sf) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$javax_net_ssl_HttpsURLConnection$$setDefaultHostnameVerifier(Class callerClass, HostnameVerifier hv) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$javax_net_ssl_SSLContext$$setDefault(Class callerClass, SSLContext context) { + policyManager.checkChangeJVMGlobalState(callerClass); + } + + @Override + public void check$java_net_ProxySelector$$setDefault(Class callerClass, ProxySelector ps) { + policyManager.checkChangeNetworkHandling(callerClass); + } + + @Override + public void check$java_net_ResponseCache$$setDefault(Class callerClass, ResponseCache rc) { + policyManager.checkChangeNetworkHandling(callerClass); + } + + @Override + public void check$java_net_spi_InetAddressResolverProvider$(Class callerClass) { + policyManager.checkChangeNetworkHandling(callerClass); + } + + @Override + public void check$java_net_spi_URLStreamHandlerProvider$(Class callerClass) { + policyManager.checkChangeNetworkHandling(callerClass); + } + + @Override + public void check$java_net_URL$(Class callerClass, String protocol, String host, int port, String file, URLStreamHandler handler) { + policyManager.checkChangeNetworkHandling(callerClass); + } + + @Override + public void check$java_net_URL$(Class callerClass, URL context, String spec, URLStreamHandler handler) { + policyManager.checkChangeNetworkHandling(callerClass); + } + + @Override + public void check$sun_security_ssl_SSLSessionImpl$getSessionContext(Class callerClass, SSLSession sslSession) { + policyManager.checkReadSensitiveNetworkInformation(callerClass); + } + + @Override + public void check$java_net_DatagramSocket$bind(Class callerClass, DatagramSocket that, SocketAddress addr) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION); + } + + @Override + public void check$java_net_DatagramSocket$connect(Class callerClass, DatagramSocket that, InetAddress addr) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION); + } + + @Override + public void check$java_net_DatagramSocket$connect(Class callerClass, DatagramSocket that, SocketAddress addr) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION); + } + + @Override + public void check$java_net_DatagramSocket$send(Class callerClass, DatagramSocket that, 
DatagramPacket p) { + var actions = NetworkEntitlement.CONNECT_ACTION; + if (p.getAddress().isMulticastAddress()) { + actions |= NetworkEntitlement.ACCEPT_ACTION; + } + policyManager.checkNetworkAccess(callerClass, actions); + } + + @Override + public void check$java_net_DatagramSocket$receive(Class callerClass, DatagramSocket that, DatagramPacket p) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.ACCEPT_ACTION); + } + + @Override + public void check$java_net_DatagramSocket$joinGroup(Class caller, DatagramSocket that, SocketAddress addr, NetworkInterface ni) { + policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION); + } + + @Override + public void check$java_net_DatagramSocket$leaveGroup(Class caller, DatagramSocket that, SocketAddress addr, NetworkInterface ni) { + policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION); + } + + @Override + public void check$java_net_MulticastSocket$joinGroup(Class callerClass, MulticastSocket that, InetAddress addr) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION); + } + + @Override + public void check$java_net_MulticastSocket$joinGroup(Class caller, MulticastSocket that, SocketAddress addr, NetworkInterface ni) { + policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION); + } + + @Override + public void check$java_net_MulticastSocket$leaveGroup(Class caller, MulticastSocket that, InetAddress addr) { + policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION); + } + + @Override + public void check$java_net_MulticastSocket$leaveGroup(Class caller, MulticastSocket that, SocketAddress addr, NetworkInterface ni) { + policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION); + } + + @Override + public void check$java_net_MulticastSocket$send(Class callerClass, MulticastSocket that, DatagramPacket p, byte ttl) { + policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION); + } } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java index 138515be9ffcb..55e4b66595642 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java @@ -9,7 +9,7 @@ package org.elasticsearch.entitlement.runtime.policy; -public class CreateClassLoaderEntitlement implements Entitlement { +public record CreateClassLoaderEntitlement() implements Entitlement { @ExternalEntitlement - public CreateClassLoaderEntitlement() {} + public CreateClassLoaderEntitlement {} } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java index c4a8fc6833581..e5c836ea22b20 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java @@ 
-12,4 +12,4 @@ /** * Internal policy type (not-parseable -- not available to plugins). */ -public class ExitVMEntitlement implements Entitlement {} +public record ExitVMEntitlement() implements Entitlement {} diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlement.java new file mode 100644 index 0000000000000..b6c6a41d5be7f --- /dev/null +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlement.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.runtime.policy; + +import org.elasticsearch.core.Strings; + +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.StringJoiner; + +import static java.util.Map.entry; + +/** + * Describes a network entitlement (sockets) with actions. + */ +public class NetworkEntitlement implements Entitlement { + + public static final int LISTEN_ACTION = 0x1; + public static final int CONNECT_ACTION = 0x2; + public static final int ACCEPT_ACTION = 0x4; + + static final String LISTEN = "listen"; + static final String CONNECT = "connect"; + static final String ACCEPT = "accept"; + + private static final Map ACTION_MAP = Map.ofEntries( + entry(LISTEN, LISTEN_ACTION), + entry(CONNECT, CONNECT_ACTION), + entry(ACCEPT, ACCEPT_ACTION) + ); + + private final int actions; + + @ExternalEntitlement(parameterNames = { "actions" }, esModulesOnly = false) + public NetworkEntitlement(List actionsList) { + + int actionsInt = 0; + + for (String actionString : actionsList) { + var action = ACTION_MAP.get(actionString); + if (action == null) { + throw new IllegalArgumentException("unknown network action [" + actionString + "]"); + } + if ((actionsInt & action) == action) { + throw new IllegalArgumentException(Strings.format("network action [%s] specified multiple times", actionString)); + } + actionsInt |= action; + } + + this.actions = actionsInt; + } + + public static Object printActions(int actions) { + var joiner = new StringJoiner(","); + for (var entry : ACTION_MAP.entrySet()) { + var action = entry.getValue(); + if ((actions & action) == action) { + joiner.add(entry.getKey()); + } + } + return joiner.toString(); + } + + /** + * For the actions to match, the actions present in this entitlement must be a superset + * of the actions required by a check. 
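+ * For example, an entitlement granting connect and accept (0x2 | 0x4 = 0x6) matches a check requiring connect (0x2), since ~0x6 & 0x2 == 0, but not a check requiring listen (0x1), since ~0x6 & 0x1 != 0.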
+ * There is only one "negative" case (action required by the check but not present in the entitlement), + * and it can be expressed efficiently via this truth table: + * this.actions | requiredActions | ~this.actions & requiredActions + * 0 | 0 | 0 + * 0 | 1 | 1 --> NOT this.actions AND requiredActions + * 1 | 0 | 0 + * 1 | 1 | 0 + * + * @param requiredActions the actions required to be present for a check to pass + * @return true if requiredActions are present, false otherwise + */ + public boolean matchActions(int requiredActions) { + return (~this.actions & requiredActions) == 0; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + NetworkEntitlement that = (NetworkEntitlement) o; + return actions == that.actions; + } + + @Override + public int hashCode() { + return Objects.hash(actions); + } + + @Override + public String toString() { + return "NetworkEntitlement{actions=" + actions + '}'; + } +}
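The single-expression superset check is easiest to see with concrete bit patterns. A worked example (illustrative only, using the action constants defined in this file):

// LISTEN_ACTION = 0x1, CONNECT_ACTION = 0x2, ACCEPT_ACTION = 0x4
int granted  = NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION; // 0b110
int required = NetworkEntitlement.CONNECT_ACTION;                                    // 0b010

// (~granted & required) keeps only the bits that are required but not granted:
//   ~0b110 & 0b010 == ...001 & 0b010 == 0  -> match
boolean matches = (~granted & required) == 0;                                         // true

required = NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION;     // 0b011
//   ~0b110 & 0b011 == 0b001 -> LISTEN is required but not granted
matches = (~granted & required) == 0;                                                 // false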
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Policy.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Policy.java index e8bd7a3fff357..3546472f485fb 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Policy.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Policy.java @@ -9,38 +9,15 @@ package org.elasticsearch.entitlement.runtime.policy; -import java.util.Collections; import java.util.List; import java.util.Objects; /** * A holder for scoped entitlements. */ -public class Policy { - - public final String name; - public final List scopes; - +public record Policy(String name, List scopes) { public Policy(String name, List scopes) { this.name = Objects.requireNonNull(name); - this.scopes = Collections.unmodifiableList(Objects.requireNonNull(scopes)); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Policy policy = (Policy) o; - return Objects.equals(name, policy.name) && Objects.equals(scopes, policy.scopes); - } - - @Override - public int hashCode() { - return Objects.hash(name, scopes); - } - - @Override - public String toString() { - return "Policy{" + "name='" + name + '\'' + ", scopes=" + scopes + '}'; + this.scopes = List.copyOf(scopes); } } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java index 8d3efe4eb98e6..f039fbda3dfbd 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java @@ -10,38 +10,41 @@ package org.elasticsearch.entitlement.runtime.policy; import org.elasticsearch.core.Strings; -import org.elasticsearch.entitlement.runtime.api.ElasticsearchEntitlementChecker; import org.elasticsearch.entitlement.runtime.api.NotEntitledException; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import java.lang.StackWalker.StackFrame; import java.lang.module.ModuleFinder; import java.lang.module.ModuleReference; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.IdentityHashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.function.Function; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; +import static java.lang.StackWalker.Option.RETAIN_CLASS_REFERENCE; +import static java.util.Objects.requireNonNull; +import static java.util.function.Predicate.not; +import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.toUnmodifiableMap; + public class PolicyManager { - private static final Logger logger = LogManager.getLogger(ElasticsearchEntitlementChecker.class); + private static final Logger logger = LogManager.getLogger(PolicyManager.class); + + record ModuleEntitlements(Map, List> entitlementsByType) { + public static final ModuleEntitlements NONE = new ModuleEntitlements(Map.of()); - static class ModuleEntitlements { - public static final ModuleEntitlements NONE = new ModuleEntitlements(List.of()); - private final IdentityHashMap, List> entitlementsByType; + ModuleEntitlements { + entitlementsByType = Map.copyOf(entitlementsByType); + } - ModuleEntitlements(List entitlements) { - this.entitlementsByType = entitlements.stream() - .collect(Collectors.toMap(Entitlement::getClass, e -> new ArrayList<>(List.of(e)), (a, b) -> { - a.addAll(b); - return a; - }, IdentityHashMap::new)); + public static ModuleEntitlements from(List entitlements) { + return new ModuleEntitlements(entitlements.stream().collect(groupingBy(Entitlement::getClass))); } public boolean hasEntitlement(Class entitlementClass) { @@ -49,13 +52,18 @@ public boolean hasEntitlement(Class entitlementClass) { } public Stream getEntitlements(Class entitlementClass) { - return entitlementsByType.get(entitlementClass).stream().map(entitlementClass::cast); + var entitlements = entitlementsByType.get(entitlementClass); + if (entitlements == null) { + return Stream.empty(); + } + return entitlements.stream().map(entitlementClass::cast); } } - final Map moduleEntitlementsMap = new HashMap<>(); + final Map moduleEntitlementsMap = new ConcurrentHashMap<>(); protected final Map> serverEntitlements; + protected final List agentEntitlements; protected final Map>> pluginsEntitlements; private final Function, String> pluginResolver; @@ -69,7 +77,6 @@ private static Set findSystemModules() { .stream() .map(ModuleReference::descriptor) .collect(Collectors.toUnmodifiableSet()); - return ModuleLayer.boot() .modules() .stream() @@ -77,17 +84,69 @@ .collect(Collectors.toUnmodifiableSet()); } - public PolicyManager(Policy defaultPolicy, Map pluginPolicies, Function, String> pluginResolver) { - this.serverEntitlements = buildScopeEntitlementsMap(Objects.requireNonNull(defaultPolicy)); - this.pluginsEntitlements = Objects.requireNonNull(pluginPolicies) - .entrySet() + /** + * Frames originating from this module are ignored in the permission logic.
+ */ + private final Module entitlementsModule; + + public PolicyManager( + Policy serverPolicy, + List agentEntitlements, + Map pluginPolicies, + Function, String> pluginResolver, + Module entitlementsModule + ) { + this.serverEntitlements = buildScopeEntitlementsMap(requireNonNull(serverPolicy)); + this.agentEntitlements = agentEntitlements; + this.pluginsEntitlements = requireNonNull(pluginPolicies).entrySet() .stream() - .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, e -> buildScopeEntitlementsMap(e.getValue()))); + .collect(toUnmodifiableMap(Map.Entry::getKey, e -> buildScopeEntitlementsMap(e.getValue()))); this.pluginResolver = pluginResolver; + this.entitlementsModule = entitlementsModule; } private static Map> buildScopeEntitlementsMap(Policy policy) { - return policy.scopes.stream().collect(Collectors.toUnmodifiableMap(scope -> scope.name, scope -> scope.entitlements)); + return policy.scopes().stream().collect(toUnmodifiableMap(Scope::moduleName, Scope::entitlements)); + } + + public void checkStartProcess(Class callerClass) { + neverEntitled(callerClass, "start process"); + } + + private void neverEntitled(Class callerClass, String operationDescription) { + var requestingModule = requestingClass(callerClass); + if (isTriviallyAllowed(requestingModule)) { + return; + } + + throw new NotEntitledException( + Strings.format( + "Not entitled: caller [%s], module [%s], operation [%s]", + callerClass, + requestingModule.getName(), + operationDescription + ) + ); + } + + /** + * @param operationDescription is only called when the operation is not trivially allowed, meaning the check is about to fail; + * therefore, its performance is not a major concern. + */ + private void neverEntitled(Class callerClass, Supplier operationDescription) { + var requestingModule = requestingClass(callerClass); + if (isTriviallyAllowed(requestingModule)) { + return; + } + + throw new NotEntitledException( + Strings.format( + "Not entitled: caller [%s], module [%s], operation [%s]", + callerClass, + requestingModule.getName(), + operationDescription.get() + ) + ); } public void checkExitVM(Class callerClass) { @@ -98,19 +157,84 @@ public void checkCreateClassLoader(Class callerClass) { checkEntitlementPresent(callerClass, CreateClassLoaderEntitlement.class); } + public void checkSetHttpsConnectionProperties(Class callerClass) { + checkEntitlementPresent(callerClass, SetHttpsConnectionPropertiesEntitlement.class); + } + + public void checkChangeJVMGlobalState(Class callerClass) { + neverEntitled(callerClass, () -> { + // Look up the check$ method to compose an informative error message. + // This way, we don't need to painstakingly describe every individual global-state change. + Optional checkMethodName = StackWalker.getInstance() + .walk( + frames -> frames.map(StackFrame::getMethodName) + .dropWhile(not(methodName -> methodName.startsWith("check$"))) + .findFirst() + ); + return checkMethodName.map(this::operationDescription).orElse("change JVM global state"); + }); + } + + /** + * Check for operations that can modify the way network operations are handled + */ + public void checkChangeNetworkHandling(Class callerClass) { + checkChangeJVMGlobalState(callerClass); + } + + /** + * Check for operations that can access sensitive network information, e.g. 
secrets, tokens or SSL sessions + */ + public void checkReadSensitiveNetworkInformation(Class callerClass) { + neverEntitled(callerClass, "access sensitive network information"); + } + + private String operationDescription(String methodName) { + // TODO: Use a more human-readable description. Perhaps share code with InstrumentationServiceImpl.parseCheckerMethodName + return methodName.substring(methodName.indexOf('$')); + } + + public void checkNetworkAccess(Class callerClass, int actions) { + var requestingClass = requestingClass(callerClass); + if (isTriviallyAllowed(requestingClass)) { + return; + } + + ModuleEntitlements entitlements = getEntitlements(requestingClass); + if (entitlements.getEntitlements(NetworkEntitlement.class).anyMatch(n -> n.matchActions(actions))) { + logger.debug( + () -> Strings.format( + "Entitled: class [%s], module [%s], entitlement [Network], actions [0x%X]", + requestingClass, + requestingClass.getModule().getName(), + actions + ) + ); + return; + } + throw new NotEntitledException( + Strings.format( + "Missing entitlement: class [%s], module [%s], entitlement [Network], actions [%s]", + requestingClass, + requestingClass.getModule().getName(), + NetworkEntitlement.printActions(actions) + ) + ); + } + private void checkEntitlementPresent(Class callerClass, Class entitlementClass) { - var requestingModule = requestingModule(callerClass); - if (isTriviallyAllowed(requestingModule)) { + var requestingClass = requestingClass(callerClass); + if (isTriviallyAllowed(requestingClass)) { return; } - ModuleEntitlements entitlements = getEntitlementsOrThrow(callerClass, requestingModule); + ModuleEntitlements entitlements = getEntitlements(requestingClass); if (entitlements.hasEntitlement(entitlementClass)) { logger.debug( () -> Strings.format( - "Entitled: caller [%s], module [%s], type [%s]", - callerClass, - requestingModule.getName(), + "Entitled: class [%s], module [%s], entitlement [%s]", + requestingClass, + requestingClass.getModule().getName(), entitlementClass.getSimpleName() ) ); @@ -118,30 +242,26 @@ private void checkEntitlementPresent(Class callerClass, Class callerClass, Module requestingModule) { - ModuleEntitlements cachedEntitlement = moduleEntitlementsMap.get(requestingModule); - if (cachedEntitlement != null) { - if (cachedEntitlement == ModuleEntitlements.NONE) { - throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, requestingModule) + "[CACHED]"); - } - return cachedEntitlement; - } + ModuleEntitlements getEntitlements(Class requestingClass) { + return moduleEntitlementsMap.computeIfAbsent(requestingClass.getModule(), m -> computeEntitlements(requestingClass)); + } + private ModuleEntitlements computeEntitlements(Class requestingClass) { + Module requestingModule = requestingClass.getModule(); if (isServerModule(requestingModule)) { - var scopeName = requestingModule.getName(); - return getModuleEntitlementsOrThrow(callerClass, requestingModule, serverEntitlements, scopeName); + return getModuleScopeEntitlements(requestingClass, serverEntitlements, requestingModule.getName()); } // plugins - var pluginName = pluginResolver.apply(callerClass); + var pluginName = pluginResolver.apply(requestingClass); if (pluginName != null) { var pluginEntitlements = pluginsEntitlements.get(pluginName); if (pluginEntitlements != null) { @@ -151,66 +271,80 @@ ModuleEntitlements getEntitlementsOrThrow(Class callerClass, Module requestin } else { scopeName = requestingModule.getName(); } - return getModuleEntitlementsOrThrow(callerClass, requestingModule, pluginEntitlements, scopeName); + return getModuleScopeEntitlements(requestingClass, pluginEntitlements, scopeName); } } - moduleEntitlementsMap.put(requestingModule, ModuleEntitlements.NONE); - throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, requestingModule)); - } + if (requestingModule.isNamed() == false) { + // agents are the only thing running non-modular + return ModuleEntitlements.from(agentEntitlements); + } - private static String buildModuleNoPolicyMessage(Class callerClass, Module requestingModule) { - return Strings.format("Missing entitlement policy: caller [%s], module [%s]", callerClass, requestingModule.getName()); + logger.warn("No applicable entitlement policy for class [{}]", requestingClass.getName()); + return ModuleEntitlements.NONE; } - private ModuleEntitlements getModuleEntitlementsOrThrow( + private ModuleEntitlements getModuleScopeEntitlements( Class callerClass, - Module module, Map> scopeEntitlements, String moduleName ) { var entitlements = scopeEntitlements.get(moduleName); if (entitlements == null) { - // Module without entitlements - remember we don't have any - moduleEntitlementsMap.put(module, ModuleEntitlements.NONE); - throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, module)); + logger.warn("No applicable entitlement policy for module [{}], class [{}]", moduleName, callerClass); + return ModuleEntitlements.NONE; } - // We have a policy for this module - var classEntitlements = new ModuleEntitlements(entitlements); - moduleEntitlementsMap.put(module, classEntitlements); - return classEntitlements; + return ModuleEntitlements.from(entitlements); } private static boolean isServerModule(Module requestingModule) { return requestingModule.isNamed() && requestingModule.getLayer() == ModuleLayer.boot(); } - private static Module requestingModule(Class callerClass) { + /** + * Walks the stack to determine which class should be checked for entitlements. + * + * @param callerClass when non-null will be returned; + * this is a fast-path check that can avoid the stack walk + * in cases where the caller class is available. + * @return the requesting class, or {@code null} if the entire call stack + * comes from the entitlement library itself. + */ + Class requestingClass(Class callerClass) { if (callerClass != null) { - Module callerModule = callerClass.getModule(); - if (systemModules.contains(callerModule) == false) { - // fast path - return callerModule; - } + // fast path + return callerClass; } - int framesToSkip = 1 // getCallingClass (this method) - + 1 // the checkXxx method - + 1 // the runtime config method - + 1 // the instrumented method - ; - Optional module = StackWalker.getInstance(StackWalker.Option.RETAIN_CLASS_REFERENCE) - .walk( - s -> s.skip(framesToSkip) - .map(f -> f.getDeclaringClass().getModule()) - .filter(m -> systemModules.contains(m) == false) - .findFirst() - ); - return module.orElse(null); + Optional> result = StackWalker.getInstance(RETAIN_CLASS_REFERENCE) + .walk(frames -> findRequestingClass(frames.map(StackFrame::getDeclaringClass))); + return result.orElse(null); } - private static boolean isTriviallyAllowed(Module requestingModule) { - if (requestingModule == null) { - logger.debug("Entitlement trivially allowed: entire call stack is in composed of classes in system modules"); + /** + * Given a stream of classes corresponding to the frames from a {@link StackWalker}, + * returns the class whose entitlements should be checked.
+ * + * @throws NullPointerException if the requesting module is {@code null} + */ + Optional> findRequestingClass(Stream> classes) { + return classes.filter(c -> c.getModule() != entitlementsModule) // Ignore the entitlements library + .skip(1) // Skip the sensitive caller method + .findFirst(); + } + + /** + * @return true if permission is granted regardless of the entitlement + */ + private static boolean isTriviallyAllowed(Class requestingClass) { + if (logger.isTraceEnabled()) { + logger.trace("Stack trace for upcoming trivially-allowed check", new Exception()); + } + if (requestingClass == null) { + logger.debug("Entitlement trivially allowed: no caller frames outside the entitlement library"); + return true; + } + if (systemModules.contains(requestingClass.getModule())) { + logger.debug("Entitlement trivially allowed from system module [{}]", requestingClass.getModule().getName()); + return true; + } logger.trace("Entitlement not trivially allowed");
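The frame filtering above is easiest to see on a concrete stack. A sketch with hypothetical classes (EntitlementsLib stands for a class in the module passed to the constructor as entitlementsModule; it mirrors the PolicyManagerTests later in this diff):

// Hypothetical frames: EntitlementsLib lives in entitlementsModule,
// Instrumented is the instrumented JDK method's class, Caller is the
// code whose entitlements we actually want to check.
Stream<Class<?>> frames = Stream.of(EntitlementsLib.class, Instrumented.class, Caller.class);

// filter(...) drops the entitlements-library frame, skip(1) drops the
// instrumented method's own frame, so the first remaining frame is the caller.
Optional<Class<?>> requesting = policyManager.findRequestingClass(frames);
// requesting.orElse(null) == Caller.class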
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java index fb63d5ffbeb48..ac4d4afdd97f8 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java @@ -34,8 +34,12 @@ */ public class PolicyParser { - private static final Map> EXTERNAL_ENTITLEMENTS = Stream.of(FileEntitlement.class, CreateClassLoaderEntitlement.class) - .collect(Collectors.toUnmodifiableMap(PolicyParser::getEntitlementTypeName, Function.identity())); + private static final Map> EXTERNAL_ENTITLEMENTS = Stream.of( + FileEntitlement.class, + CreateClassLoaderEntitlement.class, + SetHttpsConnectionPropertiesEntitlement.class, + NetworkEntitlement.class + ).collect(Collectors.toUnmodifiableMap(PolicyParser::getEntitlementTypeName, Function.identity())); protected final XContentParser policyParser; protected final String policyName; diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java index 0fe63eb8da1b7..55e257797d603 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/Scope.java @@ -9,38 +9,17 @@ package org.elasticsearch.entitlement.runtime.policy; -import java.util.Collections; import java.util.List; import java.util.Objects; /** * A holder for entitlements within a single scope. */ -public class Scope { +public record Scope(String moduleName, List entitlements) { - public final String name; - public final List entitlements; - - public Scope(String name, List entitlements) { - this.name = Objects.requireNonNull(name); - this.entitlements = Collections.unmodifiableList(Objects.requireNonNull(entitlements)); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - Scope scope = (Scope) o; - return Objects.equals(name, scope.name) && Objects.equals(entitlements, scope.entitlements); + public Scope(String moduleName, List entitlements) { + this.moduleName = Objects.requireNonNull(moduleName); + this.entitlements = List.copyOf(entitlements); } - @Override - public int hashCode() { - return Objects.hash(name, entitlements); - } - - @Override - public String toString() { - return "Scope{" + "name='" + name + '\'' + ", entitlements=" + entitlements + '}'; - } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/SetHttpsConnectionPropertiesEntitlement.java similarity index 56% rename from server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java rename to libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/SetHttpsConnectionPropertiesEntitlement.java index 49bd38330e3af..bb2f65def9e18 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/SetHttpsConnectionPropertiesEntitlement.java @@ -7,16 +7,12 @@ * License v3.0 only", or the "Server Side Public License, v 1". */ -package org.elasticsearch.cluster.metadata; +package org.elasticsearch.entitlement.runtime.policy; -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; - -import java.util.Set; - -public class MetadataFeatures implements FeatureSpecification { - @Override - public Set getFeatures() { - return Set.of(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED); - } +/** + * An Entitlement to allow setting properties on a single HTTPS connection after it has been created + */ +public record SetHttpsConnectionPropertiesEntitlement() implements Entitlement { + @ExternalEntitlement(esModulesOnly = false) + public SetHttpsConnectionPropertiesEntitlement {} } diff --git a/libs/entitlement/src/main23/java/org/elasticsearch/entitlement/runtime/api/Java23ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main23/java/org/elasticsearch/entitlement/runtime/api/Java23ElasticsearchEntitlementChecker.java index 912d76ecfc01a..706a6649de329 100644 --- a/libs/entitlement/src/main23/java/org/elasticsearch/entitlement/runtime/api/Java23ElasticsearchEntitlementChecker.java +++ b/libs/entitlement/src/main23/java/org/elasticsearch/entitlement/runtime/api/Java23ElasticsearchEntitlementChecker.java @@ -19,8 +19,8 @@ public Java23ElasticsearchEntitlementChecker(PolicyManager policyManager) { } @Override - public void check$$exit(Class callerClass, Runtime runtime, int status) { + public void check$java_lang_Runtime$exit(Class callerClass, Runtime runtime, int status) { // TODO: this is just an example, we shouldn't really override a method implemented in the superclass - super.check$$exit(callerClass, runtime, status); + super.check$java_lang_Runtime$exit(callerClass, runtime, status); } } diff --git
a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlementTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlementTests.java new file mode 100644 index 0000000000000..91051d48c365f --- /dev/null +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlementTests.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.entitlement.runtime.policy; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; + +import static org.hamcrest.Matchers.is; + +public class NetworkEntitlementTests extends ESTestCase { + + public void testMatchesActions() { + var listenEntitlement = new NetworkEntitlement(List.of(NetworkEntitlement.LISTEN)); + var emptyEntitlement = new NetworkEntitlement(List.of()); + var connectAcceptEntitlement = new NetworkEntitlement(List.of(NetworkEntitlement.CONNECT, NetworkEntitlement.ACCEPT)); + + assertThat(listenEntitlement.matchActions(0), is(true)); + assertThat(listenEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION), is(true)); + assertThat(listenEntitlement.matchActions(NetworkEntitlement.ACCEPT_ACTION), is(false)); + assertThat(listenEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION), is(false)); + assertThat(listenEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false)); + assertThat(listenEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION), is(false)); + assertThat(listenEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false)); + + assertThat(connectAcceptEntitlement.matchActions(0), is(true)); + assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION), is(false)); + assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.ACCEPT_ACTION), is(true)); + assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION), is(true)); + assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false)); + assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION), is(false)); + assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(true)); + + assertThat(emptyEntitlement.matchActions(0), is(true)); + assertThat(emptyEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION), is(false)); + assertThat(emptyEntitlement.matchActions(NetworkEntitlement.ACCEPT_ACTION), is(false)); + assertThat(emptyEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION), is(false)); + assertThat(emptyEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false)); + assertThat(emptyEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION), is(false)); + 
assertThat(emptyEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false)); + } +} diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java index 45bdf2e457824..d22c2f598e344 100644 --- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java @@ -9,10 +9,11 @@ package org.elasticsearch.entitlement.runtime.policy; -import org.elasticsearch.entitlement.runtime.api.NotEntitledException; +import org.elasticsearch.entitlement.runtime.policy.PolicyManager.ModuleEntitlements; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.compiler.InMemoryJavaCompiler; import org.elasticsearch.test.jar.JarUtils; +import org.junit.BeforeClass; import java.io.IOException; import java.lang.module.Configuration; @@ -22,6 +23,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Stream; import static java.util.Map.entry; import static org.elasticsearch.entitlement.runtime.policy.PolicyManager.ALL_UNNAMED; @@ -29,93 +31,93 @@ import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.endsWith; -import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.sameInstance; @ESTestCase.WithoutSecurityManager public class PolicyManagerTests extends ESTestCase { + /** + * A module you can use for test cases that don't actually care about the + * entitlements module. 
+ */ + private static Module NO_ENTITLEMENTS_MODULE; + + @BeforeClass + public static void beforeClass() { + try { + // Any old module will do for tests using NO_ENTITLEMENTS_MODULE + NO_ENTITLEMENTS_MODULE = makeClassInItsOwnModule().getModule(); + } catch (Exception e) { + throw new IllegalStateException(e); + } + + } public void testGetEntitlementsThrowsOnMissingPluginUnnamedModule() { var policyManager = new PolicyManager( createEmptyTestServerPolicy(), + List.of(), Map.of("plugin1", createPluginPolicy("plugin.module")), - c -> "plugin1" + c -> "plugin1", + NO_ENTITLEMENTS_MODULE ); // Any class from the current module (unnamed) will do var callerClass = this.getClass(); var requestingModule = callerClass.getModule(); - var ex = assertThrows( - "No policy for the unnamed module", - NotEntitledException.class, - () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule) - ); + assertEquals("No policy for the unnamed module", ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass)); - assertEquals( - "Missing entitlement policy: caller [class org.elasticsearch.entitlement.runtime.policy.PolicyManagerTests], module [null]", - ex.getMessage() - ); - assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); } public void testGetEntitlementsThrowsOnMissingPolicyForPlugin() { - var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1"); + var policyManager = new PolicyManager(createEmptyTestServerPolicy(), List.of(), Map.of(), c -> "plugin1", NO_ENTITLEMENTS_MODULE); // Any class from the current module (unnamed) will do var callerClass = this.getClass(); var requestingModule = callerClass.getModule(); - var ex = assertThrows( - "No policy for this plugin", - NotEntitledException.class, - () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule) - ); + assertEquals("No policy for this plugin", ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass)); - assertEquals( - "Missing entitlement policy: caller [class org.elasticsearch.entitlement.runtime.policy.PolicyManagerTests], module [null]", - ex.getMessage() - ); - assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); } public void testGetEntitlementsFailureIsCached() { - var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1"); + var policyManager = new PolicyManager(createEmptyTestServerPolicy(), List.of(), Map.of(), c -> "plugin1", NO_ENTITLEMENTS_MODULE); // Any class from the current module (unnamed) will do var callerClass = this.getClass(); var requestingModule = callerClass.getModule(); - assertThrows(NotEntitledException.class, () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule)); - assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + assertEquals(ModuleEntitlements.NONE, policyManager.getEntitlements(callerClass)); + assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); // A second time - var ex = assertThrows(NotEntitledException.class, () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule)); + assertEquals(ModuleEntitlements.NONE, 
policyManager.getEntitlements(callerClass)); - assertThat(ex.getMessage(), endsWith("[CACHED]")); // Nothing new in the map - assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); + assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); } public void testGetEntitlementsReturnsEntitlementsForPluginUnnamedModule() { var policyManager = new PolicyManager( createEmptyTestServerPolicy(), + List.of(), Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))), - c -> "plugin2" + c -> "plugin2", + NO_ENTITLEMENTS_MODULE ); // Any class from the current module (unnamed) will do var callerClass = this.getClass(); - var requestingModule = callerClass.getModule(); - var entitlements = policyManager.getEntitlementsOrThrow(callerClass, requestingModule); + var entitlements = policyManager.getEntitlements(callerClass); assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); } public void testGetEntitlementsThrowsOnMissingPolicyForServer() throws ClassNotFoundException { - var policyManager = new PolicyManager(createTestServerPolicy("example"), Map.of(), c -> null); + var policyManager = new PolicyManager(createTestServerPolicy("example"), List.of(), Map.of(), c -> null, NO_ENTITLEMENTS_MODULE); // Tests do not run modular, so we cannot use a server class. // But we know that in production code the server module and its classes are in the boot layer. @@ -124,21 +126,19 @@ public void testGetEntitlementsThrowsOnMissingPolicyForServer() throws ClassNotF var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer"); var requestingModule = mockServerClass.getModule(); - var ex = assertThrows( - "No policy for this module in server", - NotEntitledException.class, - () -> policyManager.getEntitlementsOrThrow(mockServerClass, requestingModule) - ); + assertEquals("No policy for this module in server", ModuleEntitlements.NONE, policyManager.getEntitlements(mockServerClass)); - assertEquals( - "Missing entitlement policy: caller [class com.sun.net.httpserver.HttpServer], module [jdk.httpserver]", - ex.getMessage() - ); - assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + assertEquals(Map.of(requestingModule, ModuleEntitlements.NONE), policyManager.moduleEntitlementsMap); } public void testGetEntitlementsReturnsEntitlementsForServerModule() throws ClassNotFoundException { - var policyManager = new PolicyManager(createTestServerPolicy("jdk.httpserver"), Map.of(), c -> null); + var policyManager = new PolicyManager( + createTestServerPolicy("jdk.httpserver"), + List.of(), + Map.of(), + c -> null, + NO_ENTITLEMENTS_MODULE + ); // Tests do not run modular, so we cannot use a server class. // But we know that in production code the server module and its classes are in the boot layer. 
@@ -147,7 +147,7 @@ public void testGetEntitlementsReturnsEntitlementsForServerModule() throws Class var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer"); var requestingModule = mockServerClass.getModule(); - var entitlements = policyManager.getEntitlementsOrThrow(mockServerClass, requestingModule); + var entitlements = policyManager.getEntitlements(mockServerClass); assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); assertThat(entitlements.hasEntitlement(ExitVMEntitlement.class), is(true)); } @@ -155,19 +155,21 @@ public void testGetEntitlementsReturnsEntitlementsForServerModule() throws Class public void testGetEntitlementsReturnsEntitlementsForPluginModule() throws IOException, ClassNotFoundException { final Path home = createTempDir(); - Path jar = creteMockPluginJar(home); + Path jar = createMockPluginJar(home); var policyManager = new PolicyManager( createEmptyTestServerPolicy(), + List.of(), Map.of("mock-plugin", createPluginPolicy("org.example.plugin")), - c -> "mock-plugin" + c -> "mock-plugin", + NO_ENTITLEMENTS_MODULE ); var layer = createLayerForJar(jar, "org.example.plugin"); var mockPluginClass = layer.findLoader("org.example.plugin").loadClass("q.B"); var requestingModule = mockPluginClass.getModule(); - var entitlements = policyManager.getEntitlementsOrThrow(mockPluginClass, requestingModule); + var entitlements = policyManager.getEntitlements(mockPluginClass); assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); assertThat( entitlements.getEntitlements(FileEntitlement.class).toList(), @@ -178,25 +180,68 @@ public void testGetEntitlementsReturnsEntitlementsForPluginModule() throws IOExc public void testGetEntitlementsResultIsCached() { var policyManager = new PolicyManager( createEmptyTestServerPolicy(), + List.of(), Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))), - c -> "plugin2" + c -> "plugin2", + NO_ENTITLEMENTS_MODULE ); // Any class from the current module (unnamed) will do var callerClass = this.getClass(); - var requestingModule = callerClass.getModule(); - var entitlements = policyManager.getEntitlementsOrThrow(callerClass, requestingModule); + var entitlements = policyManager.getEntitlements(callerClass); assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); var cachedResult = policyManager.moduleEntitlementsMap.values().stream().findFirst().get(); - var entitlementsAgain = policyManager.getEntitlementsOrThrow(callerClass, requestingModule); + var entitlementsAgain = policyManager.getEntitlements(callerClass); // Nothing new in the map assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); assertThat(entitlementsAgain, sameInstance(cachedResult)); } + public void testRequestingClassFastPath() throws IOException, ClassNotFoundException { + var callerClass = makeClassInItsOwnModule(); + assertEquals(callerClass, policyManagerWithEntitlementsModule(NO_ENTITLEMENTS_MODULE).requestingClass(callerClass)); + } + + public void testRequestingModuleWithStackWalk() throws IOException, ClassNotFoundException { + var entitlementsClass = makeClassInItsOwnModule(); // A class in the entitlements library itself + var requestingClass = makeClassInItsOwnModule(); // This guy is always the right answer + var instrumentedClass = makeClassInItsOwnModule(); // The class that called the check method + var ignorableClass = 
makeClassInItsOwnModule(); + + var policyManager = policyManagerWithEntitlementsModule(entitlementsClass.getModule()); + + assertEquals( + "Skip entitlement library and the instrumented method", + requestingClass, + policyManager.findRequestingClass(Stream.of(entitlementsClass, instrumentedClass, requestingClass, ignorableClass)).orElse(null) + ); + assertEquals( + "Skip multiple library frames", + requestingClass, + policyManager.findRequestingClass(Stream.of(entitlementsClass, entitlementsClass, instrumentedClass, requestingClass)) + .orElse(null) + ); + assertThrows( + "Non-modular caller frames are not supported", + NullPointerException.class, + () -> policyManager.findRequestingClass(Stream.of(entitlementsClass, null)) + ); + } + + private static Class makeClassInItsOwnModule() throws IOException, ClassNotFoundException { + final Path home = createTempDir(); + Path jar = createMockPluginJar(home); + var layer = createLayerForJar(jar, "org.example.plugin"); + return layer.findLoader("org.example.plugin").loadClass("q.B"); + } + + private static PolicyManager policyManagerWithEntitlementsModule(Module entitlementsModule) { + return new PolicyManager(createEmptyTestServerPolicy(), List.of(), Map.of(), c -> "test", entitlementsModule); + } + private static Policy createEmptyTestServerPolicy() { return new Policy("server", List.of()); } @@ -219,7 +264,7 @@ private static Policy createPluginPolicy(String... pluginModules) { ); } - private static Path creteMockPluginJar(Path home) throws IOException { + private static Path createMockPluginJar(Path home) throws IOException { Path jar = home.resolve("mock-plugin.jar"); Map sources = Map.ofEntries( diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java index 633c76cb8c04f..1e0c31d2280b8 100644 --- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java @@ -16,11 +16,7 @@ import java.nio.charset.StandardCharsets; import java.util.List; -import static org.elasticsearch.test.LambdaMatchers.transformedMatch; -import static org.hamcrest.Matchers.both; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; public class PolicyParserTests extends ESTestCase { @@ -39,21 +35,37 @@ public void testGetEntitlementTypeName() { public void testPolicyBuilder() throws IOException { Policy parsedPolicy = new PolicyParser(PolicyParserTests.class.getResourceAsStream("test-policy.yaml"), "test-policy.yaml", false) .parsePolicy(); - Policy builtPolicy = new Policy( + Policy expected = new Policy( "test-policy.yaml", List.of(new Scope("entitlement-module-name", List.of(new FileEntitlement("test/path/to/file", List.of("read", "write"))))) ); - assertEquals(parsedPolicy, builtPolicy); + assertEquals(expected, parsedPolicy); } public void testPolicyBuilderOnExternalPlugin() throws IOException { Policy parsedPolicy = new PolicyParser(PolicyParserTests.class.getResourceAsStream("test-policy.yaml"), "test-policy.yaml", true) .parsePolicy(); - Policy builtPolicy = new Policy( + Policy expected = new Policy( "test-policy.yaml", List.of(new Scope("entitlement-module-name", List.of(new FileEntitlement("test/path/to/file", List.of("read", "write"))))) ); - assertEquals(parsedPolicy, 
builtPolicy); + assertEquals(expected, parsedPolicy); + } + + public void testParseNetwork() throws IOException { + Policy parsedPolicy = new PolicyParser(new ByteArrayInputStream(""" + entitlement-module-name: + - network: + actions: + - listen + - accept + - connect + """.getBytes(StandardCharsets.UTF_8)), "test-policy.yaml", false).parsePolicy(); + Policy expected = new Policy( + "test-policy.yaml", + List.of(new Scope("entitlement-module-name", List.of(new NetworkEntitlement(List.of("listen", "accept", "connect"))))) + ); + assertEquals(expected, parsedPolicy); } public void testParseCreateClassloader() throws IOException { @@ -61,17 +73,22 @@ public void testParseCreateClassloader() throws IOException { entitlement-module-name: - create_class_loader """.getBytes(StandardCharsets.UTF_8)), "test-policy.yaml", false).parsePolicy(); - Policy builtPolicy = new Policy( + Policy expected = new Policy( "test-policy.yaml", List.of(new Scope("entitlement-module-name", List.of(new CreateClassLoaderEntitlement()))) ); - assertThat( - parsedPolicy.scopes, - contains( - both(transformedMatch((Scope scope) -> scope.name, equalTo("entitlement-module-name"))).and( - transformedMatch(scope -> scope.entitlements, contains(instanceOf(CreateClassLoaderEntitlement.class))) - ) - ) + assertEquals(expected, parsedPolicy); + } + + public void testParseSetHttpsConnectionProperties() throws IOException { + Policy parsedPolicy = new PolicyParser(new ByteArrayInputStream(""" + entitlement-module-name: + - set_https_connection_properties + """.getBytes(StandardCharsets.UTF_8)), "test-policy.yaml", true).parsePolicy(); + Policy expected = new Policy( + "test-policy.yaml", + List.of(new Scope("entitlement-module-name", List.of(new SetHttpsConnectionPropertiesEntitlement()))) ); + assertEquals(expected, parsedPolicy); } } diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java b/libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java index e6f8af9b566d5..cf8edf07f280a 100644 --- a/libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/PatternBank.java @@ -23,7 +23,7 @@ public class PatternBank { - public static PatternBank EMPTY = new PatternBank(Map.of()); + public static final PatternBank EMPTY = new PatternBank(Map.of()); private final Map bank; diff --git a/libs/h3/build.gradle b/libs/h3/build.gradle index 81a0d56ed4606..6036323e160fc 100644 --- a/libs/h3/build.gradle +++ b/libs/h3/build.gradle @@ -35,7 +35,7 @@ tasks.named('forbiddenApisMain').configure { replaceSignatureFiles 'jdk-signatures' } -ext.projectLicenses.set(['The Apache Software License, Version 2.0': 'http://www.apache.org/licenses/LICENSE-2.0']) +ext.projectLicenses.set(['The Apache Software License, Version 2.0': providers.provider(() -> 'http://www.apache.org/licenses/LICENSE-2.0')]) licenseFile.set(rootProject.file('licenses/APACHE-LICENSE-2.0.txt')) tasks.withType(LicenseHeadersTask.class).configureEach { diff --git a/libs/h3/src/main/java/org/elasticsearch/h3/Constants.java b/libs/h3/src/main/java/org/elasticsearch/h3/Constants.java index 3b3f760c0534f..fa52ff6ee56cf 100644 --- a/libs/h3/src/main/java/org/elasticsearch/h3/Constants.java +++ b/libs/h3/src/main/java/org/elasticsearch/h3/Constants.java @@ -29,7 +29,7 @@ final class Constants { /** * sqrt(3) / 2.0 */ - public static double M_SQRT3_2 = 0.8660254037844386467637231707529361834714; + public static final double M_SQRT3_2 = 0.8660254037844386467637231707529361834714; /** * 2.0 * PI */ @@ -37,19 
+37,19 @@ final class Constants { /** * The number of H3 base cells */ - public static int NUM_BASE_CELLS = 122; + public static final int NUM_BASE_CELLS = 122; /** * The number of vertices in a hexagon */ - public static int NUM_HEX_VERTS = 6; + public static final int NUM_HEX_VERTS = 6; /** * The number of vertices in a pentagon */ - public static int NUM_PENT_VERTS = 5; + public static final int NUM_PENT_VERTS = 5; /** * H3 index modes */ - public static int H3_CELL_MODE = 1; + public static final int H3_CELL_MODE = 1; /** * square root of 7 */ @@ -64,14 +64,14 @@ final class Constants { * (or distance between adjacent cell center points * on the plane) to gnomonic unit length. */ - public static double RES0_U_GNOMONIC = 0.38196601125010500003; + public static final double RES0_U_GNOMONIC = 0.38196601125010500003; /** * rotation angle between Class II and Class III resolution axes * (asin(sqrt(3.0 / 28.0))) */ - public static double M_AP7_ROT_RADS = 0.333473172251832115336090755351601070065900389; + public static final double M_AP7_ROT_RADS = 0.333473172251832115336090755351601070065900389; /** * threshold epsilon */ - public static double EPSILON = 0.0000000000000001; + public static final double EPSILON = 0.0000000000000001; } diff --git a/libs/h3/src/main/java/org/elasticsearch/h3/H3.java b/libs/h3/src/main/java/org/elasticsearch/h3/H3.java index 08031088728ba..ac35fe6670c1b 100644 --- a/libs/h3/src/main/java/org/elasticsearch/h3/H3.java +++ b/libs/h3/src/main/java/org/elasticsearch/h3/H3.java @@ -33,7 +33,7 @@ public final class H3 { /** * max H3 resolution; H3 version 1 has 16 resolutions, numbered 0 through 15 */ - public static int MAX_H3_RES = 15; + public static final int MAX_H3_RES = 15; private static final long[] NORTH = new long[MAX_H3_RES + 1]; private static final long[] SOUTH = new long[MAX_H3_RES + 1]; diff --git a/libs/h3/src/main/java/org/elasticsearch/h3/H3Index.java b/libs/h3/src/main/java/org/elasticsearch/h3/H3Index.java index 2b1b9cade21a4..88261dd458feb 100644 --- a/libs/h3/src/main/java/org/elasticsearch/h3/H3Index.java +++ b/libs/h3/src/main/java/org/elasticsearch/h3/H3Index.java @@ -41,22 +41,22 @@ public static boolean H3_is_pentagon(long h3) { return BaseCells.isBaseCellPentagon(H3Index.H3_get_base_cell(h3)) && H3Index.h3LeadingNonZeroDigit(h3) == 0; } - public static long H3_INIT = 35184372088831L; + public static final long H3_INIT = 35184372088831L; /** * The bit offset of the mode in an H3 index. */ - public static int H3_MODE_OFFSET = 59; + public static final int H3_MODE_OFFSET = 59; /** * 1's in the 4 mode bits, 0's everywhere else. */ - public static long H3_MODE_MASK = 15L << H3_MODE_OFFSET; + public static final long H3_MODE_MASK = 15L << H3_MODE_OFFSET; /** * 0's in the 4 mode bits, 1's everywhere else. */ - public static long H3_MODE_MASK_NEGATIVE = ~H3_MODE_MASK; + public static final long H3_MODE_MASK_NEGATIVE = ~H3_MODE_MASK; public static long H3_set_mode(long h3, long mode) { return (h3 & H3_MODE_MASK_NEGATIVE) | (mode << H3_MODE_OFFSET); @@ -65,16 +65,16 @@ public static long H3_set_mode(long h3, long mode) { /** * The bit offset of the base cell in an H3 index. */ - public static int H3_BC_OFFSET = 45; + public static final int H3_BC_OFFSET = 45; /** * 1's in the 7 base cell bits, 0's everywhere else. */ - public static long H3_BC_MASK = 127L << H3_BC_OFFSET; + public static final long H3_BC_MASK = 127L << H3_BC_OFFSET; /** * 0's in the 7 base cell bits, 1's everywhere else. 
*/ - public static long H3_BC_MASK_NEGATIVE = ~H3_BC_MASK; + public static final long H3_BC_MASK_NEGATIVE = ~H3_BC_MASK; /** * Sets the integer base cell of h3 to bc. @@ -83,26 +83,26 @@ public static long H3_set_base_cell(long h3, long bc) { return (h3 & H3_BC_MASK_NEGATIVE) | (bc << H3_BC_OFFSET); } - public static int H3_RES_OFFSET = 52; + public static final int H3_RES_OFFSET = 52; /** * 1's in the 4 resolution bits, 0's everywhere else. */ - public static long H3_RES_MASK = 15L << H3_RES_OFFSET; + public static final long H3_RES_MASK = 15L << H3_RES_OFFSET; /** * 0's in the 4 resolution bits, 1's everywhere else. */ - public static long H3_RES_MASK_NEGATIVE = ~H3_RES_MASK; + public static final long H3_RES_MASK_NEGATIVE = ~H3_RES_MASK; /** * The bit offset of the max resolution digit in an H3 index. */ - public static int H3_MAX_OFFSET = 63; + public static final int H3_MAX_OFFSET = 63; /** * 1 in the highest bit, 0's everywhere else. */ - public static long H3_HIGH_BIT_MASK = (1L << H3_MAX_OFFSET); + public static final long H3_HIGH_BIT_MASK = (1L << H3_MAX_OFFSET); /** * Gets the highest bit of the H3 index. @@ -121,12 +121,12 @@ public static long H3_set_resolution(long h3, long res) { /** * The bit offset of the reserved bits in an H3 index. */ - public static int H3_RESERVED_OFFSET = 56; + public static final int H3_RESERVED_OFFSET = 56; /** * 1's in the 3 reserved bits, 0's everywhere else. */ - public static long H3_RESERVED_MASK = (7L << H3_RESERVED_OFFSET); + public static final long H3_RESERVED_MASK = (7L << H3_RESERVED_OFFSET); /** * Gets a value in the reserved space. Should always be zero for valid indexes. @@ -149,12 +149,12 @@ public static int H3_get_resolution(long h3) { /** * The number of bits in a single H3 resolution digit. */ - public static int H3_PER_DIGIT_OFFSET = 3; + public static final int H3_PER_DIGIT_OFFSET = 3; /** * 1's in the 3 bits of res 15 digit bits, 0's everywhere else. */ - public static long H3_DIGIT_MASK = 7L; + public static final long H3_DIGIT_MASK = 7L; /** * Gets the resolution res integer digit (0-7) of h3. 
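All of the offset/mask constant pairs above follow the same bit-field pattern that H3_set_mode in this file uses: clear the field with the negative mask, then OR in the shifted value. A standalone sketch of that pattern (illustrative only; the constant values are copied from the diff):

// Mirrors H3_set_mode: (h3 & H3_MODE_MASK_NEGATIVE) | (mode << H3_MODE_OFFSET)
long H3_INIT = 35184372088831L;          // starting index with all fields clear
int H3_MODE_OFFSET = 59;                 // mode occupies 4 bits at offset 59
long H3_MODE_MASK = 15L << H3_MODE_OFFSET;
long H3_MODE_MASK_NEGATIVE = ~H3_MODE_MASK;

// Set the mode field: clear its 4 bits, then OR in the shifted value.
long h3 = (H3_INIT & H3_MODE_MASK_NEGATIVE) | (1L /* H3_CELL_MODE */ << H3_MODE_OFFSET);

// Read it back: mask the field, then shift it down.
long mode = (h3 & H3_MODE_MASK) >>> H3_MODE_OFFSET; // == 1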
diff --git a/libs/logstash-bridge/src/main/java/org/elasticsearch/logstashbridge/ingest/IngestDocumentBridge.java b/libs/logstash-bridge/src/main/java/org/elasticsearch/logstashbridge/ingest/IngestDocumentBridge.java index da97be39d97c5..1df089c0427dc 100644 --- a/libs/logstash-bridge/src/main/java/org/elasticsearch/logstashbridge/ingest/IngestDocumentBridge.java +++ b/libs/logstash-bridge/src/main/java/org/elasticsearch/logstashbridge/ingest/IngestDocumentBridge.java @@ -20,8 +20,6 @@ public class IngestDocumentBridge extends StableBridgeAPI.Proxy { - public static String INGEST_KEY = IngestDocument.INGEST_KEY; - public static IngestDocumentBridge wrap(final IngestDocument ingestDocument) { if (ingestDocument == null) { return null; diff --git a/libs/native/libraries/build.gradle b/libs/native/libraries/build.gradle index c6684052334c5..ed37d4a70931f 100644 --- a/libs/native/libraries/build.gradle +++ b/libs/native/libraries/build.gradle @@ -25,7 +25,7 @@ repositories { exclusiveContent { forRepository { maven { - url "https://artifactory.elastic.dev/artifactory/elasticsearch-native" + url = "https://artifactory.elastic.dev/artifactory/elasticsearch-native" metadataSources { artifact() }
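Putting the pieces of the entitlement changes together, here is a minimal end-to-end sketch (illustrative only, using the five-argument PolicyManager constructor and checkNetworkAccess from the PolicyManager diff above; "my-plugin", "my.plugin.module", MyPluginClass, and the resolver lambda are hypothetical stand-ins):

// A plugin policy granting "connect" and "accept" to one module scope.
Policy serverPolicy = new Policy("server", List.of());
Policy pluginPolicy = new Policy(
    "my-plugin", // hypothetical plugin name
    List.of(new Scope("my.plugin.module", List.of(new NetworkEntitlement(List.of("connect", "accept")))))
);
PolicyManager policyManager = new PolicyManager(
    serverPolicy,
    List.of(),                        // agent entitlements
    Map.of("my-plugin", pluginPolicy),
    clazz -> "my-plugin",             // plugin resolver: class -> plugin name
    PolicyManager.class.getModule()   // entitlements module, ignored during stack walks
);

// With a non-null caller the fast path in requestingClass applies; the check
// passes because the scope grants "connect", and would throw
// NotEntitledException for an action outside the granted set (e.g. "listen").
Class<?> callerClass = MyPluginClass.class; // hypothetical caller
policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION);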
+Wanted: DaemonRequestContext{jvmCriteria=/Users/rene/.sdkman/candidates/java/21.0.5-oracle (no JDK specified, using current Java home), daemonOpts=[-XX:+HeapDumpOnOutOfMemoryError, -Xss2m, --add-exports, jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED, --add-exports, jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED, --add-exports, jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED, --add-exports, jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED, --add-exports, jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED, --add-opens, java.base/java.time=ALL-UNNAMED, -Dfile.encoding=UTF-8, -Duser.country=DE, -Duser.language=en, -Duser.variant], applyInstrumentationAgent=true, nativeServicesMode=ENABLED, priority=NORMAL} +Actual: DefaultDaemonContext[uid=2770ac86-e90f-46e0-aca4-eb484b46539b,javaHome=/Users/rene/.gradle-idea2/jdks/oracle_corporation-22-aarch64-os_x/jdk-22.jdk/Contents/Home,javaVersion=22,javaVendor=Oracle Corporation,daemonRegistryDir=/Users/rene/.gradle/daemon,pid=51106,idleTimeout=10800000,priority=NORMAL,applyInstrumentationAgent=true,nativeServicesMode=ENABLED,daemonOpts=-XX:+HeapDumpOnOutOfMemoryError,-Xss2m,--add-exports,jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED,--add-exports,jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED,--add-exports,jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED,--add-exports,jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED,--add-exports,jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED,--add-opens,java.base/java.time=ALL-UNNAMED,-Dfile.encoding=UTF-8,-Duser.country=DE,-Duser.language=en,-Duser.variant] + + Looking for a different daemon... +The client will now receive all logging from the daemon (pid: 49343). The daemon log file: /Users/rene/.gradle/daemon/8.11.1/daemon-49343.out.log +Starting 7th build in daemon [uptime: 28 mins 11.835 secs, performance: 98%, GC rate: 0.00/s, heap usage: 0% of 8 GiB] +Using 12 worker leases. +Now considering [/Users/rene/dev/elastic/elasticsearch, /Users/rene/dev/elastic/elasticsearch/build-tools-internal, /Users/rene/dev/elastic/elasticsearch/build-tools, /Users/rene/dev/elastic/elasticsearch/build-conventions] as hierarchies to watch +Now considering [/Users/rene/dev/elastic/elasticsearch/build-conventions, /Users/rene/dev/elastic/elasticsearch, /Users/rene/dev/elastic/elasticsearch/build-tools-internal, /Users/rene/dev/elastic/elasticsearch/build-tools] as hierarchies to watch +Now considering [/Users/rene/dev/elastic/elasticsearch/build-tools, /Users/rene/dev/elastic/elasticsearch/build-conventions, /Users/rene/dev/elastic/elasticsearch, /Users/rene/dev/elastic/elasticsearch/build-tools-internal] as hierarchies to watch +Now considering [/Users/rene/dev/elastic/elasticsearch/build-tools-internal, /Users/rene/dev/elastic/elasticsearch/build-tools, /Users/rene/dev/elastic/elasticsearch/build-conventions, /Users/rene/dev/elastic/elasticsearch] as hierarchies to watch +Calculating task graph as configuration cache cannot be reused because directory 'libs/simdvec' has changed. 
+Now considering [/Users/rene/dev/elastic/elasticsearch, /Users/rene/dev/elastic/elasticsearch/build-tools-internal, /Users/rene/dev/elastic/elasticsearch/build-tools, /Users/rene/dev/elastic/elasticsearch/build-conventions] as hierarchies to watch +Watching the file system is configured to be enabled if available +File system watching is active +Starting Build +Now considering [/Users/rene/dev/elastic/elasticsearch/build-conventions, /Users/rene/dev/elastic/elasticsearch, /Users/rene/dev/elastic/elasticsearch/build-tools-internal, /Users/rene/dev/elastic/elasticsearch/build-tools] as hierarchies to watch +Now considering [/Users/rene/dev/elastic/elasticsearch/build-tools, /Users/rene/dev/elastic/elasticsearch/build-conventions, /Users/rene/dev/elastic/elasticsearch, /Users/rene/dev/elastic/elasticsearch/build-tools-internal] as hierarchies to watch +Now considering [/Users/rene/dev/elastic/elasticsearch/build-tools-internal, /Users/rene/dev/elastic/elasticsearch/build-tools, /Users/rene/dev/elastic/elasticsearch/build-conventions, /Users/rene/dev/elastic/elasticsearch] as hierarchies to watch + +> Configure project :build-conventions +Evaluating project ':build-conventions' using build file '/Users/rene/dev/elastic/elasticsearch/build-conventions/build.gradle'. +Registering project ':build-conventions' in composite build. Will substitute for module 'org.elasticsearch:build-conventions'. +Resolve mutations for :build-conventions:compileJava (Thread[#1322,Execution worker,5,main]) started. +:build-conventions:compileJava (Thread[#1329,Execution worker Thread 8,5,main]) started. + +> Task :build-conventions:compileJava UP-TO-DATE +Caching disabled for task ':build-conventions:compileJava' because: + Build cache is disabled +Skipping task ':build-conventions:compileJava' as it is up-to-date. +Resolve mutations for :build-conventions:pluginDescriptors (Thread[#1329,Execution worker Thread 8,5,main]) started. +:build-conventions:pluginDescriptors (Thread[#1329,Execution worker Thread 8,5,main]) started. + +> Task :build-conventions:pluginDescriptors UP-TO-DATE +Caching disabled for task ':build-conventions:pluginDescriptors' because: + Build cache is disabled + Not worth caching +Skipping task ':build-conventions:pluginDescriptors' as it is up-to-date. +Resolve mutations for :build-conventions:processResources (Thread[#1329,Execution worker Thread 8,5,main]) started. +:build-conventions:processResources (Thread[#1329,Execution worker Thread 8,5,main]) started. + +> Task :build-conventions:processResources UP-TO-DATE +Caching disabled for task ':build-conventions:processResources' because: + Build cache is disabled + Not worth caching +Skipping task ':build-conventions:processResources' as it is up-to-date. +Resolve mutations for :build-conventions:classes (Thread[#1329,Execution worker Thread 8,5,main]) started. +:build-conventions:classes (Thread[#1329,Execution worker Thread 8,5,main]) started. + +> Task :build-conventions:classes UP-TO-DATE +Skipping task ':build-conventions:classes' as it has no actions. +Resolve mutations for :build-conventions:jar (Thread[#1329,Execution worker Thread 8,5,main]) started. +:build-conventions:jar (Thread[#1329,Execution worker Thread 8,5,main]) started. + +> Task :build-conventions:jar UP-TO-DATE +Caching disabled for task ':build-conventions:jar' because: + Build cache is disabled + Not worth caching +Skipping task ':build-conventions:jar' as it is up-to-date. 
+
+> Configure project :build-tools
+Evaluating project ':build-tools' using build file '/Users/rene/dev/elastic/elasticsearch/build-tools/build.gradle'.
+[verbose artifact-transform log elided: every buildscript dependency jar (shadow-gradle-plugin 8.3.5, log4j 2.24.1, maven-model 3.6.2, apache-rat 0.11, spotless 6.25.0/2.45.0, solstice 1.7.5, org.eclipse.osgi 3.18.300, plexus, jdom2, asm 9.7.1, ant 1.10.15, jgit 6.7.0, commons-*, woodstox, xz, okhttp, okio, kotlin-stdlib 1.9.10, annotations 13.0) is processed with InstrumentationAnalysisTransform, MergeInstrumentationAnalysisTransform and ExternalDependencyInstrumentingArtifactTransform; build-conventions.jar (project :build-conventions) with ProjectDependencyInstrumentingArtifactTransform]
+
+> Configure project :build-tools:reaper
+Evaluating project ':build-tools:reaper' using build file '/Users/rene/dev/elastic/elasticsearch/build-tools/reaper/build.gradle'.
+Registering project ':build-tools' in composite build. Will substitute for module 'org.elasticsearch.gradle:build-tools'.
+Registering project ':build-tools:reaper' in composite build. Will substitute for module 'org.elasticsearch.gradle:reaper'.
+
+> Configure project :build-tools-internal
+Evaluating project ':build-tools-internal' using build file '/Users/rene/dev/elastic/elasticsearch/build-tools-internal/build.gradle'.
+[verbose artifact-transform log elided: the same dependency jars are processed again for the :build-tools-internal classpath with InstrumentationAnalysisTransform, MergeInstrumentationAnalysisTransform and ExternalDependencyInstrumentingArtifactTransform; build-conventions.jar (project :build-conventions) with ProjectDependencyInstrumentingArtifactTransform]
+Found project 'project :build-conventions' as substitute for module 'org.elasticsearch:build-conventions'.
+Found project 'project :build-tools' as substitute for module 'org.elasticsearch.gradle:build-tools'.
+Found project 'project :build-tools:reaper' as substitute for module 'org.elasticsearch.gradle:reaper'.
+Found project 'project :build-conventions' as substitute for module 'org.elasticsearch:build-conventions'.
+Found project 'project :build-tools' as substitute for module 'org.elasticsearch.gradle:build-tools'.
+Resolve mutations for :build-tools:compileJava (Thread[#1328,Execution worker Thread 7,5,main]) started.
+Resolve mutations for :build-tools:reaper:compileJava (Thread[#1329,Execution worker Thread 8,5,main]) started.
+Resolve mutations for :build-tools-internal:extractPluginRequests (Thread[#1331,Execution worker Thread 10,5,main]) started.
+work action resolve build-conventions.jar (project :build-conventions) (Thread[#1330,Execution worker Thread 9,5,main]) started.
+:build-tools-internal:extractPluginRequests (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:build-tools:reaper:compileJava (Thread[#1329,Execution worker Thread 8,5,main]) started.
+:build-tools:compileJava (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :build-tools-internal:extractPluginRequests UP-TO-DATE
+Caching disabled for task ':build-tools-internal:extractPluginRequests' because:
+ Build cache is disabled
+Skipping task ':build-tools-internal:extractPluginRequests' as it is up-to-date.
+Resolve mutations for :build-tools-internal:generatePluginAdapters (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :build-tools:reaper:compileJava UP-TO-DATE
+Caching disabled for task ':build-tools:reaper:compileJava' because:
+ Build cache is disabled
+Skipping task ':build-tools:reaper:compileJava' as it is up-to-date.
+No compile result for :build-tools:reaper:compileJava
+:build-tools-internal:generatePluginAdapters (Thread[#1325,Execution worker Thread 4,5,main]) started.
+No compile result for :build-tools:reaper:compileJava
+Resolve mutations for :build-tools:reaper:processResources (Thread[#1329,Execution worker Thread 8,5,main]) started.
+:build-tools:reaper:processResources (Thread[#1329,Execution worker Thread 8,5,main]) started.
+
+> Task :build-tools-internal:generatePluginAdapters UP-TO-DATE
+Caching disabled for task ':build-tools-internal:generatePluginAdapters' because:
+ Build cache is disabled
+Skipping task ':build-tools-internal:generatePluginAdapters' as it is up-to-date.
+
+> Task :build-tools:reaper:processResources NO-SOURCE
+Skipping task ':build-tools:reaper:processResources' as it has no source files and no previous output files.
+Resolve mutations for :build-tools:reaper:classes (Thread[#1329,Execution worker Thread 8,5,main]) started.
+:build-tools:reaper:classes (Thread[#1329,Execution worker Thread 8,5,main]) started.
+
+> Task :build-tools:reaper:classes UP-TO-DATE
+Skipping task ':build-tools:reaper:classes' as it has no actions.
+Resolve mutations for :build-tools-internal:pluginDescriptors (Thread[#1325,Execution worker Thread 4,5,main]) started.
+Resolve mutations for :build-tools:reaper:jar (Thread[#1330,Execution worker Thread 9,5,main]) started.
+:build-tools-internal:pluginDescriptors (Thread[#1325,Execution worker Thread 4,5,main]) started.
+:build-tools:reaper:jar (Thread[#1330,Execution worker Thread 9,5,main]) started.
+
+> Task :build-tools-internal:pluginDescriptors UP-TO-DATE
+Caching disabled for task ':build-tools-internal:pluginDescriptors' because:
+ Build cache is disabled
+ Not worth caching
+Skipping task ':build-tools-internal:pluginDescriptors' as it is up-to-date.
+Resolve mutations for :build-tools-internal:processResources (Thread[#1325,Execution worker Thread 4,5,main]) started.
+:build-tools-internal:processResources (Thread[#1325,Execution worker Thread 4,5,main]) started.
+
+> Task :build-tools:reaper:jar UP-TO-DATE
+Caching disabled for task ':build-tools:reaper:jar' because:
+ Build cache is disabled
+ Not worth caching
+Skipping task ':build-tools:reaper:jar' as it is up-to-date.
+work action resolve reaper.jar (project :build-tools:reaper) (Thread[#1330,Execution worker Thread 9,5,main]) started.
+
+> Task :build-tools-internal:processResources UP-TO-DATE
+Caching disabled for task ':build-tools-internal:processResources' because:
+ Build cache is disabled
+ Not worth caching
+Skipping task ':build-tools-internal:processResources' as it is up-to-date.
+
+> Task :build-tools:compileJava UP-TO-DATE
+Caching disabled for task ':build-tools:compileJava' because:
+ Build cache is disabled
+Skipping task ':build-tools:compileJava' as it is up-to-date.
+No compile result for :build-tools:compileJava
+No compile result for :build-tools:compileJava
+Resolve mutations for :build-tools:compileGroovy (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:build-tools:compileGroovy (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :build-tools:compileGroovy NO-SOURCE
+Skipping task ':build-tools:compileGroovy' as it has no source files and no previous output files.
+Resolve mutations for :build-tools:generateVersionProperties (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:build-tools:generateVersionProperties (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :build-tools:generateVersionProperties UP-TO-DATE
+Caching disabled for task ':build-tools:generateVersionProperties' because:
+ Build cache is disabled
+Skipping task ':build-tools:generateVersionProperties' as it is up-to-date.
+Resolve mutations for :build-tools:pluginDescriptors (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:build-tools:pluginDescriptors (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :build-tools:pluginDescriptors UP-TO-DATE
+Caching disabled for task ':build-tools:pluginDescriptors' because:
+ Build cache is disabled
+ Not worth caching
+Skipping task ':build-tools:pluginDescriptors' as it is up-to-date.
+Resolve mutations for :build-tools:processResources (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:build-tools:processResources (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :build-tools:processResources UP-TO-DATE
+Caching disabled for task ':build-tools:processResources' because:
+ Build cache is disabled
+ Not worth caching
+Skipping task ':build-tools:processResources' as it is up-to-date.
+Resolve mutations for :build-tools:classes (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:build-tools:classes (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :build-tools:classes UP-TO-DATE
+Skipping task ':build-tools:classes' as it has no actions.
+Resolve mutations for :build-tools:jar (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:build-tools:jar (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :build-tools:jar UP-TO-DATE
+Caching disabled for task ':build-tools:jar' because:
+ Build cache is disabled
+ Not worth caching
+Skipping task ':build-tools:jar' as it is up-to-date.
+other build task :build-tools:jar (Thread[#1328,Execution worker Thread 7,5,main]) started.
+work action resolve build-tools-9.0.0-SNAPSHOT.jar (project :build-tools) (Thread[#1328,Execution worker Thread 7,5,main]) started.
+Resolve mutations for :build-tools-internal:compileJava (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:build-tools-internal:compileJava (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :build-tools-internal:compileJava UP-TO-DATE
+Caching disabled for task ':build-tools-internal:compileJava' because:
+ Build cache is disabled
+Skipping task ':build-tools-internal:compileJava' as it is up-to-date.
+No compile result for :build-tools-internal:compileJava
+No compile result for :build-tools-internal:compileJava
+Resolve mutations for :build-tools-internal:compileGroovy (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:build-tools-internal:compileGroovy (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :build-tools-internal:compileGroovy UP-TO-DATE
+Caching disabled for task ':build-tools-internal:compileGroovy' because:
+  Build cache is disabled
+Skipping task ':build-tools-internal:compileGroovy' as it is up-to-date.
+Resolve mutations for :build-tools-internal:compileGroovyPlugins (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:build-tools-internal:compileGroovyPlugins (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :build-tools-internal:compileGroovyPlugins UP-TO-DATE
+Caching disabled for task ':build-tools-internal:compileGroovyPlugins' because:
+  Build cache is disabled
+Skipping task ':build-tools-internal:compileGroovyPlugins' as it is up-to-date.
+Resolve mutations for :build-tools-internal:classes (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:build-tools-internal:classes (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :build-tools-internal:classes UP-TO-DATE
+Skipping task ':build-tools-internal:classes' as it has no actions.
+Resolve mutations for :build-tools-internal:jar (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:build-tools-internal:jar (Thread[#1330,Execution worker Thread 9,5,main]) started.
+
+> Task :build-tools-internal:jar UP-TO-DATE
+Caching disabled for task ':build-tools-internal:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':build-tools-internal:jar' as it is up-to-date.
+Transforming shadow-gradle-plugin-8.3.5.jar (com.gradleup.shadow:shadow-gradle-plugin:8.3.5) with InstrumentationAnalysisTransform
+Transforming log4j-core-2.24.1.jar (org.apache.logging.log4j:log4j-core:2.24.1) with InstrumentationAnalysisTransform
+Transforming develocity-gradle-plugin-3.18.1.jar (com.gradle:develocity-gradle-plugin:3.18.1) with InstrumentationAnalysisTransform
+Transforming jackson-dataformat-yaml-2.15.0.jar (com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:2.15.0) with InstrumentationAnalysisTransform
+Transforming json-schema-validator-1.0.72.jar (com.networknt:json-schema-validator:1.0.72) with InstrumentationAnalysisTransform
+Transforming jackson-databind-2.15.0.jar (com.fasterxml.jackson.core:jackson-databind:2.15.0) with InstrumentationAnalysisTransform
+Transforming jackson-core-2.15.0.jar (com.fasterxml.jackson.core:jackson-core:2.15.0) with InstrumentationAnalysisTransform
+Transforming jackson-annotations-2.15.0.jar (com.fasterxml.jackson.core:jackson-annotations:2.15.0) with InstrumentationAnalysisTransform
+Transforming apache-rat-0.11.jar (org.apache.rat:apache-rat:0.11) with InstrumentationAnalysisTransform
+Transforming apache-rat-tasks-0.11.jar (org.apache.rat:apache-rat-tasks:0.11) with InstrumentationAnalysisTransform
+Transforming apache-rat-core-0.11.jar (org.apache.rat:apache-rat-core:0.11) with InstrumentationAnalysisTransform
+Transforming commons-compress-1.26.1.jar (org.apache.commons:commons-compress:1.26.1) with InstrumentationAnalysisTransform
+Transforming httpclient-4.5.14.jar (org.apache.httpcomponents:httpclient:4.5.14) with InstrumentationAnalysisTransform
+Transforming gradle-info-plugin-11.3.3.jar (com.netflix.nebula:gradle-info-plugin:11.3.3) with InstrumentationAnalysisTransform
+Transforming spotless-plugin-gradle-6.25.0.jar (com.diffplug.spotless:spotless-plugin-gradle:6.25.0) with InstrumentationAnalysisTransform
+Transforming spotless-lib-extra-2.45.0.jar (com.diffplug.spotless:spotless-lib-extra:2.45.0) with InstrumentationAnalysisTransform
+Transforming org.eclipse.jgit-6.7.0.202309050840-r.jar (org.eclipse.jgit:org.eclipse.jgit:6.7.0.202309050840-r) with InstrumentationAnalysisTransform
+Transforming commons-codec-1.16.1.jar (commons-codec:commons-codec:1.16.1) with InstrumentationAnalysisTransform
+Transforming svnkit-1.8.12.jar (org.tmatesoft.svnkit:svnkit:1.8.12) with InstrumentationAnalysisTransform
+Transforming jna-platform-5.7.0.jar (net.java.dev.jna:jna-platform:5.7.0) with InstrumentationAnalysisTransform
+Transforming jsch.agentproxy.connector-factory-0.0.7.jar (com.jcraft:jsch.agentproxy.connector-factory:0.0.7) with InstrumentationAnalysisTransform
+Transforming jsch.agentproxy.usocket-jna-0.0.7.jar (com.jcraft:jsch.agentproxy.usocket-jna:0.0.7) with InstrumentationAnalysisTransform
+Transforming jsch.agentproxy.pageant-0.0.7.jar (com.jcraft:jsch.agentproxy.pageant:0.0.7) with InstrumentationAnalysisTransform
+Transforming jna-5.10.0.jar (net.java.dev.jna:jna:5.10.0) with InstrumentationAnalysisTransform
+Transforming gradle-idea-ext-1.1.4.jar (gradle.plugin.org.jetbrains.gradle.plugin.idea-ext:gradle-idea-ext:1.1.4) with InstrumentationAnalysisTransform
+Transforming forbiddenapis-3.8.jar (de.thetaphi:forbiddenapis:3.8) with InstrumentationAnalysisTransform
+Transforming gradle-docker-compose-plugin-0.17.5.jar (com.avast.gradle:gradle-docker-compose-plugin:0.17.5) with InstrumentationAnalysisTransform
+Transforming maven-model-3.6.2.jar (org.apache.maven:maven-model:3.6.2) with InstrumentationAnalysisTransform
+Transforming ST4-4.3.4.jar (org.antlr:ST4:4.3.4) with InstrumentationAnalysisTransform
+Transforming asm-commons-9.7.1.jar (org.ow2.asm:asm-commons:9.7.1) with InstrumentationAnalysisTransform
+Transforming asm-tree-9.7.1.jar (org.ow2.asm:asm-tree:9.7.1) with InstrumentationAnalysisTransform
+Transforming asm-9.7.1.jar (org.ow2.asm:asm:9.7.1) with InstrumentationAnalysisTransform
+Transforming httpcore-4.4.16.jar (org.apache.httpcomponents:httpcore:4.4.16) with InstrumentationAnalysisTransform
+Transforming snakeyaml-2.0.jar (org.yaml:snakeyaml:2.0) with InstrumentationAnalysisTransform
+Transforming javaparser-core-3.18.0.jar (com.github.javaparser:javaparser-core:3.18.0) with InstrumentationAnalysisTransform
+Transforming solstice-1.7.5.jar (dev.equo.ide:solstice:1.7.5) with InstrumentationAnalysisTransform
+Transforming org.eclipse.osgi-3.18.300.jar (org.eclipse.platform:org.eclipse.osgi:3.18.300) with InstrumentationAnalysisTransform
+Transforming ant-1.10.15.jar (org.apache.ant:ant:1.10.15) with InstrumentationAnalysisTransform
+Transforming commons-io-2.17.0.jar (commons-io:commons-io:2.17.0) with InstrumentationAnalysisTransform
+Transforming commons-lang3-3.14.0.jar (org.apache.commons:commons-lang3:3.14.0) with InstrumentationAnalysisTransform
+Transforming p4java-2015.2.1365273.jar (com.perforce:p4java:2015.2.1365273) with InstrumentationAnalysisTransform
+Transforming nebula-gradle-interop-2.0.0.jar (com.netflix.nebula:nebula-gradle-interop:2.0.0) with InstrumentationAnalysisTransform
+Transforming gradle-contacts-plugin-6.0.0.jar (com.netflix.nebula:gradle-contacts-plugin:6.0.0) with InstrumentationAnalysisTransform
+Transforming jdom2-2.0.6.1.jar (org.jdom:jdom2:2.0.6.1) with InstrumentationAnalysisTransform
+Transforming plexus-utils-4.0.2.jar (org.codehaus.plexus:plexus-utils:4.0.2) with InstrumentationAnalysisTransform
+Transforming plexus-xml-4.0.4.jar (org.codehaus.plexus:plexus-xml:4.0.4) with InstrumentationAnalysisTransform
+Transforming jdependency-2.11.jar (org.vafer:jdependency:2.11) with InstrumentationAnalysisTransform
+Transforming gson-2.8.6.jar (com.google.code.gson:gson:2.8.6) with InstrumentationAnalysisTransform
+Transforming guava-28.2-jre.jar (com.google.guava:guava:28.2-jre) with InstrumentationAnalysisTransform
+Transforming slf4j-api-1.7.36.jar (org.slf4j:slf4j-api:1.7.36) with InstrumentationAnalysisTransform
+Transforming itu-1.7.0.jar (com.ethlo.time:itu:1.7.0) with InstrumentationAnalysisTransform
+Transforming sqljet-1.1.10.jar (org.tmatesoft.sqljet:sqljet:1.1.10) with InstrumentationAnalysisTransform
+Transforming antlr-runtime-3.5.3.jar (org.antlr:antlr-runtime:3.5.3) with InstrumentationAnalysisTransform
+Transforming commons-logging-1.2.jar (commons-logging:commons-logging:1.2) with InstrumentationAnalysisTransform
+Transforming durian-io-1.2.0.jar (com.diffplug.durian:durian-io:1.2.0) with InstrumentationAnalysisTransform
+Transforming durian-collect-1.2.0.jar (com.diffplug.durian:durian-collect:1.2.0) with InstrumentationAnalysisTransform
+Transforming durian-core-1.2.0.jar (com.diffplug.durian:durian-core:1.2.0) with InstrumentationAnalysisTransform
+Transforming spotless-lib-2.45.0.jar (com.diffplug.spotless:spotless-lib:2.45.0) with InstrumentationAnalysisTransform
+Transforming fastdoubleparser-0.8.0.jar (ch.randelshofer:fastdoubleparser:0.8.0) with InstrumentationAnalysisTransform
+Transforming jzlib-1.1.2.jar (com.jcraft:jzlib:1.1.2) with InstrumentationAnalysisTransform
+Transforming okhttp-4.12.0.jar (com.squareup.okhttp3:okhttp:4.12.0) with InstrumentationAnalysisTransform
+Transforming okio-jvm-3.6.0.jar (com.squareup.okio:okio-jvm:3.6.0) with InstrumentationAnalysisTransform
+Transforming kotlin-stdlib-jdk8-1.9.10.jar (org.jetbrains.kotlin:kotlin-stdlib-jdk8:1.9.10) with InstrumentationAnalysisTransform
+Transforming jsch.agentproxy.svnkit-trilead-ssh2-0.0.7.jar (com.jcraft:jsch.agentproxy.svnkit-trilead-ssh2:0.0.7) with InstrumentationAnalysisTransform
+Transforming trilead-ssh2-1.0.0-build220.jar (com.trilead:trilead-ssh2:1.0.0-build220) with InstrumentationAnalysisTransform
+Transforming sequence-library-1.0.3.jar (de.regnis.q.sequence:sequence-library:1.0.3) with InstrumentationAnalysisTransform
+Transforming commons-collections-3.2.1.jar (commons-collections:commons-collections:3.2.1) with InstrumentationAnalysisTransform
+Transforming commons-lang-2.6.jar (commons-lang:commons-lang:2.6) with InstrumentationAnalysisTransform
+Transforming commons-cli-1.2.jar (commons-cli:commons-cli:1.2) with InstrumentationAnalysisTransform
+Transforming ant-launcher-1.10.15.jar (org.apache.ant:ant-launcher:1.10.15) with InstrumentationAnalysisTransform
+Transforming maven-xml-impl-4.0.0-alpha-9.jar (org.apache.maven:maven-xml-impl:4.0.0-alpha-9) with InstrumentationAnalysisTransform
+Transforming log4j-api-2.24.1.jar (org.apache.logging.log4j:log4j-api:2.24.1) with InstrumentationAnalysisTransform
+Transforming failureaccess-1.0.1.jar (com.google.guava:failureaccess:1.0.1) with InstrumentationAnalysisTransform
+Transforming listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar (com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava) with InstrumentationAnalysisTransform
+Transforming jsr305-3.0.2.jar (com.google.code.findbugs:jsr305:3.0.2) with InstrumentationAnalysisTransform
+Transforming checker-qual-2.10.0.jar (org.checkerframework:checker-qual:2.10.0) with InstrumentationAnalysisTransform
+Transforming error_prone_annotations-2.3.4.jar (com.google.errorprone:error_prone_annotations:2.3.4) with InstrumentationAnalysisTransform
+Transforming j2objc-annotations-1.3.jar (com.google.j2objc:j2objc-annotations:1.3) with InstrumentationAnalysisTransform
+Transforming JavaEWAH-1.2.3.jar (com.googlecode.javaewah:JavaEWAH:1.2.3) with InstrumentationAnalysisTransform
+Transforming concurrent-trees-2.6.1.jar (com.googlecode.concurrent-trees:concurrent-trees:2.6.1) with InstrumentationAnalysisTransform
+Transforming kotlin-stdlib-jdk7-1.9.10.jar (org.jetbrains.kotlin:kotlin-stdlib-jdk7:1.9.10) with InstrumentationAnalysisTransform
+Transforming kotlin-stdlib-1.9.10.jar (org.jetbrains.kotlin:kotlin-stdlib:1.9.10) with InstrumentationAnalysisTransform
+Transforming jsch.agentproxy.usocket-nc-0.0.7.jar (com.jcraft:jsch.agentproxy.usocket-nc:0.0.7) with InstrumentationAnalysisTransform
+Transforming jsch.agentproxy.sshagent-0.0.7.jar (com.jcraft:jsch.agentproxy.sshagent:0.0.7) with InstrumentationAnalysisTransform
+Transforming jsch.agentproxy.core-0.0.7.jar (com.jcraft:jsch.agentproxy.core:0.0.7) with InstrumentationAnalysisTransform
+Transforming maven-api-xml-4.0.0-alpha-9.jar (org.apache.maven:maven-api-xml:4.0.0-alpha-9) with InstrumentationAnalysisTransform
+Transforming woodstox-core-6.5.1.jar (com.fasterxml.woodstox:woodstox-core:6.5.1) with InstrumentationAnalysisTransform
+Transforming xz-1.9.jar (org.tukaani:xz:1.9) with InstrumentationAnalysisTransform
+Transforming durian-swt.os-4.2.2.jar (com.diffplug.durian:durian-swt.os:4.2.2) with InstrumentationAnalysisTransform
+Transforming kotlin-stdlib-common-1.9.10.jar (org.jetbrains.kotlin:kotlin-stdlib-common:1.9.10) with InstrumentationAnalysisTransform
+Transforming annotations-13.0.jar (org.jetbrains:annotations:13.0) with InstrumentationAnalysisTransform
+Transforming platform-3.4.0.jar (net.java.dev.jna:platform:3.4.0) with InstrumentationAnalysisTransform
+Transforming maven-api-meta-4.0.0-alpha-9.jar (org.apache.maven:maven-api-meta:4.0.0-alpha-9) with InstrumentationAnalysisTransform
+Transforming stax2-api-4.2.1.jar (org.codehaus.woodstox:stax2-api:4.2.1) with InstrumentationAnalysisTransform
+Transforming main (project :build-tools-internal) with InstrumentationAnalysisTransform
+Transforming main (project :build-tools-internal) with InstrumentationAnalysisTransform
+Transforming main (project :build-conventions) with InstrumentationAnalysisTransform
+Transforming main (project :build-tools) with InstrumentationAnalysisTransform
+Transforming main (project :build-tools) with InstrumentationAnalysisTransform
+Transforming main (project :build-tools:reaper) with InstrumentationAnalysisTransform
+Transforming shadow-gradle-plugin-8.3.5.jar (com.gradleup.shadow:shadow-gradle-plugin:8.3.5) with InstrumentationAnalysisTransform
+Transforming shadow-gradle-plugin-8.3.5.jar (com.gradleup.shadow:shadow-gradle-plugin:8.3.5) with MergeInstrumentationAnalysisTransform
+Transforming log4j-core-2.24.1.jar (org.apache.logging.log4j:log4j-core:2.24.1) with InstrumentationAnalysisTransform
+Transforming log4j-core-2.24.1.jar (org.apache.logging.log4j:log4j-core:2.24.1) with MergeInstrumentationAnalysisTransform
+Transforming shadow-gradle-plugin-8.3.5.jar (com.gradleup.shadow:shadow-gradle-plugin:8.3.5) with ExternalDependencyInstrumentingArtifactTransform
+Transforming develocity-gradle-plugin-3.18.1.jar (com.gradle:develocity-gradle-plugin:3.18.1) with InstrumentationAnalysisTransform
+Transforming develocity-gradle-plugin-3.18.1.jar (com.gradle:develocity-gradle-plugin:3.18.1) with MergeInstrumentationAnalysisTransform
+Transforming log4j-core-2.24.1.jar (org.apache.logging.log4j:log4j-core:2.24.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jackson-dataformat-yaml-2.15.0.jar (com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:2.15.0) with InstrumentationAnalysisTransform
+Transforming jackson-dataformat-yaml-2.15.0.jar (com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:2.15.0) with MergeInstrumentationAnalysisTransform
+Transforming develocity-gradle-plugin-3.18.1.jar (com.gradle:develocity-gradle-plugin:3.18.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming json-schema-validator-1.0.72.jar (com.networknt:json-schema-validator:1.0.72) with InstrumentationAnalysisTransform
+Transforming json-schema-validator-1.0.72.jar (com.networknt:json-schema-validator:1.0.72) with MergeInstrumentationAnalysisTransform
+Transforming jackson-dataformat-yaml-2.15.0.jar (com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:2.15.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jackson-databind-2.15.0.jar (com.fasterxml.jackson.core:jackson-databind:2.15.0) with InstrumentationAnalysisTransform
+Transforming jackson-databind-2.15.0.jar (com.fasterxml.jackson.core:jackson-databind:2.15.0) with MergeInstrumentationAnalysisTransform
+Transforming json-schema-validator-1.0.72.jar (com.networknt:json-schema-validator:1.0.72) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jackson-core-2.15.0.jar (com.fasterxml.jackson.core:jackson-core:2.15.0) with InstrumentationAnalysisTransform
+Transforming jackson-databind-2.15.0.jar (com.fasterxml.jackson.core:jackson-databind:2.15.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jackson-core-2.15.0.jar (com.fasterxml.jackson.core:jackson-core:2.15.0) with MergeInstrumentationAnalysisTransform
+Transforming jackson-annotations-2.15.0.jar (com.fasterxml.jackson.core:jackson-annotations:2.15.0) with InstrumentationAnalysisTransform
+Transforming jackson-annotations-2.15.0.jar (com.fasterxml.jackson.core:jackson-annotations:2.15.0) with MergeInstrumentationAnalysisTransform
+Transforming jackson-core-2.15.0.jar (com.fasterxml.jackson.core:jackson-core:2.15.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming apache-rat-0.11.jar (org.apache.rat:apache-rat:0.11) with InstrumentationAnalysisTransform
+Transforming jackson-annotations-2.15.0.jar (com.fasterxml.jackson.core:jackson-annotations:2.15.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming apache-rat-0.11.jar (org.apache.rat:apache-rat:0.11) with MergeInstrumentationAnalysisTransform
+Transforming apache-rat-tasks-0.11.jar (org.apache.rat:apache-rat-tasks:0.11) with InstrumentationAnalysisTransform
+Transforming apache-rat-tasks-0.11.jar (org.apache.rat:apache-rat-tasks:0.11) with MergeInstrumentationAnalysisTransform
+Transforming apache-rat-0.11.jar (org.apache.rat:apache-rat:0.11) with ExternalDependencyInstrumentingArtifactTransform
+Transforming apache-rat-core-0.11.jar (org.apache.rat:apache-rat-core:0.11) with InstrumentationAnalysisTransform
+Transforming apache-rat-core-0.11.jar (org.apache.rat:apache-rat-core:0.11) with MergeInstrumentationAnalysisTransform
+Transforming apache-rat-tasks-0.11.jar (org.apache.rat:apache-rat-tasks:0.11) with ExternalDependencyInstrumentingArtifactTransform
+Transforming commons-compress-1.26.1.jar (org.apache.commons:commons-compress:1.26.1) with InstrumentationAnalysisTransform
+Transforming commons-compress-1.26.1.jar (org.apache.commons:commons-compress:1.26.1) with MergeInstrumentationAnalysisTransform
+Transforming httpclient-4.5.14.jar (org.apache.httpcomponents:httpclient:4.5.14) with InstrumentationAnalysisTransform
+Transforming apache-rat-core-0.11.jar (org.apache.rat:apache-rat-core:0.11) with ExternalDependencyInstrumentingArtifactTransform
+Transforming httpclient-4.5.14.jar (org.apache.httpcomponents:httpclient:4.5.14) with MergeInstrumentationAnalysisTransform
+Transforming gradle-info-plugin-11.3.3.jar (com.netflix.nebula:gradle-info-plugin:11.3.3) with InstrumentationAnalysisTransform
+Transforming commons-compress-1.26.1.jar (org.apache.commons:commons-compress:1.26.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming httpclient-4.5.14.jar (org.apache.httpcomponents:httpclient:4.5.14) with ExternalDependencyInstrumentingArtifactTransform
+Transforming gradle-info-plugin-11.3.3.jar (com.netflix.nebula:gradle-info-plugin:11.3.3) with MergeInstrumentationAnalysisTransform
+Transforming spotless-plugin-gradle-6.25.0.jar (com.diffplug.spotless:spotless-plugin-gradle:6.25.0) with InstrumentationAnalysisTransform
+Transforming spotless-plugin-gradle-6.25.0.jar (com.diffplug.spotless:spotless-plugin-gradle:6.25.0) with MergeInstrumentationAnalysisTransform
+Transforming gradle-info-plugin-11.3.3.jar (com.netflix.nebula:gradle-info-plugin:11.3.3) with ExternalDependencyInstrumentingArtifactTransform
+Transforming spotless-lib-extra-2.45.0.jar (com.diffplug.spotless:spotless-lib-extra:2.45.0) with InstrumentationAnalysisTransform
+Transforming spotless-lib-extra-2.45.0.jar (com.diffplug.spotless:spotless-lib-extra:2.45.0) with MergeInstrumentationAnalysisTransform
+Transforming org.eclipse.jgit-6.7.0.202309050840-r.jar (org.eclipse.jgit:org.eclipse.jgit:6.7.0.202309050840-r) with InstrumentationAnalysisTransform
+Transforming spotless-plugin-gradle-6.25.0.jar (com.diffplug.spotless:spotless-plugin-gradle:6.25.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming spotless-lib-extra-2.45.0.jar (com.diffplug.spotless:spotless-lib-extra:2.45.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming org.eclipse.jgit-6.7.0.202309050840-r.jar (org.eclipse.jgit:org.eclipse.jgit:6.7.0.202309050840-r) with MergeInstrumentationAnalysisTransform
+Transforming commons-codec-1.16.1.jar (commons-codec:commons-codec:1.16.1) with InstrumentationAnalysisTransform
+Transforming commons-codec-1.16.1.jar (commons-codec:commons-codec:1.16.1) with MergeInstrumentationAnalysisTransform
+Transforming org.eclipse.jgit-6.7.0.202309050840-r.jar (org.eclipse.jgit:org.eclipse.jgit:6.7.0.202309050840-r) with ExternalDependencyInstrumentingArtifactTransform
+Transforming svnkit-1.8.12.jar (org.tmatesoft.svnkit:svnkit:1.8.12) with InstrumentationAnalysisTransform
+Transforming commons-codec-1.16.1.jar (commons-codec:commons-codec:1.16.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming svnkit-1.8.12.jar (org.tmatesoft.svnkit:svnkit:1.8.12) with MergeInstrumentationAnalysisTransform
+Transforming jna-platform-5.7.0.jar (net.java.dev.jna:jna-platform:5.7.0) with InstrumentationAnalysisTransform
+Transforming svnkit-1.8.12.jar (org.tmatesoft.svnkit:svnkit:1.8.12) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jna-platform-5.7.0.jar (net.java.dev.jna:jna-platform:5.7.0) with MergeInstrumentationAnalysisTransform
+Transforming jsch.agentproxy.connector-factory-0.0.7.jar (com.jcraft:jsch.agentproxy.connector-factory:0.0.7) with InstrumentationAnalysisTransform
+Transforming jna-platform-5.7.0.jar (net.java.dev.jna:jna-platform:5.7.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jsch.agentproxy.connector-factory-0.0.7.jar (com.jcraft:jsch.agentproxy.connector-factory:0.0.7) with MergeInstrumentationAnalysisTransform
+Transforming jsch.agentproxy.usocket-jna-0.0.7.jar (com.jcraft:jsch.agentproxy.usocket-jna:0.0.7) with InstrumentationAnalysisTransform
+Transforming jsch.agentproxy.connector-factory-0.0.7.jar (com.jcraft:jsch.agentproxy.connector-factory:0.0.7) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jsch.agentproxy.usocket-jna-0.0.7.jar (com.jcraft:jsch.agentproxy.usocket-jna:0.0.7) with MergeInstrumentationAnalysisTransform
+Transforming jsch.agentproxy.pageant-0.0.7.jar (com.jcraft:jsch.agentproxy.pageant:0.0.7) with InstrumentationAnalysisTransform
+Transforming jsch.agentproxy.usocket-jna-0.0.7.jar (com.jcraft:jsch.agentproxy.usocket-jna:0.0.7) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jsch.agentproxy.pageant-0.0.7.jar (com.jcraft:jsch.agentproxy.pageant:0.0.7) with MergeInstrumentationAnalysisTransform
+Transforming jna-5.10.0.jar (net.java.dev.jna:jna:5.10.0) with InstrumentationAnalysisTransform
+Transforming jsch.agentproxy.pageant-0.0.7.jar (com.jcraft:jsch.agentproxy.pageant:0.0.7) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jna-5.10.0.jar (net.java.dev.jna:jna:5.10.0) with MergeInstrumentationAnalysisTransform
+Transforming gradle-idea-ext-1.1.4.jar (gradle.plugin.org.jetbrains.gradle.plugin.idea-ext:gradle-idea-ext:1.1.4) with InstrumentationAnalysisTransform
+Transforming jna-5.10.0.jar (net.java.dev.jna:jna:5.10.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming gradle-idea-ext-1.1.4.jar (gradle.plugin.org.jetbrains.gradle.plugin.idea-ext:gradle-idea-ext:1.1.4) with MergeInstrumentationAnalysisTransform
+Transforming forbiddenapis-3.8.jar (de.thetaphi:forbiddenapis:3.8) with InstrumentationAnalysisTransform
+Transforming gradle-idea-ext-1.1.4.jar (gradle.plugin.org.jetbrains.gradle.plugin.idea-ext:gradle-idea-ext:1.1.4) with ExternalDependencyInstrumentingArtifactTransform
+Transforming forbiddenapis-3.8.jar (de.thetaphi:forbiddenapis:3.8) with MergeInstrumentationAnalysisTransform
+Transforming gradle-docker-compose-plugin-0.17.5.jar (com.avast.gradle:gradle-docker-compose-plugin:0.17.5) with InstrumentationAnalysisTransform
+Transforming gradle-docker-compose-plugin-0.17.5.jar (com.avast.gradle:gradle-docker-compose-plugin:0.17.5) with MergeInstrumentationAnalysisTransform
+Transforming forbiddenapis-3.8.jar (de.thetaphi:forbiddenapis:3.8) with ExternalDependencyInstrumentingArtifactTransform
+Transforming maven-model-3.6.2.jar (org.apache.maven:maven-model:3.6.2) with InstrumentationAnalysisTransform
+Transforming gradle-docker-compose-plugin-0.17.5.jar (com.avast.gradle:gradle-docker-compose-plugin:0.17.5) with ExternalDependencyInstrumentingArtifactTransform
+Transforming maven-model-3.6.2.jar (org.apache.maven:maven-model:3.6.2) with MergeInstrumentationAnalysisTransform
+Transforming ST4-4.3.4.jar (org.antlr:ST4:4.3.4) with InstrumentationAnalysisTransform
+Transforming ST4-4.3.4.jar (org.antlr:ST4:4.3.4) with MergeInstrumentationAnalysisTransform
+Transforming maven-model-3.6.2.jar (org.apache.maven:maven-model:3.6.2) with ExternalDependencyInstrumentingArtifactTransform
+Transforming asm-commons-9.7.1.jar (org.ow2.asm:asm-commons:9.7.1) with InstrumentationAnalysisTransform
+Transforming ST4-4.3.4.jar (org.antlr:ST4:4.3.4) with ExternalDependencyInstrumentingArtifactTransform
+Transforming asm-commons-9.7.1.jar (org.ow2.asm:asm-commons:9.7.1) with MergeInstrumentationAnalysisTransform
+Transforming asm-tree-9.7.1.jar (org.ow2.asm:asm-tree:9.7.1) with InstrumentationAnalysisTransform
+Transforming asm-commons-9.7.1.jar (org.ow2.asm:asm-commons:9.7.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming asm-tree-9.7.1.jar (org.ow2.asm:asm-tree:9.7.1) with MergeInstrumentationAnalysisTransform
+Transforming asm-9.7.1.jar (org.ow2.asm:asm:9.7.1) with InstrumentationAnalysisTransform
+Transforming asm-tree-9.7.1.jar (org.ow2.asm:asm-tree:9.7.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming asm-9.7.1.jar (org.ow2.asm:asm:9.7.1) with MergeInstrumentationAnalysisTransform
+Transforming httpcore-4.4.16.jar (org.apache.httpcomponents:httpcore:4.4.16) with InstrumentationAnalysisTransform
+Transforming asm-9.7.1.jar (org.ow2.asm:asm:9.7.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming httpcore-4.4.16.jar (org.apache.httpcomponents:httpcore:4.4.16) with MergeInstrumentationAnalysisTransform
+Transforming snakeyaml-2.0.jar (org.yaml:snakeyaml:2.0) with InstrumentationAnalysisTransform
+Transforming snakeyaml-2.0.jar (org.yaml:snakeyaml:2.0) with MergeInstrumentationAnalysisTransform
+Transforming javaparser-core-3.18.0.jar (com.github.javaparser:javaparser-core:3.18.0) with InstrumentationAnalysisTransform
+Transforming httpcore-4.4.16.jar (org.apache.httpcomponents:httpcore:4.4.16) with ExternalDependencyInstrumentingArtifactTransform
+Transforming javaparser-core-3.18.0.jar (com.github.javaparser:javaparser-core:3.18.0) with MergeInstrumentationAnalysisTransform
+Transforming solstice-1.7.5.jar (dev.equo.ide:solstice:1.7.5) with InstrumentationAnalysisTransform
+Transforming solstice-1.7.5.jar (dev.equo.ide:solstice:1.7.5) with MergeInstrumentationAnalysisTransform
+Transforming org.eclipse.osgi-3.18.300.jar (org.eclipse.platform:org.eclipse.osgi:3.18.300) with InstrumentationAnalysisTransform
+Transforming snakeyaml-2.0.jar (org.yaml:snakeyaml:2.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming solstice-1.7.5.jar (dev.equo.ide:solstice:1.7.5) with ExternalDependencyInstrumentingArtifactTransform
+Transforming org.eclipse.osgi-3.18.300.jar (org.eclipse.platform:org.eclipse.osgi:3.18.300) with MergeInstrumentationAnalysisTransform
+Transforming javaparser-core-3.18.0.jar (com.github.javaparser:javaparser-core:3.18.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming ant-1.10.15.jar (org.apache.ant:ant:1.10.15) with InstrumentationAnalysisTransform
+Transforming org.eclipse.osgi-3.18.300.jar (org.eclipse.platform:org.eclipse.osgi:3.18.300) with ExternalDependencyInstrumentingArtifactTransform
+Transforming ant-1.10.15.jar (org.apache.ant:ant:1.10.15) with MergeInstrumentationAnalysisTransform
+Transforming commons-io-2.17.0.jar (commons-io:commons-io:2.17.0) with InstrumentationAnalysisTransform
+Transforming commons-io-2.17.0.jar (commons-io:commons-io:2.17.0) with MergeInstrumentationAnalysisTransform
+Transforming commons-lang3-3.14.0.jar (org.apache.commons:commons-lang3:3.14.0) with InstrumentationAnalysisTransform
+Transforming ant-1.10.15.jar (org.apache.ant:ant:1.10.15) with ExternalDependencyInstrumentingArtifactTransform
+Transforming commons-io-2.17.0.jar (commons-io:commons-io:2.17.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming commons-lang3-3.14.0.jar (org.apache.commons:commons-lang3:3.14.0) with MergeInstrumentationAnalysisTransform
+Transforming p4java-2015.2.1365273.jar (com.perforce:p4java:2015.2.1365273) with InstrumentationAnalysisTransform
+Transforming p4java-2015.2.1365273.jar (com.perforce:p4java:2015.2.1365273) with MergeInstrumentationAnalysisTransform
+Transforming nebula-gradle-interop-2.0.0.jar (com.netflix.nebula:nebula-gradle-interop:2.0.0) with InstrumentationAnalysisTransform
+Transforming commons-lang3-3.14.0.jar (org.apache.commons:commons-lang3:3.14.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming p4java-2015.2.1365273.jar (com.perforce:p4java:2015.2.1365273) with ExternalDependencyInstrumentingArtifactTransform
+Transforming nebula-gradle-interop-2.0.0.jar (com.netflix.nebula:nebula-gradle-interop:2.0.0) with MergeInstrumentationAnalysisTransform
+Transforming gradle-contacts-plugin-6.0.0.jar (com.netflix.nebula:gradle-contacts-plugin:6.0.0) with InstrumentationAnalysisTransform
+Transforming gradle-contacts-plugin-6.0.0.jar (com.netflix.nebula:gradle-contacts-plugin:6.0.0) with MergeInstrumentationAnalysisTransform
+Transforming nebula-gradle-interop-2.0.0.jar (com.netflix.nebula:nebula-gradle-interop:2.0.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jdom2-2.0.6.1.jar (org.jdom:jdom2:2.0.6.1) with InstrumentationAnalysisTransform
+Transforming gradle-contacts-plugin-6.0.0.jar (com.netflix.nebula:gradle-contacts-plugin:6.0.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jdom2-2.0.6.1.jar (org.jdom:jdom2:2.0.6.1) with MergeInstrumentationAnalysisTransform
+Transforming plexus-utils-4.0.2.jar (org.codehaus.plexus:plexus-utils:4.0.2) with InstrumentationAnalysisTransform
+Transforming plexus-utils-4.0.2.jar (org.codehaus.plexus:plexus-utils:4.0.2) with MergeInstrumentationAnalysisTransform
+Transforming plexus-xml-4.0.4.jar (org.codehaus.plexus:plexus-xml:4.0.4) with InstrumentationAnalysisTransform
+Transforming jdom2-2.0.6.1.jar (org.jdom:jdom2:2.0.6.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming plexus-xml-4.0.4.jar (org.codehaus.plexus:plexus-xml:4.0.4) with MergeInstrumentationAnalysisTransform
+Transforming jdependency-2.11.jar (org.vafer:jdependency:2.11) with InstrumentationAnalysisTransform
+Transforming jdependency-2.11.jar (org.vafer:jdependency:2.11) with MergeInstrumentationAnalysisTransform
+Transforming plexus-utils-4.0.2.jar (org.codehaus.plexus:plexus-utils:4.0.2) with ExternalDependencyInstrumentingArtifactTransform
+Transforming plexus-xml-4.0.4.jar (org.codehaus.plexus:plexus-xml:4.0.4) with ExternalDependencyInstrumentingArtifactTransform
+Transforming gson-2.8.6.jar (com.google.code.gson:gson:2.8.6) with InstrumentationAnalysisTransform
+Transforming gson-2.8.6.jar (com.google.code.gson:gson:2.8.6) with MergeInstrumentationAnalysisTransform
+Transforming jdependency-2.11.jar (org.vafer:jdependency:2.11) with ExternalDependencyInstrumentingArtifactTransform
+Transforming guava-28.2-jre.jar (com.google.guava:guava:28.2-jre) with InstrumentationAnalysisTransform
+Transforming gson-2.8.6.jar (com.google.code.gson:gson:2.8.6) with ExternalDependencyInstrumentingArtifactTransform
+Transforming guava-28.2-jre.jar (com.google.guava:guava:28.2-jre) with MergeInstrumentationAnalysisTransform
+Transforming slf4j-api-1.7.36.jar (org.slf4j:slf4j-api:1.7.36) with InstrumentationAnalysisTransform
+Transforming slf4j-api-1.7.36.jar (org.slf4j:slf4j-api:1.7.36) with MergeInstrumentationAnalysisTransform
+Transforming itu-1.7.0.jar (com.ethlo.time:itu:1.7.0) with InstrumentationAnalysisTransform
+Transforming guava-28.2-jre.jar (com.google.guava:guava:28.2-jre) with ExternalDependencyInstrumentingArtifactTransform
+Transforming slf4j-api-1.7.36.jar (org.slf4j:slf4j-api:1.7.36) with ExternalDependencyInstrumentingArtifactTransform
+Transforming itu-1.7.0.jar (com.ethlo.time:itu:1.7.0) with MergeInstrumentationAnalysisTransform
+Transforming sqljet-1.1.10.jar (org.tmatesoft.sqljet:sqljet:1.1.10) with InstrumentationAnalysisTransform
+Transforming sqljet-1.1.10.jar (org.tmatesoft.sqljet:sqljet:1.1.10) with MergeInstrumentationAnalysisTransform
+Transforming itu-1.7.0.jar (com.ethlo.time:itu:1.7.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming antlr-runtime-3.5.3.jar (org.antlr:antlr-runtime:3.5.3) with InstrumentationAnalysisTransform
+Transforming sqljet-1.1.10.jar (org.tmatesoft.sqljet:sqljet:1.1.10) with ExternalDependencyInstrumentingArtifactTransform
+Transforming antlr-runtime-3.5.3.jar (org.antlr:antlr-runtime:3.5.3) with MergeInstrumentationAnalysisTransform
+Transforming commons-logging-1.2.jar (commons-logging:commons-logging:1.2) with InstrumentationAnalysisTransform
+Transforming commons-logging-1.2.jar (commons-logging:commons-logging:1.2) with MergeInstrumentationAnalysisTransform
+Transforming durian-io-1.2.0.jar (com.diffplug.durian:durian-io:1.2.0) with InstrumentationAnalysisTransform
+Transforming antlr-runtime-3.5.3.jar (org.antlr:antlr-runtime:3.5.3) with ExternalDependencyInstrumentingArtifactTransform
+Transforming commons-logging-1.2.jar (commons-logging:commons-logging:1.2) with ExternalDependencyInstrumentingArtifactTransform
+Transforming durian-io-1.2.0.jar (com.diffplug.durian:durian-io:1.2.0) with MergeInstrumentationAnalysisTransform
+Transforming durian-collect-1.2.0.jar (com.diffplug.durian:durian-collect:1.2.0) with InstrumentationAnalysisTransform
+Transforming durian-collect-1.2.0.jar (com.diffplug.durian:durian-collect:1.2.0) with MergeInstrumentationAnalysisTransform
+Transforming durian-io-1.2.0.jar (com.diffplug.durian:durian-io:1.2.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming durian-core-1.2.0.jar (com.diffplug.durian:durian-core:1.2.0) with InstrumentationAnalysisTransform
+Transforming durian-collect-1.2.0.jar (com.diffplug.durian:durian-collect:1.2.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming durian-core-1.2.0.jar (com.diffplug.durian:durian-core:1.2.0) with MergeInstrumentationAnalysisTransform
+Transforming spotless-lib-2.45.0.jar (com.diffplug.spotless:spotless-lib:2.45.0) with InstrumentationAnalysisTransform
+Transforming durian-core-1.2.0.jar (com.diffplug.durian:durian-core:1.2.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming spotless-lib-2.45.0.jar (com.diffplug.spotless:spotless-lib:2.45.0) with MergeInstrumentationAnalysisTransform
+Transforming fastdoubleparser-0.8.0.jar (ch.randelshofer:fastdoubleparser:0.8.0) with InstrumentationAnalysisTransform
+Transforming fastdoubleparser-0.8.0.jar (ch.randelshofer:fastdoubleparser:0.8.0) with MergeInstrumentationAnalysisTransform
+Transforming spotless-lib-2.45.0.jar (com.diffplug.spotless:spotless-lib:2.45.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jzlib-1.1.2.jar (com.jcraft:jzlib:1.1.2) with InstrumentationAnalysisTransform
+Transforming jzlib-1.1.2.jar (com.jcraft:jzlib:1.1.2) with MergeInstrumentationAnalysisTransform
+Transforming fastdoubleparser-0.8.0.jar (ch.randelshofer:fastdoubleparser:0.8.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming okhttp-4.12.0.jar (com.squareup.okhttp3:okhttp:4.12.0) with InstrumentationAnalysisTransform
+Transforming jzlib-1.1.2.jar (com.jcraft:jzlib:1.1.2) with ExternalDependencyInstrumentingArtifactTransform
+Transforming okhttp-4.12.0.jar (com.squareup.okhttp3:okhttp:4.12.0) with MergeInstrumentationAnalysisTransform
+Transforming okio-jvm-3.6.0.jar (com.squareup.okio:okio-jvm:3.6.0) with InstrumentationAnalysisTransform
+Transforming okio-jvm-3.6.0.jar (com.squareup.okio:okio-jvm:3.6.0) with MergeInstrumentationAnalysisTransform
+Transforming okhttp-4.12.0.jar (com.squareup.okhttp3:okhttp:4.12.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming kotlin-stdlib-jdk8-1.9.10.jar (org.jetbrains.kotlin:kotlin-stdlib-jdk8:1.9.10) with InstrumentationAnalysisTransform
+Transforming okio-jvm-3.6.0.jar (com.squareup.okio:okio-jvm:3.6.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming kotlin-stdlib-jdk8-1.9.10.jar (org.jetbrains.kotlin:kotlin-stdlib-jdk8:1.9.10) with MergeInstrumentationAnalysisTransform
+Transforming jsch.agentproxy.svnkit-trilead-ssh2-0.0.7.jar (com.jcraft:jsch.agentproxy.svnkit-trilead-ssh2:0.0.7) with InstrumentationAnalysisTransform
+Transforming jsch.agentproxy.svnkit-trilead-ssh2-0.0.7.jar (com.jcraft:jsch.agentproxy.svnkit-trilead-ssh2:0.0.7) with MergeInstrumentationAnalysisTransform
+Transforming trilead-ssh2-1.0.0-build220.jar (com.trilead:trilead-ssh2:1.0.0-build220) with InstrumentationAnalysisTransform
+Transforming kotlin-stdlib-jdk8-1.9.10.jar (org.jetbrains.kotlin:kotlin-stdlib-jdk8:1.9.10) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jsch.agentproxy.svnkit-trilead-ssh2-0.0.7.jar (com.jcraft:jsch.agentproxy.svnkit-trilead-ssh2:0.0.7) with ExternalDependencyInstrumentingArtifactTransform
+Transforming trilead-ssh2-1.0.0-build220.jar (com.trilead:trilead-ssh2:1.0.0-build220) with MergeInstrumentationAnalysisTransform
+Transforming sequence-library-1.0.3.jar (de.regnis.q.sequence:sequence-library:1.0.3) with InstrumentationAnalysisTransform
+Transforming sequence-library-1.0.3.jar (de.regnis.q.sequence:sequence-library:1.0.3) with MergeInstrumentationAnalysisTransform
+Transforming trilead-ssh2-1.0.0-build220.jar (com.trilead:trilead-ssh2:1.0.0-build220) with ExternalDependencyInstrumentingArtifactTransform
+Transforming commons-collections-3.2.1.jar (commons-collections:commons-collections:3.2.1) with InstrumentationAnalysisTransform
+Transforming commons-collections-3.2.1.jar (commons-collections:commons-collections:3.2.1) with MergeInstrumentationAnalysisTransform
+Transforming commons-lang-2.6.jar (commons-lang:commons-lang:2.6) with InstrumentationAnalysisTransform
+Transforming sequence-library-1.0.3.jar (de.regnis.q.sequence:sequence-library:1.0.3) with ExternalDependencyInstrumentingArtifactTransform
+Transforming commons-lang-2.6.jar (commons-lang:commons-lang:2.6) with MergeInstrumentationAnalysisTransform
+Transforming commons-cli-1.2.jar (commons-cli:commons-cli:1.2) with InstrumentationAnalysisTransform
+Transforming commons-collections-3.2.1.jar (commons-collections:commons-collections:3.2.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming commons-lang-2.6.jar (commons-lang:commons-lang:2.6) with ExternalDependencyInstrumentingArtifactTransform
+Transforming commons-cli-1.2.jar (commons-cli:commons-cli:1.2) with MergeInstrumentationAnalysisTransform
+Transforming ant-launcher-1.10.15.jar (org.apache.ant:ant-launcher:1.10.15) with InstrumentationAnalysisTransform
+Transforming commons-cli-1.2.jar (commons-cli:commons-cli:1.2) with ExternalDependencyInstrumentingArtifactTransform
+Transforming ant-launcher-1.10.15.jar (org.apache.ant:ant-launcher:1.10.15) with MergeInstrumentationAnalysisTransform
+Transforming maven-xml-impl-4.0.0-alpha-9.jar (org.apache.maven:maven-xml-impl:4.0.0-alpha-9) with InstrumentationAnalysisTransform
+Transforming ant-launcher-1.10.15.jar (org.apache.ant:ant-launcher:1.10.15) with ExternalDependencyInstrumentingArtifactTransform
+Transforming maven-xml-impl-4.0.0-alpha-9.jar (org.apache.maven:maven-xml-impl:4.0.0-alpha-9) with MergeInstrumentationAnalysisTransform
+Transforming log4j-api-2.24.1.jar (org.apache.logging.log4j:log4j-api:2.24.1) with InstrumentationAnalysisTransform
+Transforming log4j-api-2.24.1.jar (org.apache.logging.log4j:log4j-api:2.24.1) with MergeInstrumentationAnalysisTransform
+Transforming maven-xml-impl-4.0.0-alpha-9.jar (org.apache.maven:maven-xml-impl:4.0.0-alpha-9) with ExternalDependencyInstrumentingArtifactTransform
+Transforming failureaccess-1.0.1.jar (com.google.guava:failureaccess:1.0.1) with InstrumentationAnalysisTransform
+Transforming log4j-api-2.24.1.jar (org.apache.logging.log4j:log4j-api:2.24.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming failureaccess-1.0.1.jar (com.google.guava:failureaccess:1.0.1) with MergeInstrumentationAnalysisTransform
+Transforming listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar (com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava) with InstrumentationAnalysisTransform
+Transforming failureaccess-1.0.1.jar (com.google.guava:failureaccess:1.0.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar (com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava) with MergeInstrumentationAnalysisTransform
+Transforming jsr305-3.0.2.jar (com.google.code.findbugs:jsr305:3.0.2) with InstrumentationAnalysisTransform
+Transforming listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar (com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jsr305-3.0.2.jar (com.google.code.findbugs:jsr305:3.0.2) with MergeInstrumentationAnalysisTransform
+Transforming checker-qual-2.10.0.jar (org.checkerframework:checker-qual:2.10.0) with InstrumentationAnalysisTransform
+Transforming checker-qual-2.10.0.jar (org.checkerframework:checker-qual:2.10.0) with MergeInstrumentationAnalysisTransform
+Transforming error_prone_annotations-2.3.4.jar (com.google.errorprone:error_prone_annotations:2.3.4) with InstrumentationAnalysisTransform
+Transforming jsr305-3.0.2.jar (com.google.code.findbugs:jsr305:3.0.2) with ExternalDependencyInstrumentingArtifactTransform
+Transforming error_prone_annotations-2.3.4.jar (com.google.errorprone:error_prone_annotations:2.3.4) with MergeInstrumentationAnalysisTransform
+Transforming j2objc-annotations-1.3.jar (com.google.j2objc:j2objc-annotations:1.3) with InstrumentationAnalysisTransform
+Transforming checker-qual-2.10.0.jar (org.checkerframework:checker-qual:2.10.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming error_prone_annotations-2.3.4.jar (com.google.errorprone:error_prone_annotations:2.3.4) with ExternalDependencyInstrumentingArtifactTransform
+Transforming j2objc-annotations-1.3.jar (com.google.j2objc:j2objc-annotations:1.3) with MergeInstrumentationAnalysisTransform
+Transforming JavaEWAH-1.2.3.jar (com.googlecode.javaewah:JavaEWAH:1.2.3) with InstrumentationAnalysisTransform
+Transforming JavaEWAH-1.2.3.jar (com.googlecode.javaewah:JavaEWAH:1.2.3) with MergeInstrumentationAnalysisTransform
+Transforming concurrent-trees-2.6.1.jar (com.googlecode.concurrent-trees:concurrent-trees:2.6.1) with InstrumentationAnalysisTransform
+Transforming j2objc-annotations-1.3.jar (com.google.j2objc:j2objc-annotations:1.3) with ExternalDependencyInstrumentingArtifactTransform
+Transforming concurrent-trees-2.6.1.jar (com.googlecode.concurrent-trees:concurrent-trees:2.6.1) with MergeInstrumentationAnalysisTransform
+Transforming JavaEWAH-1.2.3.jar (com.googlecode.javaewah:JavaEWAH:1.2.3) with ExternalDependencyInstrumentingArtifactTransform
+Transforming kotlin-stdlib-jdk7-1.9.10.jar (org.jetbrains.kotlin:kotlin-stdlib-jdk7:1.9.10) with InstrumentationAnalysisTransform
+Transforming concurrent-trees-2.6.1.jar (com.googlecode.concurrent-trees:concurrent-trees:2.6.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming kotlin-stdlib-jdk7-1.9.10.jar (org.jetbrains.kotlin:kotlin-stdlib-jdk7:1.9.10) with MergeInstrumentationAnalysisTransform
+Transforming kotlin-stdlib-1.9.10.jar (org.jetbrains.kotlin:kotlin-stdlib:1.9.10) with InstrumentationAnalysisTransform
+Transforming kotlin-stdlib-1.9.10.jar (org.jetbrains.kotlin:kotlin-stdlib:1.9.10) with MergeInstrumentationAnalysisTransform
+Transforming kotlin-stdlib-jdk7-1.9.10.jar (org.jetbrains.kotlin:kotlin-stdlib-jdk7:1.9.10) with ExternalDependencyInstrumentingArtifactTransform
+Transforming kotlin-stdlib-1.9.10.jar (org.jetbrains.kotlin:kotlin-stdlib:1.9.10) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jsch.agentproxy.usocket-nc-0.0.7.jar (com.jcraft:jsch.agentproxy.usocket-nc:0.0.7) with InstrumentationAnalysisTransform
+Transforming jsch.agentproxy.usocket-nc-0.0.7.jar (com.jcraft:jsch.agentproxy.usocket-nc:0.0.7) with MergeInstrumentationAnalysisTransform
+Transforming jsch.agentproxy.sshagent-0.0.7.jar (com.jcraft:jsch.agentproxy.sshagent:0.0.7) with InstrumentationAnalysisTransform
+Transforming jsch.agentproxy.sshagent-0.0.7.jar (com.jcraft:jsch.agentproxy.sshagent:0.0.7) with MergeInstrumentationAnalysisTransform
+Transforming jsch.agentproxy.core-0.0.7.jar (com.jcraft:jsch.agentproxy.core:0.0.7) with InstrumentationAnalysisTransform
+Transforming jsch.agentproxy.usocket-nc-0.0.7.jar (com.jcraft:jsch.agentproxy.usocket-nc:0.0.7) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jsch.agentproxy.core-0.0.7.jar (com.jcraft:jsch.agentproxy.core:0.0.7) with MergeInstrumentationAnalysisTransform
+Transforming jsch.agentproxy.sshagent-0.0.7.jar (com.jcraft:jsch.agentproxy.sshagent:0.0.7) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jsch.agentproxy.core-0.0.7.jar (com.jcraft:jsch.agentproxy.core:0.0.7) with ExternalDependencyInstrumentingArtifactTransform
+Transforming maven-api-xml-4.0.0-alpha-9.jar (org.apache.maven:maven-api-xml:4.0.0-alpha-9) with InstrumentationAnalysisTransform
+Transforming maven-api-xml-4.0.0-alpha-9.jar (org.apache.maven:maven-api-xml:4.0.0-alpha-9) with MergeInstrumentationAnalysisTransform
+Transforming woodstox-core-6.5.1.jar (com.fasterxml.woodstox:woodstox-core:6.5.1) with InstrumentationAnalysisTransform
+Transforming woodstox-core-6.5.1.jar (com.fasterxml.woodstox:woodstox-core:6.5.1) with MergeInstrumentationAnalysisTransform
+Transforming maven-api-xml-4.0.0-alpha-9.jar (org.apache.maven:maven-api-xml:4.0.0-alpha-9) with ExternalDependencyInstrumentingArtifactTransform
+Transforming xz-1.9.jar (org.tukaani:xz:1.9) with InstrumentationAnalysisTransform
+Transforming xz-1.9.jar (org.tukaani:xz:1.9) with MergeInstrumentationAnalysisTransform
+Transforming woodstox-core-6.5.1.jar (com.fasterxml.woodstox:woodstox-core:6.5.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming xz-1.9.jar (org.tukaani:xz:1.9) with ExternalDependencyInstrumentingArtifactTransform
+Transforming durian-swt.os-4.2.2.jar (com.diffplug.durian:durian-swt.os:4.2.2) with InstrumentationAnalysisTransform
+Transforming durian-swt.os-4.2.2.jar (com.diffplug.durian:durian-swt.os:4.2.2) with MergeInstrumentationAnalysisTransform
+Transforming kotlin-stdlib-common-1.9.10.jar (org.jetbrains.kotlin:kotlin-stdlib-common:1.9.10) with InstrumentationAnalysisTransform
+Transforming kotlin-stdlib-common-1.9.10.jar (org.jetbrains.kotlin:kotlin-stdlib-common:1.9.10) with MergeInstrumentationAnalysisTransform
+Transforming durian-swt.os-4.2.2.jar (com.diffplug.durian:durian-swt.os:4.2.2) with ExternalDependencyInstrumentingArtifactTransform
+Transforming annotations-13.0.jar (org.jetbrains:annotations:13.0) with InstrumentationAnalysisTransform
+Transforming annotations-13.0.jar (org.jetbrains:annotations:13.0) with MergeInstrumentationAnalysisTransform
+Transforming platform-3.4.0.jar (net.java.dev.jna:platform:3.4.0) with InstrumentationAnalysisTransform
+Transforming kotlin-stdlib-common-1.9.10.jar (org.jetbrains.kotlin:kotlin-stdlib-common:1.9.10) with ExternalDependencyInstrumentingArtifactTransform
+Transforming annotations-13.0.jar (org.jetbrains:annotations:13.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming platform-3.4.0.jar (net.java.dev.jna:platform:3.4.0) with MergeInstrumentationAnalysisTransform
+Transforming maven-api-meta-4.0.0-alpha-9.jar (org.apache.maven:maven-api-meta:4.0.0-alpha-9) with InstrumentationAnalysisTransform
+Transforming maven-api-meta-4.0.0-alpha-9.jar (org.apache.maven:maven-api-meta:4.0.0-alpha-9) with MergeInstrumentationAnalysisTransform
+Transforming platform-3.4.0.jar (net.java.dev.jna:platform:3.4.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming stax2-api-4.2.1.jar (org.codehaus.woodstox:stax2-api:4.2.1) with InstrumentationAnalysisTransform
+Transforming stax2-api-4.2.1.jar (org.codehaus.woodstox:stax2-api:4.2.1) with MergeInstrumentationAnalysisTransform
+Transforming maven-api-meta-4.0.0-alpha-9.jar (org.apache.maven:maven-api-meta:4.0.0-alpha-9) with ExternalDependencyInstrumentingArtifactTransform
+Transforming stax2-api-4.2.1.jar (org.codehaus.woodstox:stax2-api:4.2.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming build-tools-internal-9.0.0-SNAPSHOT.jar (project :build-tools-internal) with ProjectDependencyInstrumentingArtifactTransform
+Transforming build-conventions.jar (project :build-conventions) with ProjectDependencyInstrumentingArtifactTransform
+Transforming build-tools-9.0.0-SNAPSHOT.jar (project :build-tools) with ProjectDependencyInstrumentingArtifactTransform
+Transforming reaper.jar (project :build-tools:reaper) with ProjectDependencyInstrumentingArtifactTransform
+Settings evaluated using settings file '/Users/rene/dev/elastic/elasticsearch/settings.gradle'.
+Projects loaded. Root project using build file '/Users/rene/dev/elastic/elasticsearch/build.gradle'.
+Included projects: [root project 'elasticsearch', project ':benchmarks', project ':client', project ':distribution', project ':docs', project ':libs', project ':modules', project ':plugins', project ':qa', project ':rest-api-spec', project ':server', project ':test', project ':x-pack', project ':client:benchmark', project ':client:client-benchmark-noop-api-plugin', project ':client:rest', project ':client:sniffer', project ':client:test', project ':distribution:archives', project ':distribution:bwc', project ':distribution:docker', project ':distribution:packages', project ':distribution:tools', project ':libs:cli', project ':libs:core', project ':libs:dissect', project ':libs:entitlement', project ':libs:geo', project ':libs:grok', project ':libs:h3', project ':libs:log4j', project ':libs:logging', project ':libs:logstash-bridge', project ':libs:lz4', project ':libs:native', project ':libs:plugin-analysis-api', project ':libs:plugin-api', project ':libs:plugin-scanner', project ':libs:secure-sm', project ':libs:simdvec', project ':libs:ssl-config', project ':libs:tdigest', project ':libs:x-content', project ':modules:aggregations', project ':modules:analysis-common', project ':modules:apm', project ':modules:data-streams', project ':modules:dot-prefix-validation', project ':modules:health-shards-availability', project ':modules:ingest-attachment', project ':modules:ingest-common', project ':modules:ingest-geoip', project ':modules:ingest-user-agent', project ':modules:kibana', project ':modules:lang-expression', project ':modules:lang-mustache', project ':modules:lang-painless', project ':modules:legacy-geo', project ':modules:mapper-extras', project ':modules:parent-join', project ':modules:percolator', project ':modules:rank-eval', project ':modules:reindex', project ':modules:repository-azure', project ':modules:repository-gcs', project ':modules:repository-s3', project ':modules:repository-url', project ':modules:rest-root', project ':modules:runtime-fields-common', project ':modules:systemd', project ':modules:transport-netty4', project ':plugins:analysis-icu', project ':plugins:analysis-kuromoji', project ':plugins:analysis-nori', project ':plugins:analysis-phonetic', project ':plugins:analysis-smartcn', project ':plugins:analysis-stempel', project ':plugins:analysis-ukrainian', project ':plugins:discovery-azure-classic', project ':plugins:discovery-ec2', project ':plugins:discovery-gce', project ':plugins:mapper-annotated-text', project ':plugins:mapper-murmur3', project ':plugins:mapper-size', project ':plugins:repository-hdfs', project ':plugins:store-smb', project ':qa:ccs-common-rest', project ':qa:ccs-rolling-upgrade-remote-cluster', project ':qa:ccs-unavailable-clusters', project ':qa:custom-rest-controller', project ':qa:evil-tests', project ':qa:full-cluster-restart', project ':qa:logging-config', project ':qa:logging-spi', project ':qa:lucene-index-compatibility', project ':qa:mixed-cluster', project ':qa:multi-cluster-search', project ':qa:no-bootstrap-tests', project ':qa:packaging', project ':qa:remote-clusters', project ':qa:repository-multi-version', project ':qa:restricted-loggers', project ':qa:rolling-upgrade', project ':qa:rolling-upgrade-legacy', project ':qa:smoke-test-http', project ':qa:smoke-test-ingest-disabled', project ':qa:smoke-test-ingest-with-all-dependencies', project ':qa:smoke-test-multinode', project ':qa:smoke-test-plugins', project ':qa:stable-api', project ':qa:system-indices', project ':qa:unconfigured-node-name', project 
':qa:verify-version-constants', project ':test:external-modules', project ':test:fixtures', project ':test:framework', project ':test:immutable-collections-patch', project ':test:logger-usage', project ':test:metadata-extractor', project ':test:test-clusters', project ':test:x-content', project ':test:yaml-rest-runner', project ':x-pack:libs', project ':x-pack:license-tools', project ':x-pack:plugin', project ':x-pack:qa', project ':x-pack:rest-resources-zip', project ':x-pack:test', project ':distribution:archives:darwin-aarch64-tar', project ':distribution:archives:darwin-tar', project ':distribution:archives:integ-test-zip', project ':distribution:archives:linux-aarch64-tar', project ':distribution:archives:linux-tar', project ':distribution:archives:windows-zip', project ':distribution:bwc:bugfix', project ':distribution:bwc:bugfix2', project ':distribution:bwc:maintenance', project ':distribution:bwc:minor', project ':distribution:bwc:staged', project ':distribution:docker:cloud-ess-docker-aarch64-export', project ':distribution:docker:cloud-ess-docker-export', project ':distribution:docker:docker-aarch64-export', project ':distribution:docker:docker-export', project ':distribution:docker:ironbank-docker-aarch64-export', project ':distribution:docker:ironbank-docker-export', project ':distribution:docker:wolfi-docker-aarch64-export', project ':distribution:docker:wolfi-docker-export', project ':distribution:packages:aarch64-deb', project ':distribution:packages:aarch64-rpm', project ':distribution:packages:deb', project ':distribution:packages:rpm', project ':distribution:tools:ansi-console', project ':distribution:tools:cli-launcher', project ':distribution:tools:geoip-cli', project ':distribution:tools:java-version-checker', project ':distribution:tools:keystore-cli', project ':distribution:tools:plugin-cli', project ':distribution:tools:server-cli', project ':distribution:tools:windows-service-cli', project ':libs:entitlement:agent', project ':libs:entitlement:asm-provider', project ':libs:entitlement:bridge', project ':libs:entitlement:qa', project ':libs:entitlement:tools', project ':libs:native:native-libraries', project ':libs:x-content:impl', project ':modules:ingest-geoip:qa', project ':modules:lang-painless:spi', project ':plugins:discovery-ec2:qa', project ':plugins:discovery-gce:qa', project ':plugins:repository-hdfs:hadoop-client-api', project ':qa:stable-api:logging', project ':qa:stable-api:plugin-analysis-api', project ':qa:stable-api:plugin-api', project ':test:external-modules:test-apm-integration', project ':test:external-modules:test-delayed-aggs', project ':test:external-modules:test-die-with-dignity', project ':test:external-modules:test-error-query', project ':test:external-modules:test-esql-heap-attack', project ':test:external-modules:test-jvm-crash', project ':test:external-modules:test-latency-simulating-directory', project ':test:fixtures:aws-sts-fixture', project ':test:fixtures:azure-fixture', project ':test:fixtures:ec2-imds-fixture', project ':test:fixtures:gcs-fixture', project ':test:fixtures:geoip-fixture', project ':test:fixtures:hdfs-fixture', project ':test:fixtures:krb5kdc-fixture', project ':test:fixtures:minio-fixture', project ':test:fixtures:old-elasticsearch', project ':test:fixtures:s3-fixture', project ':test:fixtures:testcontainer-utils', project ':test:fixtures:url-fixture', project ':x-pack:libs:es-opensaml-security-api', project ':x-pack:plugin:analytics', project ':x-pack:plugin:apm-data', project ':x-pack:plugin:async', project 
':x-pack:plugin:async-search', project ':x-pack:plugin:autoscaling', project ':x-pack:plugin:blob-cache', project ':x-pack:plugin:ccr', project ':x-pack:plugin:core', project ':x-pack:plugin:deprecation', project ':x-pack:plugin:downsample', project ':x-pack:plugin:enrich', project ':x-pack:plugin:ent-search', project ':x-pack:plugin:eql', project ':x-pack:plugin:esql', project ':x-pack:plugin:esql-core', project ':x-pack:plugin:fleet', project ':x-pack:plugin:frozen-indices', project ':x-pack:plugin:geoip-enterprise-downloader', project ':x-pack:plugin:graph', project ':x-pack:plugin:identity-provider', project ':x-pack:plugin:ilm', project ':x-pack:plugin:inference', project ':x-pack:plugin:kql', project ':x-pack:plugin:logsdb', project ':x-pack:plugin:logstash', project ':x-pack:plugin:mapper-aggregate-metric', project ':x-pack:plugin:mapper-constant-keyword', project ':x-pack:plugin:mapper-counted-keyword', project ':x-pack:plugin:mapper-unsigned-long', project ':x-pack:plugin:mapper-version', project ':x-pack:plugin:migrate', project ':x-pack:plugin:ml', project ':x-pack:plugin:ml-package-loader', project ':x-pack:plugin:monitoring', project ':x-pack:plugin:old-lucene-versions', project ':x-pack:plugin:otel-data', project ':x-pack:plugin:profiling', project ':x-pack:plugin:ql', project ':x-pack:plugin:rank-rrf', project ':x-pack:plugin:redact', project ':x-pack:plugin:repositories-metering-api', project ':x-pack:plugin:rollup', project ':x-pack:plugin:search-business-rules', project ':x-pack:plugin:searchable-snapshots', project ':x-pack:plugin:security', project ':x-pack:plugin:shutdown', project ':x-pack:plugin:slm', project ':x-pack:plugin:snapshot-based-recoveries', project ':x-pack:plugin:snapshot-repo-test-kit', project ':x-pack:plugin:spatial', project ':x-pack:plugin:sql', project ':x-pack:plugin:stack', project ':x-pack:plugin:text-structure', project ':x-pack:plugin:transform', project ':x-pack:plugin:vector-tile', project ':x-pack:plugin:voting-only-node', project ':x-pack:plugin:watcher', project ':x-pack:plugin:wildcard', project ':x-pack:plugin:write-load-forecaster', project ':x-pack:qa:core-rest-tests-with-security', project ':x-pack:qa:evil-tests', project ':x-pack:qa:freeze-plugin', project ':x-pack:qa:full-cluster-restart', project ':x-pack:qa:kerberos-tests', project ':x-pack:qa:mixed-tier-cluster', project ':x-pack:qa:multi-cluster-search-security', project ':x-pack:qa:multi-node', project ':x-pack:qa:oidc-op-tests', project ':x-pack:qa:openldap-tests', project ':x-pack:qa:password-protected-keystore', project ':x-pack:qa:reindex-tests-with-security', project ':x-pack:qa:repository-old-versions', project ':x-pack:qa:rolling-upgrade', project ':x-pack:qa:rolling-upgrade-basic', project ':x-pack:qa:rolling-upgrade-multi-cluster', project ':x-pack:qa:runtime-fields', project ':x-pack:qa:saml-idp-tests', project ':x-pack:qa:security-example-spi-extension', project ':x-pack:qa:security-setup-password-tests', project ':x-pack:qa:security-tools-tests', project ':x-pack:qa:smoke-test-plugins', project ':x-pack:qa:smoke-test-plugins-ssl', project ':x-pack:qa:smoke-test-security-with-mustache', project ':x-pack:qa:third-party', project ':x-pack:test:idp-fixture', project ':x-pack:test:smb-fixture', project ':libs:entitlement:qa:common', project ':libs:entitlement:qa:entitlement-allowed', project ':libs:entitlement:qa:entitlement-allowed-nonmodular', project ':libs:entitlement:qa:entitlement-denied', project ':libs:entitlement:qa:entitlement-denied-nonmodular', project 
':libs:entitlement:tools:common', project ':libs:entitlement:tools:public-callers-finder', project ':libs:entitlement:tools:securitymanager-scanner', project ':modules:ingest-geoip:qa:file-based-update', project ':modules:ingest-geoip:qa:full-cluster-restart', project ':plugins:discovery-ec2:qa:amazon-ec2', project ':plugins:discovery-gce:qa:gce', project ':x-pack:plugin:async-search:qa', project ':x-pack:plugin:autoscaling:qa', project ':x-pack:plugin:ccr:qa', project ':x-pack:plugin:core:template-resources', project ':x-pack:plugin:deprecation:qa', project ':x-pack:plugin:downsample:qa', project ':x-pack:plugin:enrich:qa', project ':x-pack:plugin:ent-search:qa', project ':x-pack:plugin:eql:qa', project ':x-pack:plugin:esql:arrow', project ':x-pack:plugin:esql:compute', project ':x-pack:plugin:esql:qa', project ':x-pack:plugin:esql-core:test-fixtures', project ':x-pack:plugin:fleet:qa', project ':x-pack:plugin:graph:qa', project ':x-pack:plugin:identity-provider:qa', project ':x-pack:plugin:ilm:qa', project ':x-pack:plugin:inference:qa', project ':x-pack:plugin:logsdb:qa', project ':x-pack:plugin:ml:qa', project ':x-pack:plugin:ql:test-fixtures', project ':x-pack:plugin:repositories-metering-api:qa', project ':x-pack:plugin:searchable-snapshots:qa', project ':x-pack:plugin:security:cli', project ':x-pack:plugin:security:lib', project ':x-pack:plugin:security:qa', project ':x-pack:plugin:shutdown:qa', project ':x-pack:plugin:slm:qa', project ':x-pack:plugin:snapshot-based-recoveries:qa', project ':x-pack:plugin:snapshot-repo-test-kit:qa', project ':x-pack:plugin:sql:jdbc', project ':x-pack:plugin:sql:qa', project ':x-pack:plugin:sql:sql-action', project ':x-pack:plugin:sql:sql-cli', project ':x-pack:plugin:sql:sql-client', project ':x-pack:plugin:sql:sql-proto', project ':x-pack:plugin:stack:qa', project ':x-pack:plugin:text-structure:qa', project ':x-pack:plugin:transform:qa', project ':x-pack:plugin:vector-tile:qa', project ':x-pack:plugin:watcher:qa', project ':x-pack:qa:multi-cluster-search-security:legacy-with-basic-license', project ':x-pack:qa:multi-cluster-search-security:legacy-with-full-license', project ':x-pack:qa:multi-cluster-search-security:legacy-with-restricted-trust', project ':x-pack:qa:runtime-fields:core-with-mapped', project ':x-pack:qa:runtime-fields:core-with-search', project ':x-pack:qa:runtime-fields:with-security', project ':x-pack:qa:third-party:active-directory', project ':x-pack:qa:third-party:jira', project ':x-pack:qa:third-party:pagerduty', project ':x-pack:qa:third-party:slack', project ':x-pack:plugin:async-search:qa:rest', project ':x-pack:plugin:async-search:qa:security', project ':x-pack:plugin:autoscaling:qa:rest', project ':x-pack:plugin:ccr:qa:downgrade-to-basic-license', project ':x-pack:plugin:ccr:qa:multi-cluster', project ':x-pack:plugin:ccr:qa:non-compliant-license', project ':x-pack:plugin:ccr:qa:rest', project ':x-pack:plugin:ccr:qa:restart', project ':x-pack:plugin:ccr:qa:security', project ':x-pack:plugin:deprecation:qa:common', project ':x-pack:plugin:deprecation:qa:early-deprecation-rest', project ':x-pack:plugin:deprecation:qa:rest', project ':x-pack:plugin:downsample:qa:mixed-cluster', project ':x-pack:plugin:downsample:qa:rest', project ':x-pack:plugin:downsample:qa:with-security', project ':x-pack:plugin:enrich:qa:common', project ':x-pack:plugin:enrich:qa:rest', project ':x-pack:plugin:enrich:qa:rest-with-advanced-security', project ':x-pack:plugin:enrich:qa:rest-with-security', project 
':x-pack:plugin:ent-search:qa:full-cluster-restart', project ':x-pack:plugin:ent-search:qa:rest', project ':x-pack:plugin:eql:qa:ccs-rolling-upgrade', project ':x-pack:plugin:eql:qa:common', project ':x-pack:plugin:eql:qa:correctness', project ':x-pack:plugin:eql:qa:mixed-node', project ':x-pack:plugin:eql:qa:multi-cluster-with-security', project ':x-pack:plugin:eql:qa:rest', project ':x-pack:plugin:eql:qa:security', project ':x-pack:plugin:esql:compute:ann', project ':x-pack:plugin:esql:compute:gen', project ':x-pack:plugin:esql:qa:action', project ':x-pack:plugin:esql:qa:security', project ':x-pack:plugin:esql:qa:server', project ':x-pack:plugin:esql:qa:testFixtures', project ':x-pack:plugin:fleet:qa:rest', project ':x-pack:plugin:graph:qa:with-security', project ':x-pack:plugin:identity-provider:qa:idp-rest-tests', project ':x-pack:plugin:ilm:qa:multi-cluster', project ':x-pack:plugin:ilm:qa:multi-node', project ':x-pack:plugin:ilm:qa:rest', project ':x-pack:plugin:ilm:qa:with-security', project ':x-pack:plugin:inference:qa:inference-service-tests', project ':x-pack:plugin:inference:qa:mixed-cluster', project ':x-pack:plugin:inference:qa:rolling-upgrade', project ':x-pack:plugin:inference:qa:test-service-plugin', project ':x-pack:plugin:logsdb:qa:with-basic', project ':x-pack:plugin:logsdb:qa:with-custom-cutoff', project ':x-pack:plugin:ml:qa:basic-multi-node', project ':x-pack:plugin:ml:qa:disabled', project ':x-pack:plugin:ml:qa:ml-inference-service-tests', project ':x-pack:plugin:ml:qa:ml-with-security', project ':x-pack:plugin:ml:qa:multi-cluster-tests-with-security', project ':x-pack:plugin:ml:qa:native-multi-node-tests', project ':x-pack:plugin:ml:qa:no-bootstrap-tests', project ':x-pack:plugin:ml:qa:single-node-tests', project ':x-pack:plugin:repositories-metering-api:qa:azure', project ':x-pack:plugin:repositories-metering-api:qa:gcs', project ':x-pack:plugin:repositories-metering-api:qa:s3', project ':x-pack:plugin:searchable-snapshots:qa:azure', project ':x-pack:plugin:searchable-snapshots:qa:gcs', project ':x-pack:plugin:searchable-snapshots:qa:hdfs', project ':x-pack:plugin:searchable-snapshots:qa:minio', project ':x-pack:plugin:searchable-snapshots:qa:rest', project ':x-pack:plugin:searchable-snapshots:qa:s3', project ':x-pack:plugin:searchable-snapshots:qa:url', project ':x-pack:plugin:security:lib:nimbus-jose-jwt-modified', project ':x-pack:plugin:security:lib:nimbus-jose-jwt-modified-part1', project ':x-pack:plugin:security:lib:nimbus-jose-jwt-modified-part2', project ':x-pack:plugin:security:qa:audit', project ':x-pack:plugin:security:qa:basic-enable-security', project ':x-pack:plugin:security:qa:consistency-checks', project ':x-pack:plugin:security:qa:jwt-realm', project ':x-pack:plugin:security:qa:multi-cluster', project ':x-pack:plugin:security:qa:operator-privileges-tests', project ':x-pack:plugin:security:qa:profile', project ':x-pack:plugin:security:qa:saml-rest-tests', project ':x-pack:plugin:security:qa:secondary-auth-actions', project ':x-pack:plugin:security:qa:security-basic', project ':x-pack:plugin:security:qa:security-disabled', project ':x-pack:plugin:security:qa:security-trial', project ':x-pack:plugin:security:qa:service-account', project ':x-pack:plugin:security:qa:smoke-test-all-realms', project ':x-pack:plugin:security:qa:tls-basic', project ':x-pack:plugin:shutdown:qa:full-cluster-restart', project ':x-pack:plugin:shutdown:qa:multi-node', project ':x-pack:plugin:shutdown:qa:rolling-upgrade', project ':x-pack:plugin:slm:qa:multi-node', project 
':x-pack:plugin:slm:qa:rest', project ':x-pack:plugin:slm:qa:with-security', project ':x-pack:plugin:snapshot-based-recoveries:qa:azure', project ':x-pack:plugin:snapshot-based-recoveries:qa:fs', project ':x-pack:plugin:snapshot-based-recoveries:qa:gcs', project ':x-pack:plugin:snapshot-based-recoveries:qa:license-enforcing', project ':x-pack:plugin:snapshot-based-recoveries:qa:s3', project ':x-pack:plugin:snapshot-repo-test-kit:qa:azure', project ':x-pack:plugin:snapshot-repo-test-kit:qa:gcs', project ':x-pack:plugin:snapshot-repo-test-kit:qa:hdfs', project ':x-pack:plugin:snapshot-repo-test-kit:qa:minio', project ':x-pack:plugin:snapshot-repo-test-kit:qa:rest', project ':x-pack:plugin:snapshot-repo-test-kit:qa:s3', project ':x-pack:plugin:sql:qa:jdbc', project ':x-pack:plugin:sql:qa:mixed-node', project ':x-pack:plugin:sql:qa:server', project ':x-pack:plugin:stack:qa:rest', project ':x-pack:plugin:text-structure:qa:text-structure-with-security', project ':x-pack:plugin:transform:qa:common', project ':x-pack:plugin:transform:qa:multi-cluster-tests-with-security', project ':x-pack:plugin:transform:qa:multi-node-tests', project ':x-pack:plugin:transform:qa:single-node-tests', project ':x-pack:plugin:vector-tile:qa:multi-cluster', project ':x-pack:plugin:watcher:qa:common', project ':x-pack:plugin:watcher:qa:rest', project ':x-pack:plugin:watcher:qa:with-security', project ':x-pack:plugin:esql:qa:server:mixed-cluster', project ':x-pack:plugin:esql:qa:server:multi-clusters', project ':x-pack:plugin:esql:qa:server:multi-node', project ':x-pack:plugin:esql:qa:server:single-node', project ':x-pack:plugin:sql:qa:jdbc:multi-node', project ':x-pack:plugin:sql:qa:jdbc:no-sql', project ':x-pack:plugin:sql:qa:jdbc:security', project ':x-pack:plugin:sql:qa:jdbc:single-node', project ':x-pack:plugin:sql:qa:server:multi-cluster-with-security', project ':x-pack:plugin:sql:qa:server:multi-node', project ':x-pack:plugin:sql:qa:server:security', project ':x-pack:plugin:sql:qa:server:single-node', project ':x-pack:plugin:sql:qa:jdbc:security:with-ssl', project ':x-pack:plugin:sql:qa:jdbc:security:without-ssl', project ':x-pack:plugin:sql:qa:server:security:with-ssl', project ':x-pack:plugin:sql:qa:server:security:without-ssl']
+
+> Configure project :
+Evaluating root project 'elasticsearch' using build file '/Users/rene/dev/elastic/elasticsearch/build.gradle'.
+
+> Configure project :benchmarks
+Evaluating project ':benchmarks' using build file '/Users/rene/dev/elastic/elasticsearch/benchmarks/build.gradle'.
+
+> Configure project :client
+Evaluating project ':client' using build file '/Users/rene/dev/elastic/elasticsearch/client/build.gradle'.
+
+> Configure project :distribution
+Evaluating project ':distribution' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/build.gradle'.
+
+> Configure project :docs
+Evaluating project ':docs' using build file '/Users/rene/dev/elastic/elasticsearch/docs/build.gradle'.
+
+> Configure project :libs
+Evaluating project ':libs' using build file '/Users/rene/dev/elastic/elasticsearch/libs/build.gradle'.
+
+> Configure project :modules
+Evaluating project ':modules' using build file '/Users/rene/dev/elastic/elasticsearch/modules/build.gradle'.
+
+> Configure project :plugins
+Evaluating project ':plugins' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/build.gradle'.
+
+> Configure project :qa
+Evaluating project ':qa' using build file '/Users/rene/dev/elastic/elasticsearch/qa/build.gradle'.
+
+> Configure project :rest-api-spec
+Evaluating project ':rest-api-spec' using build file '/Users/rene/dev/elastic/elasticsearch/rest-api-spec/build.gradle'.
+
+> Configure project :server
+Evaluating project ':server' using build file '/Users/rene/dev/elastic/elasticsearch/server/build.gradle'.
+
+> Configure project :test
+Evaluating project ':test' using build file '/Users/rene/dev/elastic/elasticsearch/test/build.gradle'.
+
+> Configure project :x-pack
+Evaluating project ':x-pack' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/build.gradle'.
+
+> Configure project :client:benchmark
+Evaluating project ':client:benchmark' using build file '/Users/rene/dev/elastic/elasticsearch/client/benchmark/build.gradle'.
+
+> Configure project :client:client-benchmark-noop-api-plugin
+Evaluating project ':client:client-benchmark-noop-api-plugin' using build file '/Users/rene/dev/elastic/elasticsearch/client/client-benchmark-noop-api-plugin/build.gradle'.
+
+> Configure project :client:rest
+Evaluating project ':client:rest' using build file '/Users/rene/dev/elastic/elasticsearch/client/rest/build.gradle'.
+
+> Configure project :client:sniffer
+Evaluating project ':client:sniffer' using build file '/Users/rene/dev/elastic/elasticsearch/client/sniffer/build.gradle'.
+
+> Configure project :client:test
+Evaluating project ':client:test' using build file '/Users/rene/dev/elastic/elasticsearch/client/test/build.gradle'.
+
+> Configure project :distribution:archives
+Evaluating project ':distribution:archives' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/archives/build.gradle'.
+
+> Configure project :distribution:bwc
+Evaluating project ':distribution:bwc' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/bwc/build.gradle'.
+
+> Configure project :distribution:docker
+Evaluating project ':distribution:docker' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/docker/build.gradle'.
+
+> Configure project :distribution:packages
+Evaluating project ':distribution:packages' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/packages/build.gradle'.
+Transforming gradle-ospackage-plugin-11.10.0.jar (com.netflix.nebula:gradle-ospackage-plugin:11.10.0) with InstrumentationAnalysisTransform
+Transforming commons-lang3-3.9.jar (org.apache.commons:commons-lang3:3.9) with InstrumentationAnalysisTransform
+Transforming gradle-docker-plugin-3.2.1.jar (com.bmuschko:gradle-docker-plugin:3.2.1) with InstrumentationAnalysisTransform
+Transforming redline-1.2.10.jar (org.redline-rpm:redline:1.2.10) with InstrumentationAnalysisTransform
+Transforming jdeb-1.10.jar (org.vafer:jdeb:1.10) with InstrumentationAnalysisTransform
+Transforming ant-1.10.12.jar (org.apache.ant:ant:1.10.12) with InstrumentationAnalysisTransform
+Transforming slf4j-api-1.7.5.jar (org.slf4j:slf4j-api:1.7.5) with InstrumentationAnalysisTransform
+Transforming maven-archiver-3.5.1.jar (org.apache.maven:maven-archiver:3.5.1) with InstrumentationAnalysisTransform
+Transforming plexus-archiver-4.2.3.jar (org.codehaus.plexus:plexus-archiver:4.2.3) with InstrumentationAnalysisTransform
+Transforming commons-compress-1.21.jar (org.apache.commons:commons-compress:1.21) with InstrumentationAnalysisTransform
+Transforming xz-1.8.jar (org.tukaani:xz:1.8) with InstrumentationAnalysisTransform
+Transforming bcpg-jdk15on-1.69.jar (org.bouncycastle:bcpg-jdk15on:1.69) with InstrumentationAnalysisTransform
+Transforming bcprov-jdk15on-1.69.jar (org.bouncycastle:bcprov-jdk15on:1.69) with InstrumentationAnalysisTransform
+Transforming ant-launcher-1.10.12.jar (org.apache.ant:ant-launcher:1.10.12) with InstrumentationAnalysisTransform
+Transforming maven-model-3.1.1.jar (org.apache.maven:maven-model:3.1.1) with InstrumentationAnalysisTransform
+Transforming maven-shared-utils-3.3.3.jar (org.apache.maven.shared:maven-shared-utils:3.3.3) with InstrumentationAnalysisTransform
+Transforming plexus-io-3.2.0.jar (org.codehaus.plexus:plexus-io:3.2.0) with InstrumentationAnalysisTransform
+Transforming commons-io-2.6.jar (commons-io:commons-io:2.6) with InstrumentationAnalysisTransform
+Transforming plexus-interpolation-1.26.jar (org.codehaus.plexus:plexus-interpolation:1.26) with InstrumentationAnalysisTransform
+Transforming snappy-0.4.jar (org.iq80.snappy:snappy:0.4) with InstrumentationAnalysisTransform
+Transforming gradle-ospackage-plugin-11.10.0.jar (com.netflix.nebula:gradle-ospackage-plugin:11.10.0) with InstrumentationAnalysisTransform
+Transforming gradle-ospackage-plugin-11.10.0.jar (com.netflix.nebula:gradle-ospackage-plugin:11.10.0) with MergeInstrumentationAnalysisTransform
+Transforming commons-lang3-3.9.jar (org.apache.commons:commons-lang3:3.9) with InstrumentationAnalysisTransform
+Transforming commons-lang3-3.9.jar (org.apache.commons:commons-lang3:3.9) with MergeInstrumentationAnalysisTransform
+Transforming gradle-ospackage-plugin-11.10.0.jar (com.netflix.nebula:gradle-ospackage-plugin:11.10.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming gradle-docker-plugin-3.2.1.jar (com.bmuschko:gradle-docker-plugin:3.2.1) with InstrumentationAnalysisTransform
+Transforming commons-lang3-3.9.jar (org.apache.commons:commons-lang3:3.9) with ExternalDependencyInstrumentingArtifactTransform
+Transforming gradle-docker-plugin-3.2.1.jar (com.bmuschko:gradle-docker-plugin:3.2.1) with MergeInstrumentationAnalysisTransform
+Transforming redline-1.2.10.jar (org.redline-rpm:redline:1.2.10) with InstrumentationAnalysisTransform
+Transforming redline-1.2.10.jar (org.redline-rpm:redline:1.2.10) with MergeInstrumentationAnalysisTransform
+Transforming gradle-docker-plugin-3.2.1.jar (com.bmuschko:gradle-docker-plugin:3.2.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming jdeb-1.10.jar (org.vafer:jdeb:1.10) with InstrumentationAnalysisTransform
+Transforming jdeb-1.10.jar (org.vafer:jdeb:1.10) with MergeInstrumentationAnalysisTransform
+Transforming redline-1.2.10.jar (org.redline-rpm:redline:1.2.10) with ExternalDependencyInstrumentingArtifactTransform
+Transforming ant-1.10.12.jar (org.apache.ant:ant:1.10.12) with InstrumentationAnalysisTransform
+Transforming jdeb-1.10.jar (org.vafer:jdeb:1.10) with ExternalDependencyInstrumentingArtifactTransform
+Transforming ant-1.10.12.jar (org.apache.ant:ant:1.10.12) with MergeInstrumentationAnalysisTransform
+Transforming slf4j-api-1.7.5.jar (org.slf4j:slf4j-api:1.7.5) with InstrumentationAnalysisTransform
+Transforming slf4j-api-1.7.5.jar (org.slf4j:slf4j-api:1.7.5) with MergeInstrumentationAnalysisTransform
+Transforming ant-1.10.12.jar (org.apache.ant:ant:1.10.12) with ExternalDependencyInstrumentingArtifactTransform
+Transforming maven-archiver-3.5.1.jar (org.apache.maven:maven-archiver:3.5.1) with InstrumentationAnalysisTransform
+Transforming slf4j-api-1.7.5.jar (org.slf4j:slf4j-api:1.7.5) with ExternalDependencyInstrumentingArtifactTransform
+Transforming maven-archiver-3.5.1.jar (org.apache.maven:maven-archiver:3.5.1) with MergeInstrumentationAnalysisTransform
+Transforming plexus-archiver-4.2.3.jar (org.codehaus.plexus:plexus-archiver:4.2.3) with InstrumentationAnalysisTransform
+Transforming plexus-archiver-4.2.3.jar (org.codehaus.plexus:plexus-archiver:4.2.3) with MergeInstrumentationAnalysisTransform
+Transforming maven-archiver-3.5.1.jar (org.apache.maven:maven-archiver:3.5.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming commons-compress-1.21.jar (org.apache.commons:commons-compress:1.21) with InstrumentationAnalysisTransform
+Transforming commons-compress-1.21.jar (org.apache.commons:commons-compress:1.21) with MergeInstrumentationAnalysisTransform
+Transforming plexus-archiver-4.2.3.jar (org.codehaus.plexus:plexus-archiver:4.2.3) with ExternalDependencyInstrumentingArtifactTransform
+Transforming xz-1.8.jar (org.tukaani:xz:1.8) with InstrumentationAnalysisTransform
+Transforming xz-1.8.jar (org.tukaani:xz:1.8) with MergeInstrumentationAnalysisTransform
+Transforming commons-compress-1.21.jar (org.apache.commons:commons-compress:1.21) with ExternalDependencyInstrumentingArtifactTransform
+Transforming bcpg-jdk15on-1.69.jar (org.bouncycastle:bcpg-jdk15on:1.69) with InstrumentationAnalysisTransform
+Transforming bcpg-jdk15on-1.69.jar (org.bouncycastle:bcpg-jdk15on:1.69) with MergeInstrumentationAnalysisTransform
+Transforming xz-1.8.jar (org.tukaani:xz:1.8) with ExternalDependencyInstrumentingArtifactTransform
+Transforming bcprov-jdk15on-1.69.jar (org.bouncycastle:bcprov-jdk15on:1.69) with InstrumentationAnalysisTransform
+Transforming bcprov-jdk15on-1.69.jar (org.bouncycastle:bcprov-jdk15on:1.69) with MergeInstrumentationAnalysisTransform
+Transforming ant-launcher-1.10.12.jar (org.apache.ant:ant-launcher:1.10.12) with InstrumentationAnalysisTransform
+Transforming bcpg-jdk15on-1.69.jar (org.bouncycastle:bcpg-jdk15on:1.69) with ExternalDependencyInstrumentingArtifactTransform
+Transforming bcprov-jdk15on-1.69.jar (org.bouncycastle:bcprov-jdk15on:1.69) with ExternalDependencyInstrumentingArtifactTransform
+Transforming ant-launcher-1.10.12.jar (org.apache.ant:ant-launcher:1.10.12) with MergeInstrumentationAnalysisTransform
+Transforming maven-model-3.1.1.jar (org.apache.maven:maven-model:3.1.1) with InstrumentationAnalysisTransform
+Transforming maven-model-3.1.1.jar (org.apache.maven:maven-model:3.1.1) with MergeInstrumentationAnalysisTransform
+Transforming maven-shared-utils-3.3.3.jar (org.apache.maven.shared:maven-shared-utils:3.3.3) with InstrumentationAnalysisTransform
+Transforming ant-launcher-1.10.12.jar (org.apache.ant:ant-launcher:1.10.12) with ExternalDependencyInstrumentingArtifactTransform
+Transforming maven-model-3.1.1.jar (org.apache.maven:maven-model:3.1.1) with ExternalDependencyInstrumentingArtifactTransform
+Transforming maven-shared-utils-3.3.3.jar (org.apache.maven.shared:maven-shared-utils:3.3.3) with MergeInstrumentationAnalysisTransform
+Transforming plexus-io-3.2.0.jar (org.codehaus.plexus:plexus-io:3.2.0) with InstrumentationAnalysisTransform
+Transforming plexus-io-3.2.0.jar (org.codehaus.plexus:plexus-io:3.2.0) with MergeInstrumentationAnalysisTransform
+Transforming commons-io-2.6.jar (commons-io:commons-io:2.6) with InstrumentationAnalysisTransform
+Transforming maven-shared-utils-3.3.3.jar (org.apache.maven.shared:maven-shared-utils:3.3.3) with ExternalDependencyInstrumentingArtifactTransform
+Transforming plexus-io-3.2.0.jar (org.codehaus.plexus:plexus-io:3.2.0) with ExternalDependencyInstrumentingArtifactTransform
+Transforming commons-io-2.6.jar (commons-io:commons-io:2.6) with MergeInstrumentationAnalysisTransform
+Transforming plexus-interpolation-1.26.jar (org.codehaus.plexus:plexus-interpolation:1.26) with InstrumentationAnalysisTransform
+Transforming plexus-interpolation-1.26.jar (org.codehaus.plexus:plexus-interpolation:1.26) with MergeInstrumentationAnalysisTransform
+Transforming snappy-0.4.jar (org.iq80.snappy:snappy:0.4) with InstrumentationAnalysisTransform
+Transforming commons-io-2.6.jar (commons-io:commons-io:2.6) with ExternalDependencyInstrumentingArtifactTransform
+Transforming plexus-interpolation-1.26.jar (org.codehaus.plexus:plexus-interpolation:1.26) with ExternalDependencyInstrumentingArtifactTransform
+Transforming snappy-0.4.jar (org.iq80.snappy:snappy:0.4) with MergeInstrumentationAnalysisTransform
+Transforming snappy-0.4.jar (org.iq80.snappy:snappy:0.4) with ExternalDependencyInstrumentingArtifactTransform
+
+> Configure project :distribution:tools
+Evaluating project ':distribution:tools' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/tools/build.gradle'.
+
+> Configure project :libs:cli
+Evaluating project ':libs:cli' using build file '/Users/rene/dev/elastic/elasticsearch/libs/cli/build.gradle'.
+
+> Configure project :libs:core
+Evaluating project ':libs:core' using build file '/Users/rene/dev/elastic/elasticsearch/libs/core/build.gradle'.
+
+> Configure project :libs:dissect
+Evaluating project ':libs:dissect' using build file '/Users/rene/dev/elastic/elasticsearch/libs/dissect/build.gradle'.
+
+> Configure project :libs:entitlement
+Evaluating project ':libs:entitlement' using build file '/Users/rene/dev/elastic/elasticsearch/libs/entitlement/build.gradle'.
+
+> Configure project :libs:geo
+Evaluating project ':libs:geo' using build file '/Users/rene/dev/elastic/elasticsearch/libs/geo/build.gradle'.
+
+> Configure project :libs:grok
+Evaluating project ':libs:grok' using build file '/Users/rene/dev/elastic/elasticsearch/libs/grok/build.gradle'.
+
+> Configure project :libs:h3
+Evaluating project ':libs:h3' using build file '/Users/rene/dev/elastic/elasticsearch/libs/h3/build.gradle'.
+
+> Configure project :libs:log4j
+Evaluating project ':libs:log4j' using build file '/Users/rene/dev/elastic/elasticsearch/libs/log4j/build.gradle'.
+
+> Configure project :libs:logging
+Evaluating project ':libs:logging' using build file '/Users/rene/dev/elastic/elasticsearch/libs/logging/build.gradle'.
+
+> Configure project :libs:logstash-bridge
+Evaluating project ':libs:logstash-bridge' using build file '/Users/rene/dev/elastic/elasticsearch/libs/logstash-bridge/build.gradle'.
+
+> Configure project :libs:lz4
+Evaluating project ':libs:lz4' using build file '/Users/rene/dev/elastic/elasticsearch/libs/lz4/build.gradle'.
+
+> Configure project :libs:native
+Evaluating project ':libs:native' using build file '/Users/rene/dev/elastic/elasticsearch/libs/native/build.gradle'.
+
+> Configure project :libs:plugin-analysis-api
+Evaluating project ':libs:plugin-analysis-api' using build file '/Users/rene/dev/elastic/elasticsearch/libs/plugin-analysis-api/build.gradle'.
+
+> Configure project :libs:plugin-api
+Evaluating project ':libs:plugin-api' using build file '/Users/rene/dev/elastic/elasticsearch/libs/plugin-api/build.gradle'.
+
+> Configure project :libs:plugin-scanner
+Evaluating project ':libs:plugin-scanner' using build file '/Users/rene/dev/elastic/elasticsearch/libs/plugin-scanner/build.gradle'.
+
+> Configure project :libs:secure-sm
+Evaluating project ':libs:secure-sm' using build file '/Users/rene/dev/elastic/elasticsearch/libs/secure-sm/build.gradle'.
+
+> Configure project :libs:simdvec
+Evaluating project ':libs:simdvec' using build file '/Users/rene/dev/elastic/elasticsearch/libs/simdvec/build.gradle'.
+
+> Configure project :libs:ssl-config
+Evaluating project ':libs:ssl-config' using build file '/Users/rene/dev/elastic/elasticsearch/libs/ssl-config/build.gradle'.
+
+> Configure project :libs:tdigest
+Evaluating project ':libs:tdigest' using build file '/Users/rene/dev/elastic/elasticsearch/libs/tdigest/build.gradle'.
+
+> Configure project :libs:x-content
+Evaluating project ':libs:x-content' using build file '/Users/rene/dev/elastic/elasticsearch/libs/x-content/build.gradle'.
+
+> Configure project :modules:aggregations
+Evaluating project ':modules:aggregations' using build file '/Users/rene/dev/elastic/elasticsearch/modules/aggregations/build.gradle'.
+
+> Configure project :modules:analysis-common
+Evaluating project ':modules:analysis-common' using build file '/Users/rene/dev/elastic/elasticsearch/modules/analysis-common/build.gradle'.
+
+> Configure project :modules:apm
+Evaluating project ':modules:apm' using build file '/Users/rene/dev/elastic/elasticsearch/modules/apm/build.gradle'.
+
+> Configure project :modules:data-streams
+Evaluating project ':modules:data-streams' using build file '/Users/rene/dev/elastic/elasticsearch/modules/data-streams/build.gradle'.
+
+> Configure project :modules:dot-prefix-validation
+Evaluating project ':modules:dot-prefix-validation' using build file '/Users/rene/dev/elastic/elasticsearch/modules/dot-prefix-validation/build.gradle'.
+
+> Configure project :modules:health-shards-availability
+Evaluating project ':modules:health-shards-availability' using build file '/Users/rene/dev/elastic/elasticsearch/modules/health-shards-availability/build.gradle'.
+
+> Configure project :modules:ingest-attachment
+Evaluating project ':modules:ingest-attachment' using build file '/Users/rene/dev/elastic/elasticsearch/modules/ingest-attachment/build.gradle'.
+
+> Configure project :modules:ingest-common
+Evaluating project ':modules:ingest-common' using build file '/Users/rene/dev/elastic/elasticsearch/modules/ingest-common/build.gradle'.
+
+> Configure project :modules:ingest-geoip
+Evaluating project ':modules:ingest-geoip' using build file '/Users/rene/dev/elastic/elasticsearch/modules/ingest-geoip/build.gradle'.
+
+> Configure project :modules:ingest-user-agent
+Evaluating project ':modules:ingest-user-agent' using build file '/Users/rene/dev/elastic/elasticsearch/modules/ingest-user-agent/build.gradle'.
+
+> Configure project :modules:kibana
+Evaluating project ':modules:kibana' using build file '/Users/rene/dev/elastic/elasticsearch/modules/kibana/build.gradle'.
+
+> Configure project :modules:lang-expression
+Evaluating project ':modules:lang-expression' using build file '/Users/rene/dev/elastic/elasticsearch/modules/lang-expression/build.gradle'.
+
+> Configure project :modules:lang-mustache
+Evaluating project ':modules:lang-mustache' using build file '/Users/rene/dev/elastic/elasticsearch/modules/lang-mustache/build.gradle'.
+
+> Configure project :modules:lang-painless
+Evaluating project ':modules:lang-painless' using build file '/Users/rene/dev/elastic/elasticsearch/modules/lang-painless/build.gradle'.
+
+> Configure project :modules:legacy-geo
+Evaluating project ':modules:legacy-geo' using build file '/Users/rene/dev/elastic/elasticsearch/modules/legacy-geo/build.gradle'.
+
+> Configure project :modules:mapper-extras
+Evaluating project ':modules:mapper-extras' using build file '/Users/rene/dev/elastic/elasticsearch/modules/mapper-extras/build.gradle'.
+
+> Configure project :modules:parent-join
+Evaluating project ':modules:parent-join' using build file '/Users/rene/dev/elastic/elasticsearch/modules/parent-join/build.gradle'.
+
+> Configure project :modules:percolator
+Evaluating project ':modules:percolator' using build file '/Users/rene/dev/elastic/elasticsearch/modules/percolator/build.gradle'.
+
+> Configure project :modules:rank-eval
+Evaluating project ':modules:rank-eval' using build file '/Users/rene/dev/elastic/elasticsearch/modules/rank-eval/build.gradle'.
+
+> Configure project :modules:reindex
+Evaluating project ':modules:reindex' using build file '/Users/rene/dev/elastic/elasticsearch/modules/reindex/build.gradle'.
+
+> Configure project :modules:repository-azure
+Evaluating project ':modules:repository-azure' using build file '/Users/rene/dev/elastic/elasticsearch/modules/repository-azure/build.gradle'.
+
+> Configure project :modules:repository-gcs
+Evaluating project ':modules:repository-gcs' using build file '/Users/rene/dev/elastic/elasticsearch/modules/repository-gcs/build.gradle'.
+
+> Configure project :modules:repository-s3
+Evaluating project ':modules:repository-s3' using build file '/Users/rene/dev/elastic/elasticsearch/modules/repository-s3/build.gradle'.
+
+> Configure project :modules:repository-url
+Evaluating project ':modules:repository-url' using build file '/Users/rene/dev/elastic/elasticsearch/modules/repository-url/build.gradle'.
+
+> Configure project :modules:rest-root
+Evaluating project ':modules:rest-root' using build file '/Users/rene/dev/elastic/elasticsearch/modules/rest-root/build.gradle'.
+
+> Configure project :modules:runtime-fields-common
+Evaluating project ':modules:runtime-fields-common' using build file '/Users/rene/dev/elastic/elasticsearch/modules/runtime-fields-common/build.gradle'.
+
+> Configure project :modules:systemd
+Evaluating project ':modules:systemd' using build file '/Users/rene/dev/elastic/elasticsearch/modules/systemd/build.gradle'.
+
+> Configure project :modules:transport-netty4
+Evaluating project ':modules:transport-netty4' using build file '/Users/rene/dev/elastic/elasticsearch/modules/transport-netty4/build.gradle'.
+
+> Configure project :plugins:analysis-icu
+Evaluating project ':plugins:analysis-icu' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/analysis-icu/build.gradle'.
+
+> Configure project :plugins:analysis-kuromoji
+Evaluating project ':plugins:analysis-kuromoji' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/analysis-kuromoji/build.gradle'.
+
+> Configure project :plugins:analysis-nori
+Evaluating project ':plugins:analysis-nori' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/analysis-nori/build.gradle'.
+
+> Configure project :plugins:analysis-phonetic
+Evaluating project ':plugins:analysis-phonetic' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/analysis-phonetic/build.gradle'.
+
+> Configure project :plugins:analysis-smartcn
+Evaluating project ':plugins:analysis-smartcn' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/analysis-smartcn/build.gradle'.
+
+> Configure project :plugins:analysis-stempel
+Evaluating project ':plugins:analysis-stempel' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/analysis-stempel/build.gradle'.
+
+> Configure project :plugins:analysis-ukrainian
+Evaluating project ':plugins:analysis-ukrainian' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/analysis-ukrainian/build.gradle'.
+
+> Configure project :plugins:discovery-azure-classic
+Evaluating project ':plugins:discovery-azure-classic' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/discovery-azure-classic/build.gradle'.
+
+> Configure project :plugins:discovery-ec2
+Evaluating project ':plugins:discovery-ec2' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/discovery-ec2/build.gradle'.
+
+> Configure project :plugins:discovery-gce
+Evaluating project ':plugins:discovery-gce' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/discovery-gce/build.gradle'.
+
+> Configure project :plugins:mapper-annotated-text
+Evaluating project ':plugins:mapper-annotated-text' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/mapper-annotated-text/build.gradle'.
+
+> Configure project :plugins:mapper-murmur3
+Evaluating project ':plugins:mapper-murmur3' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/mapper-murmur3/build.gradle'.
+
+> Configure project :plugins:mapper-size
+Evaluating project ':plugins:mapper-size' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/mapper-size/build.gradle'.
+
+> Configure project :plugins:repository-hdfs
+Evaluating project ':plugins:repository-hdfs' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/repository-hdfs/build.gradle'.
+
+> Configure project :plugins:store-smb
+Evaluating project ':plugins:store-smb' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/store-smb/build.gradle'.
+
+> Configure project :qa:ccs-common-rest
+Evaluating project ':qa:ccs-common-rest' using build file '/Users/rene/dev/elastic/elasticsearch/qa/ccs-common-rest/build.gradle'.
+
+> Configure project :qa:ccs-rolling-upgrade-remote-cluster
+Evaluating project ':qa:ccs-rolling-upgrade-remote-cluster' using build file '/Users/rene/dev/elastic/elasticsearch/qa/ccs-rolling-upgrade-remote-cluster/build.gradle'.
+
+> Configure project :qa:ccs-unavailable-clusters
+Evaluating project ':qa:ccs-unavailable-clusters' using build file '/Users/rene/dev/elastic/elasticsearch/qa/ccs-unavailable-clusters/build.gradle'.
+
+> Configure project :qa:custom-rest-controller
+Evaluating project ':qa:custom-rest-controller' using build file '/Users/rene/dev/elastic/elasticsearch/qa/custom-rest-controller/build.gradle'.
+
+> Configure project :qa:evil-tests
+Evaluating project ':qa:evil-tests' using build file '/Users/rene/dev/elastic/elasticsearch/qa/evil-tests/build.gradle'.
+
+> Configure project :qa:full-cluster-restart
+Evaluating project ':qa:full-cluster-restart' using build file '/Users/rene/dev/elastic/elasticsearch/qa/full-cluster-restart/build.gradle'.
+
+> Configure project :qa:logging-config
+Evaluating project ':qa:logging-config' using build file '/Users/rene/dev/elastic/elasticsearch/qa/logging-config/build.gradle'.
+
+> Configure project :qa:logging-spi
+Evaluating project ':qa:logging-spi' using build file '/Users/rene/dev/elastic/elasticsearch/qa/logging-spi/build.gradle'.
+
+> Configure project :qa:lucene-index-compatibility
+Evaluating project ':qa:lucene-index-compatibility' using build file '/Users/rene/dev/elastic/elasticsearch/qa/lucene-index-compatibility/build.gradle'.
+
+> Configure project :qa:mixed-cluster
+Evaluating project ':qa:mixed-cluster' using build file '/Users/rene/dev/elastic/elasticsearch/qa/mixed-cluster/build.gradle'.
+
+> Configure project :qa:multi-cluster-search
+Evaluating project ':qa:multi-cluster-search' using build file '/Users/rene/dev/elastic/elasticsearch/qa/multi-cluster-search/build.gradle'.
+
+> Configure project :qa:no-bootstrap-tests
+Evaluating project ':qa:no-bootstrap-tests' using build file '/Users/rene/dev/elastic/elasticsearch/qa/no-bootstrap-tests/build.gradle'.
+
+> Configure project :qa:packaging
+Evaluating project ':qa:packaging' using build file '/Users/rene/dev/elastic/elasticsearch/qa/packaging/build.gradle'.
+
+> Configure project :qa:remote-clusters
+Evaluating project ':qa:remote-clusters' using build file '/Users/rene/dev/elastic/elasticsearch/qa/remote-clusters/build.gradle'.
+
+> Configure project :qa:repository-multi-version
+Evaluating project ':qa:repository-multi-version' using build file '/Users/rene/dev/elastic/elasticsearch/qa/repository-multi-version/build.gradle'.
+
+> Configure project :qa:restricted-loggers
+Evaluating project ':qa:restricted-loggers' using build file '/Users/rene/dev/elastic/elasticsearch/qa/restricted-loggers/build.gradle'.
+
+> Configure project :qa:rolling-upgrade
+Evaluating project ':qa:rolling-upgrade' using build file '/Users/rene/dev/elastic/elasticsearch/qa/rolling-upgrade/build.gradle'.
+
+> Configure project :qa:rolling-upgrade-legacy
+Evaluating project ':qa:rolling-upgrade-legacy' using build file '/Users/rene/dev/elastic/elasticsearch/qa/rolling-upgrade-legacy/build.gradle'.
+
+> Configure project :qa:smoke-test-http
+Evaluating project ':qa:smoke-test-http' using build file '/Users/rene/dev/elastic/elasticsearch/qa/smoke-test-http/build.gradle'.
+
+> Configure project :qa:smoke-test-ingest-disabled
+Evaluating project ':qa:smoke-test-ingest-disabled' using build file '/Users/rene/dev/elastic/elasticsearch/qa/smoke-test-ingest-disabled/build.gradle'.
+
+> Configure project :qa:smoke-test-ingest-with-all-dependencies
+Evaluating project ':qa:smoke-test-ingest-with-all-dependencies' using build file '/Users/rene/dev/elastic/elasticsearch/qa/smoke-test-ingest-with-all-dependencies/build.gradle'.
+
+> Configure project :qa:smoke-test-multinode
+Evaluating project ':qa:smoke-test-multinode' using build file '/Users/rene/dev/elastic/elasticsearch/qa/smoke-test-multinode/build.gradle'.
+
+> Configure project :qa:smoke-test-plugins
+Evaluating project ':qa:smoke-test-plugins' using build file '/Users/rene/dev/elastic/elasticsearch/qa/smoke-test-plugins/build.gradle'.
+
+> Configure project :qa:stable-api
+Evaluating project ':qa:stable-api' using build file '/Users/rene/dev/elastic/elasticsearch/qa/stable-api/build.gradle'.
+
+> Configure project :qa:system-indices
+Evaluating project ':qa:system-indices' using build file '/Users/rene/dev/elastic/elasticsearch/qa/system-indices/build.gradle'.
+
+> Configure project :qa:unconfigured-node-name
+Evaluating project ':qa:unconfigured-node-name' using build file '/Users/rene/dev/elastic/elasticsearch/qa/unconfigured-node-name/build.gradle'.
+
+> Configure project :qa:verify-version-constants
+Evaluating project ':qa:verify-version-constants' using build file '/Users/rene/dev/elastic/elasticsearch/qa/verify-version-constants/build.gradle'.
+
+> Configure project :test:external-modules
+Evaluating project ':test:external-modules' using build file '/Users/rene/dev/elastic/elasticsearch/test/external-modules/build.gradle'.
+
+> Configure project :test:fixtures
+Evaluating project ':test:fixtures' using build file '/Users/rene/dev/elastic/elasticsearch/test/fixtures/build.gradle'.
+
+> Configure project :test:framework
+Evaluating project ':test:framework' using build file '/Users/rene/dev/elastic/elasticsearch/test/framework/build.gradle'.
+
+> Configure project :test:immutable-collections-patch
+Evaluating project ':test:immutable-collections-patch' using build file '/Users/rene/dev/elastic/elasticsearch/test/immutable-collections-patch/build.gradle'.
+
+> Configure project :test:logger-usage
+Evaluating project ':test:logger-usage' using build file '/Users/rene/dev/elastic/elasticsearch/test/logger-usage/build.gradle'.
+
+> Configure project :test:metadata-extractor
+Evaluating project ':test:metadata-extractor' using build file '/Users/rene/dev/elastic/elasticsearch/test/metadata-extractor/build.gradle'.
+
+> Configure project :test:test-clusters
+Evaluating project ':test:test-clusters' using build file '/Users/rene/dev/elastic/elasticsearch/test/test-clusters/build.gradle'.
+
+> Configure project :test:x-content
+Evaluating project ':test:x-content' using build file '/Users/rene/dev/elastic/elasticsearch/test/x-content/build.gradle'.
+
+> Configure project :test:yaml-rest-runner
+Evaluating project ':test:yaml-rest-runner' using build file '/Users/rene/dev/elastic/elasticsearch/test/yaml-rest-runner/build.gradle'.
+
+> Configure project :x-pack:libs
+Evaluating project ':x-pack:libs' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/libs/build.gradle'.
+
+> Configure project :x-pack:license-tools
+Evaluating project ':x-pack:license-tools' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/license-tools/build.gradle'.
+
+> Configure project :x-pack:plugin
+Evaluating project ':x-pack:plugin' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/build.gradle'.
+
+> Configure project :x-pack:qa
+Evaluating project ':x-pack:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/build.gradle'.
+
+> Configure project :x-pack:rest-resources-zip
+Evaluating project ':x-pack:rest-resources-zip' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/rest-resources-zip/build.gradle'.
+
+> Configure project :x-pack:test
+Evaluating project ':x-pack:test' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/test/build.gradle'.
+
+> Configure project :distribution:archives:darwin-aarch64-tar
+Evaluating project ':distribution:archives:darwin-aarch64-tar' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/archives/darwin-aarch64-tar/build.gradle'.
+
+> Configure project :distribution:archives:darwin-tar
+Evaluating project ':distribution:archives:darwin-tar' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/archives/darwin-tar/build.gradle'.
+
+> Configure project :distribution:archives:integ-test-zip
+Evaluating project ':distribution:archives:integ-test-zip' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/archives/integ-test-zip/build.gradle'.
+
+> Configure project :distribution:archives:linux-aarch64-tar
+Evaluating project ':distribution:archives:linux-aarch64-tar' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/archives/linux-aarch64-tar/build.gradle'.
+
+> Configure project :distribution:archives:linux-tar
+Evaluating project ':distribution:archives:linux-tar' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/archives/linux-tar/build.gradle'.
+
+> Configure project :distribution:archives:windows-zip
+Evaluating project ':distribution:archives:windows-zip' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/archives/windows-zip/build.gradle'.
+
+> Configure project :distribution:bwc:bugfix
+Evaluating project ':distribution:bwc:bugfix' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/bwc/bugfix/build.gradle'.
+
+> Configure project :distribution:bwc:bugfix2
+Evaluating project ':distribution:bwc:bugfix2' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/bwc/bugfix2/build.gradle'.
+
+> Configure project :distribution:bwc:maintenance
+Evaluating project ':distribution:bwc:maintenance' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/bwc/maintenance/build.gradle'.
+
+> Configure project :distribution:bwc:minor
+Evaluating project ':distribution:bwc:minor' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/bwc/minor/build.gradle'.
+
+> Configure project :distribution:bwc:staged
+Evaluating project ':distribution:bwc:staged' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/bwc/staged/build.gradle'.
+
+> Configure project :distribution:docker:cloud-ess-docker-aarch64-export
+Evaluating project ':distribution:docker:cloud-ess-docker-aarch64-export' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/docker/cloud-ess-docker-aarch64-export/build.gradle'.
+
+> Configure project :distribution:docker:cloud-ess-docker-export
+Evaluating project ':distribution:docker:cloud-ess-docker-export' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/docker/cloud-ess-docker-export/build.gradle'.
+
+> Configure project :distribution:docker:docker-aarch64-export
+Evaluating project ':distribution:docker:docker-aarch64-export' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/docker/docker-aarch64-export/build.gradle'.
+
+> Configure project :distribution:docker:docker-export
+Evaluating project ':distribution:docker:docker-export' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/docker/docker-export/build.gradle'.
+
+> Configure project :distribution:docker:ironbank-docker-aarch64-export
+Evaluating project ':distribution:docker:ironbank-docker-aarch64-export' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/docker/ironbank-docker-aarch64-export/build.gradle'.
+
+> Configure project :distribution:docker:ironbank-docker-export
+Evaluating project ':distribution:docker:ironbank-docker-export' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/docker/ironbank-docker-export/build.gradle'.
+
+> Configure project :distribution:docker:wolfi-docker-aarch64-export
+Evaluating project ':distribution:docker:wolfi-docker-aarch64-export' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/docker/wolfi-docker-aarch64-export/build.gradle'.
+
+> Configure project :distribution:docker:wolfi-docker-export
+Evaluating project ':distribution:docker:wolfi-docker-export' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/docker/wolfi-docker-export/build.gradle'.
+
+> Configure project :distribution:packages:aarch64-deb
+Evaluating project ':distribution:packages:aarch64-deb' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/packages/aarch64-deb/build.gradle'.
+
+> Configure project :distribution:packages:aarch64-rpm
+Evaluating project ':distribution:packages:aarch64-rpm' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/packages/aarch64-rpm/build.gradle'.
+
+> Configure project :distribution:packages:deb
+Evaluating project ':distribution:packages:deb' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/packages/deb/build.gradle'.
+
+> Configure project :distribution:packages:rpm
+Evaluating project ':distribution:packages:rpm' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/packages/rpm/build.gradle'.
+
+> Configure project :distribution:tools:ansi-console
+Evaluating project ':distribution:tools:ansi-console' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/tools/ansi-console/build.gradle'.
+
+> Configure project :distribution:tools:cli-launcher
+Evaluating project ':distribution:tools:cli-launcher' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/tools/cli-launcher/build.gradle'.
+
+> Configure project :distribution:tools:geoip-cli
+Evaluating project ':distribution:tools:geoip-cli' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/tools/geoip-cli/build.gradle'.
+
+> Configure project :distribution:tools:java-version-checker
+Evaluating project ':distribution:tools:java-version-checker' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/tools/java-version-checker/build.gradle'.
+
+> Configure project :distribution:tools:keystore-cli
+Evaluating project ':distribution:tools:keystore-cli' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/tools/keystore-cli/build.gradle'.
+
+> Configure project :distribution:tools:plugin-cli
+Evaluating project ':distribution:tools:plugin-cli' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/tools/plugin-cli/build.gradle'.
+
+> Configure project :distribution:tools:server-cli
+Evaluating project ':distribution:tools:server-cli' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/tools/server-cli/build.gradle'.
+
+> Configure project :distribution:tools:windows-service-cli
+Evaluating project ':distribution:tools:windows-service-cli' using build file '/Users/rene/dev/elastic/elasticsearch/distribution/tools/windows-service-cli/build.gradle'.
+
+> Configure project :libs:entitlement:agent
+Evaluating project ':libs:entitlement:agent' using build file '/Users/rene/dev/elastic/elasticsearch/libs/entitlement/agent/build.gradle'.
+
+> Configure project :libs:entitlement:asm-provider
+Evaluating project ':libs:entitlement:asm-provider' using build file '/Users/rene/dev/elastic/elasticsearch/libs/entitlement/asm-provider/build.gradle'.
+
+> Configure project :libs:entitlement:bridge
+Evaluating project ':libs:entitlement:bridge' using build file '/Users/rene/dev/elastic/elasticsearch/libs/entitlement/bridge/build.gradle'.
+
+> Configure project :libs:entitlement:qa
+Evaluating project ':libs:entitlement:qa' using build file '/Users/rene/dev/elastic/elasticsearch/libs/entitlement/qa/build.gradle'.
+
+> Configure project :libs:entitlement:tools
+Evaluating project ':libs:entitlement:tools' using build file '/Users/rene/dev/elastic/elasticsearch/libs/entitlement/tools/build.gradle'.
+
+> Configure project :libs:native:native-libraries
+Evaluating project ':libs:native:native-libraries' using build file '/Users/rene/dev/elastic/elasticsearch/libs/native/libraries/build.gradle'.
+
+> Configure project :libs:x-content:impl
+Evaluating project ':libs:x-content:impl' using build file '/Users/rene/dev/elastic/elasticsearch/libs/x-content/impl/build.gradle'.
+
+> Configure project :modules:ingest-geoip:qa
+Evaluating project ':modules:ingest-geoip:qa' using build file '/Users/rene/dev/elastic/elasticsearch/modules/ingest-geoip/qa/build.gradle'.
+
+> Configure project :modules:lang-painless:spi
+Evaluating project ':modules:lang-painless:spi' using build file '/Users/rene/dev/elastic/elasticsearch/modules/lang-painless/spi/build.gradle'.
+
+> Configure project :plugins:discovery-ec2:qa
+Evaluating project ':plugins:discovery-ec2:qa' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/discovery-ec2/qa/build.gradle'.
+
+> Configure project :plugins:discovery-gce:qa
+Evaluating project ':plugins:discovery-gce:qa' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/discovery-gce/qa/build.gradle'.
+
+> Configure project :plugins:repository-hdfs:hadoop-client-api
+Evaluating project ':plugins:repository-hdfs:hadoop-client-api' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/repository-hdfs/hadoop-client-api/build.gradle'.
+
+> Configure project :qa:stable-api:logging
+Evaluating project ':qa:stable-api:logging' using build file '/Users/rene/dev/elastic/elasticsearch/qa/stable-api/logging/build.gradle'.
+
+> Configure project :qa:stable-api:plugin-analysis-api
+Evaluating project ':qa:stable-api:plugin-analysis-api' using build file '/Users/rene/dev/elastic/elasticsearch/qa/stable-api/plugin-analysis-api/build.gradle'.
+
+> Configure project :qa:stable-api:plugin-api
+Evaluating project ':qa:stable-api:plugin-api' using build file '/Users/rene/dev/elastic/elasticsearch/qa/stable-api/plugin-api/build.gradle'.
+
+> Configure project :test:external-modules:test-apm-integration
+Evaluating project ':test:external-modules:test-apm-integration' using build file '/Users/rene/dev/elastic/elasticsearch/test/external-modules/apm-integration/build.gradle'.
+
+> Configure project :test:external-modules:test-delayed-aggs
+Evaluating project ':test:external-modules:test-delayed-aggs' using build file '/Users/rene/dev/elastic/elasticsearch/test/external-modules/delayed-aggs/build.gradle'.
+
+> Configure project :test:external-modules:test-die-with-dignity
+Evaluating project ':test:external-modules:test-die-with-dignity' using build file '/Users/rene/dev/elastic/elasticsearch/test/external-modules/die-with-dignity/build.gradle'.
+
+> Configure project :test:external-modules:test-error-query
+Evaluating project ':test:external-modules:test-error-query' using build file '/Users/rene/dev/elastic/elasticsearch/test/external-modules/error-query/build.gradle'.
+
+> Configure project :test:external-modules:test-esql-heap-attack
+Evaluating project ':test:external-modules:test-esql-heap-attack' using build file '/Users/rene/dev/elastic/elasticsearch/test/external-modules/esql-heap-attack/build.gradle'.
+
+> Configure project :test:external-modules:test-jvm-crash
+Evaluating project ':test:external-modules:test-jvm-crash' using build file '/Users/rene/dev/elastic/elasticsearch/test/external-modules/jvm-crash/build.gradle'.
+
+> Configure project :test:external-modules:test-latency-simulating-directory
+Evaluating project ':test:external-modules:test-latency-simulating-directory' using build file '/Users/rene/dev/elastic/elasticsearch/test/external-modules/latency-simulating-directory/build.gradle'.
+
+> Configure project :test:fixtures:aws-sts-fixture
+Evaluating project ':test:fixtures:aws-sts-fixture' using build file '/Users/rene/dev/elastic/elasticsearch/test/fixtures/aws-sts-fixture/build.gradle'.
+
+> Configure project :test:fixtures:azure-fixture
+Evaluating project ':test:fixtures:azure-fixture' using build file '/Users/rene/dev/elastic/elasticsearch/test/fixtures/azure-fixture/build.gradle'.
+
+> Configure project :test:fixtures:ec2-imds-fixture
+Evaluating project ':test:fixtures:ec2-imds-fixture' using build file '/Users/rene/dev/elastic/elasticsearch/test/fixtures/ec2-imds-fixture/build.gradle'.
+
+> Configure project :test:fixtures:gcs-fixture
+Evaluating project ':test:fixtures:gcs-fixture' using build file '/Users/rene/dev/elastic/elasticsearch/test/fixtures/gcs-fixture/build.gradle'.
+
+> Configure project :test:fixtures:geoip-fixture
+Evaluating project ':test:fixtures:geoip-fixture' using build file '/Users/rene/dev/elastic/elasticsearch/test/fixtures/geoip-fixture/build.gradle'.
+
+> Configure project :test:fixtures:hdfs-fixture
+Evaluating project ':test:fixtures:hdfs-fixture' using build file '/Users/rene/dev/elastic/elasticsearch/test/fixtures/hdfs-fixture/build.gradle'.
+
+> Configure project :test:fixtures:krb5kdc-fixture
+Evaluating project ':test:fixtures:krb5kdc-fixture' using build file '/Users/rene/dev/elastic/elasticsearch/test/fixtures/krb5kdc-fixture/build.gradle'.
+
+> Configure project :test:fixtures:minio-fixture
+Evaluating project ':test:fixtures:minio-fixture' using build file '/Users/rene/dev/elastic/elasticsearch/test/fixtures/minio-fixture/build.gradle'.
+
+> Configure project :test:fixtures:old-elasticsearch
+Evaluating project ':test:fixtures:old-elasticsearch' using build file '/Users/rene/dev/elastic/elasticsearch/test/fixtures/old-elasticsearch/build.gradle'.
+
+> Configure project :test:fixtures:s3-fixture
+Evaluating project ':test:fixtures:s3-fixture' using build file '/Users/rene/dev/elastic/elasticsearch/test/fixtures/s3-fixture/build.gradle'.
+
+> Configure project :test:fixtures:testcontainer-utils
+Evaluating project ':test:fixtures:testcontainer-utils' using build file '/Users/rene/dev/elastic/elasticsearch/test/fixtures/testcontainer-utils/build.gradle'.
+
+> Configure project :test:fixtures:url-fixture
+Evaluating project ':test:fixtures:url-fixture' using build file '/Users/rene/dev/elastic/elasticsearch/test/fixtures/url-fixture/build.gradle'.
+
+> Configure project :x-pack:libs:es-opensaml-security-api
+Evaluating project ':x-pack:libs:es-opensaml-security-api' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/libs/es-opensaml-security-api/build.gradle'.
+
+> Configure project :x-pack:plugin:analytics
+Evaluating project ':x-pack:plugin:analytics' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/analytics/build.gradle'.
+
+> Configure project :x-pack:plugin:apm-data
+Evaluating project ':x-pack:plugin:apm-data' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/apm-data/build.gradle'.
+
+> Configure project :x-pack:plugin:async
+Evaluating project ':x-pack:plugin:async' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/async/build.gradle'.
+
+> Configure project :x-pack:plugin:async-search
+Evaluating project ':x-pack:plugin:async-search' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/async-search/build.gradle'.
+
+> Configure project :x-pack:plugin:autoscaling
+Evaluating project ':x-pack:plugin:autoscaling' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/autoscaling/build.gradle'.
+
+> Configure project :x-pack:plugin:blob-cache
+Evaluating project ':x-pack:plugin:blob-cache' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/blob-cache/build.gradle'.
+
+> Configure project :x-pack:plugin:ccr
+Evaluating project ':x-pack:plugin:ccr' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ccr/build.gradle'.
+
+> Configure project :x-pack:plugin:core
+Evaluating project ':x-pack:plugin:core' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/core/build.gradle'.
+
+> Configure project :x-pack:plugin:deprecation
+Evaluating project ':x-pack:plugin:deprecation' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/deprecation/build.gradle'.
+
+> Configure project :x-pack:plugin:downsample
+Evaluating project ':x-pack:plugin:downsample' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/downsample/build.gradle'.
+
+> Configure project :x-pack:plugin:enrich
+Evaluating project ':x-pack:plugin:enrich' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/enrich/build.gradle'.
+
+> Configure project :x-pack:plugin:ent-search
+Evaluating project ':x-pack:plugin:ent-search' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ent-search/build.gradle'.
+
+> Configure project :x-pack:plugin:eql
+Evaluating project ':x-pack:plugin:eql' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/eql/build.gradle'.
+
+> Configure project :x-pack:plugin:esql
+Evaluating project ':x-pack:plugin:esql' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/esql/build.gradle'.
+
+> Configure project :x-pack:plugin:esql-core
+Evaluating project ':x-pack:plugin:esql-core' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/esql-core/build.gradle'.
+
+> Configure project :x-pack:plugin:fleet
+Evaluating project ':x-pack:plugin:fleet' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/fleet/build.gradle'.
+
+> Configure project :x-pack:plugin:frozen-indices
+Evaluating project ':x-pack:plugin:frozen-indices' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/frozen-indices/build.gradle'.
+
+> Configure project :x-pack:plugin:geoip-enterprise-downloader
+Evaluating project ':x-pack:plugin:geoip-enterprise-downloader' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/geoip-enterprise-downloader/build.gradle'.
+
+> Configure project :x-pack:plugin:graph
+Evaluating project ':x-pack:plugin:graph' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/graph/build.gradle'.
+
+> Configure project :x-pack:plugin:identity-provider
+Evaluating project ':x-pack:plugin:identity-provider' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/identity-provider/build.gradle'.
+
+> Configure project :x-pack:plugin:ilm
+Evaluating project ':x-pack:plugin:ilm' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ilm/build.gradle'.
+
+> Configure project :x-pack:plugin:inference
+Evaluating project ':x-pack:plugin:inference' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/inference/build.gradle'.
+
+> Configure project :x-pack:plugin:kql
+Evaluating project ':x-pack:plugin:kql' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/kql/build.gradle'.
+
+> Configure project :x-pack:plugin:logsdb
+Evaluating project ':x-pack:plugin:logsdb' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/logsdb/build.gradle'.
+
+> Configure project :x-pack:plugin:logstash
+Evaluating project ':x-pack:plugin:logstash' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/logstash/build.gradle'.
+
+> Configure project :x-pack:plugin:mapper-aggregate-metric
+Evaluating project ':x-pack:plugin:mapper-aggregate-metric' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/mapper-aggregate-metric/build.gradle'.
+
+> Configure project :x-pack:plugin:mapper-constant-keyword
+Evaluating project ':x-pack:plugin:mapper-constant-keyword' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/mapper-constant-keyword/build.gradle'.
+
+> Configure project :x-pack:plugin:mapper-counted-keyword
+Evaluating project ':x-pack:plugin:mapper-counted-keyword' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/mapper-counted-keyword/build.gradle'.
+
+> Configure project :x-pack:plugin:mapper-unsigned-long
+Evaluating project ':x-pack:plugin:mapper-unsigned-long' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/mapper-unsigned-long/build.gradle'.
+
+> Configure project :x-pack:plugin:mapper-version
+Evaluating project ':x-pack:plugin:mapper-version' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/mapper-version/build.gradle'.
+
+> Configure project :x-pack:plugin:migrate
+Evaluating project ':x-pack:plugin:migrate' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/migrate/build.gradle'.
+
+> Configure project :x-pack:plugin:ml
+Evaluating project ':x-pack:plugin:ml' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ml/build.gradle'.
+
+> Configure project :x-pack:plugin:ml-package-loader
+Evaluating project ':x-pack:plugin:ml-package-loader' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ml-package-loader/build.gradle'.
+
+> Configure project :x-pack:plugin:monitoring
+Evaluating project ':x-pack:plugin:monitoring' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/monitoring/build.gradle'.
+
+> Configure project :x-pack:plugin:old-lucene-versions
+Evaluating project ':x-pack:plugin:old-lucene-versions' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/old-lucene-versions/build.gradle'.
+
+> Configure project :x-pack:plugin:otel-data
+Evaluating project ':x-pack:plugin:otel-data' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/otel-data/build.gradle'.
+
+> Configure project :x-pack:plugin:profiling
+Evaluating project ':x-pack:plugin:profiling' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/profiling/build.gradle'.
+
+> Configure project :x-pack:plugin:ql
+Evaluating project ':x-pack:plugin:ql' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ql/build.gradle'.
+
+> Configure project :x-pack:plugin:rank-rrf
+Evaluating project ':x-pack:plugin:rank-rrf' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/rank-rrf/build.gradle'.
+
+> Configure project :x-pack:plugin:redact
+Evaluating project ':x-pack:plugin:redact' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/redact/build.gradle'.
+
+> Configure project :x-pack:plugin:repositories-metering-api
+Evaluating project ':x-pack:plugin:repositories-metering-api' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/repositories-metering-api/build.gradle'.
+
+> Configure project :x-pack:plugin:rollup
+Evaluating project ':x-pack:plugin:rollup' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/rollup/build.gradle'.
+
+> Configure project :x-pack:plugin:search-business-rules
+Evaluating project ':x-pack:plugin:search-business-rules' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/search-business-rules/build.gradle'.
+
+> Configure project :x-pack:plugin:searchable-snapshots
+Evaluating project ':x-pack:plugin:searchable-snapshots' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/searchable-snapshots/build.gradle'.
+
+> Configure project :x-pack:plugin:security
+Evaluating project ':x-pack:plugin:security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/build.gradle'.
+
+> Configure project :x-pack:plugin:shutdown
+Evaluating project ':x-pack:plugin:shutdown' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/shutdown/build.gradle'.
+
+> Configure project :x-pack:plugin:slm
+Evaluating project ':x-pack:plugin:slm' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/slm/build.gradle'.
+
+> Configure project :x-pack:plugin:snapshot-based-recoveries
+Evaluating project ':x-pack:plugin:snapshot-based-recoveries' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/snapshot-based-recoveries/build.gradle'.
+
+> Configure project :x-pack:plugin:snapshot-repo-test-kit
+Evaluating project ':x-pack:plugin:snapshot-repo-test-kit' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/snapshot-repo-test-kit/build.gradle'.
+
+> Configure project :x-pack:plugin:spatial
+Evaluating project ':x-pack:plugin:spatial' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/spatial/build.gradle'.
+
+> Configure project :x-pack:plugin:sql
+Evaluating project ':x-pack:plugin:sql' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/build.gradle'.
+
+> Configure project :x-pack:plugin:stack
+Evaluating project ':x-pack:plugin:stack' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/stack/build.gradle'.
+
+> Configure project :x-pack:plugin:text-structure
+Evaluating project ':x-pack:plugin:text-structure' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/text-structure/build.gradle'.
+
+> Configure project :x-pack:plugin:transform
+Evaluating project ':x-pack:plugin:transform' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/transform/build.gradle'.
+
+> Configure project :x-pack:plugin:vector-tile
+Evaluating project ':x-pack:plugin:vector-tile' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/vector-tile/build.gradle'.
+
+> Configure project :x-pack:plugin:voting-only-node
+Evaluating project ':x-pack:plugin:voting-only-node' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/voting-only-node/build.gradle'.
+
+> Configure project :x-pack:plugin:watcher
+Evaluating project ':x-pack:plugin:watcher' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/watcher/build.gradle'.
+
+> Configure project :x-pack:plugin:wildcard
+Evaluating project ':x-pack:plugin:wildcard' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/wildcard/build.gradle'.
+
+> Configure project :x-pack:plugin:write-load-forecaster
+Evaluating project ':x-pack:plugin:write-load-forecaster' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/write-load-forecaster/build.gradle'.
+
+> Configure project :x-pack:qa:core-rest-tests-with-security
+Evaluating project ':x-pack:qa:core-rest-tests-with-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/core-rest-tests-with-security/build.gradle'.
+
+> Configure project :x-pack:qa:evil-tests
+Evaluating project ':x-pack:qa:evil-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/evil-tests/build.gradle'.
+
+> Configure project :x-pack:qa:freeze-plugin
+Evaluating project ':x-pack:qa:freeze-plugin' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/freeze-plugin/build.gradle'.
+
+> Configure project :x-pack:qa:full-cluster-restart
+Evaluating project ':x-pack:qa:full-cluster-restart' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/full-cluster-restart/build.gradle'.
+
+> Configure project :x-pack:qa:kerberos-tests
+Evaluating project ':x-pack:qa:kerberos-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/kerberos-tests/build.gradle'.
+
+> Configure project :x-pack:qa:mixed-tier-cluster
+Evaluating project ':x-pack:qa:mixed-tier-cluster' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/mixed-tier-cluster/build.gradle'.
+
+> Configure project :x-pack:qa:multi-cluster-search-security
+Evaluating project ':x-pack:qa:multi-cluster-search-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/multi-cluster-search-security/build.gradle'.
+
+> Configure project :x-pack:qa:multi-node
+Evaluating project ':x-pack:qa:multi-node' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/multi-node/build.gradle'.
+
+> Configure project :x-pack:qa:oidc-op-tests
+Evaluating project ':x-pack:qa:oidc-op-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/oidc-op-tests/build.gradle'.
+
+> Configure project :x-pack:qa:openldap-tests
+Evaluating project ':x-pack:qa:openldap-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/openldap-tests/build.gradle'.
+
+> Configure project :x-pack:qa:password-protected-keystore
+Evaluating project ':x-pack:qa:password-protected-keystore' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/password-protected-keystore/build.gradle'.
+
+> Configure project :x-pack:qa:reindex-tests-with-security
+Evaluating project ':x-pack:qa:reindex-tests-with-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/reindex-tests-with-security/build.gradle'.
+
+> Configure project :x-pack:qa:repository-old-versions
+Evaluating project ':x-pack:qa:repository-old-versions' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/repository-old-versions/build.gradle'.
+
+> Configure project :x-pack:qa:rolling-upgrade
+Evaluating project ':x-pack:qa:rolling-upgrade' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/rolling-upgrade/build.gradle'.
+
+> Configure project :x-pack:qa:rolling-upgrade-basic
+Evaluating project ':x-pack:qa:rolling-upgrade-basic' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/rolling-upgrade-basic/build.gradle'.
+
+> Configure project :x-pack:qa:rolling-upgrade-multi-cluster
+Evaluating project ':x-pack:qa:rolling-upgrade-multi-cluster' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/rolling-upgrade-multi-cluster/build.gradle'.
+
+> Configure project :x-pack:qa:runtime-fields
+Evaluating project ':x-pack:qa:runtime-fields' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/runtime-fields/build.gradle'.
+
+> Configure project :x-pack:qa:saml-idp-tests
+Evaluating project ':x-pack:qa:saml-idp-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/saml-idp-tests/build.gradle'.
+
+> Configure project :x-pack:qa:security-example-spi-extension
+Evaluating project ':x-pack:qa:security-example-spi-extension' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/security-example-spi-extension/build.gradle'.
+
+> Configure project :x-pack:qa:security-setup-password-tests
+Evaluating project ':x-pack:qa:security-setup-password-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/security-setup-password-tests/build.gradle'.
+
+> Configure project :x-pack:qa:security-tools-tests
+Evaluating project ':x-pack:qa:security-tools-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/security-tools-tests/build.gradle'.
+
+> Configure project :x-pack:qa:smoke-test-plugins
+Evaluating project ':x-pack:qa:smoke-test-plugins' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/smoke-test-plugins/build.gradle'.
+
+> Configure project :x-pack:qa:smoke-test-plugins-ssl
+Evaluating project ':x-pack:qa:smoke-test-plugins-ssl' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/smoke-test-plugins-ssl/build.gradle'.
+
+> Configure project :x-pack:qa:smoke-test-security-with-mustache
+Evaluating project ':x-pack:qa:smoke-test-security-with-mustache' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/smoke-test-security-with-mustache/build.gradle'.
+
+> Configure project :x-pack:qa:third-party
+Evaluating project ':x-pack:qa:third-party' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/third-party/build.gradle'.
+
+> Configure project :x-pack:test:idp-fixture
+Evaluating project ':x-pack:test:idp-fixture' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/test/idp-fixture/build.gradle'.
+
+> Configure project :x-pack:test:smb-fixture
+Evaluating project ':x-pack:test:smb-fixture' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/test/smb-fixture/build.gradle'.
+
+> Configure project :libs:entitlement:qa:common
+Evaluating project ':libs:entitlement:qa:common' using build file '/Users/rene/dev/elastic/elasticsearch/libs/entitlement/qa/common/build.gradle'.
+
+> Configure project :libs:entitlement:qa:entitlement-allowed
+Evaluating project ':libs:entitlement:qa:entitlement-allowed' using build file '/Users/rene/dev/elastic/elasticsearch/libs/entitlement/qa/entitlement-allowed/build.gradle'.
+
+> Configure project :libs:entitlement:qa:entitlement-allowed-nonmodular
+Evaluating project ':libs:entitlement:qa:entitlement-allowed-nonmodular' using build file '/Users/rene/dev/elastic/elasticsearch/libs/entitlement/qa/entitlement-allowed-nonmodular/build.gradle'.
+
+> Configure project :libs:entitlement:qa:entitlement-denied
+Evaluating project ':libs:entitlement:qa:entitlement-denied' using build file '/Users/rene/dev/elastic/elasticsearch/libs/entitlement/qa/entitlement-denied/build.gradle'.
+
+> Configure project :libs:entitlement:qa:entitlement-denied-nonmodular
+Evaluating project ':libs:entitlement:qa:entitlement-denied-nonmodular' using build file '/Users/rene/dev/elastic/elasticsearch/libs/entitlement/qa/entitlement-denied-nonmodular/build.gradle'.
+
+> Configure project :libs:entitlement:tools:common
+Evaluating project ':libs:entitlement:tools:common' using build file '/Users/rene/dev/elastic/elasticsearch/libs/entitlement/tools/common/build.gradle'.
+
+> Configure project :libs:entitlement:tools:public-callers-finder
+Evaluating project ':libs:entitlement:tools:public-callers-finder' using build file '/Users/rene/dev/elastic/elasticsearch/libs/entitlement/tools/public-callers-finder/build.gradle'.
+
+> Configure project :libs:entitlement:tools:securitymanager-scanner
+Evaluating project ':libs:entitlement:tools:securitymanager-scanner' using build file '/Users/rene/dev/elastic/elasticsearch/libs/entitlement/tools/securitymanager-scanner/build.gradle'.
+
+> Configure project :modules:ingest-geoip:qa:file-based-update
+Evaluating project ':modules:ingest-geoip:qa:file-based-update' using build file '/Users/rene/dev/elastic/elasticsearch/modules/ingest-geoip/qa/file-based-update/build.gradle'.
+
+> Configure project :modules:ingest-geoip:qa:full-cluster-restart
+Evaluating project ':modules:ingest-geoip:qa:full-cluster-restart' using build file '/Users/rene/dev/elastic/elasticsearch/modules/ingest-geoip/qa/full-cluster-restart/build.gradle'.
+
+> Configure project :plugins:discovery-ec2:qa:amazon-ec2
+Evaluating project ':plugins:discovery-ec2:qa:amazon-ec2' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/discovery-ec2/qa/amazon-ec2/build.gradle'.
+
+> Configure project :plugins:discovery-gce:qa:gce
+Evaluating project ':plugins:discovery-gce:qa:gce' using build file '/Users/rene/dev/elastic/elasticsearch/plugins/discovery-gce/qa/gce/build.gradle'.
+
+> Configure project :x-pack:plugin:async-search:qa
+Evaluating project ':x-pack:plugin:async-search:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/async-search/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:autoscaling:qa
+Evaluating project ':x-pack:plugin:autoscaling:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/autoscaling/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:ccr:qa
+Evaluating project ':x-pack:plugin:ccr:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ccr/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:core:template-resources
+Evaluating project ':x-pack:plugin:core:template-resources' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/core/template-resources/build.gradle'.
+
+> Configure project :x-pack:plugin:deprecation:qa
+Evaluating project ':x-pack:plugin:deprecation:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/deprecation/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:downsample:qa
+Evaluating project ':x-pack:plugin:downsample:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/downsample/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:enrich:qa
+Evaluating project ':x-pack:plugin:enrich:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/enrich/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:ent-search:qa
+Evaluating project ':x-pack:plugin:ent-search:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ent-search/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:eql:qa
+Evaluating project ':x-pack:plugin:eql:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/eql/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:esql:arrow
+Evaluating project ':x-pack:plugin:esql:arrow' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/esql/arrow/build.gradle'.
+
+> Configure project :x-pack:plugin:esql:compute
+Evaluating project ':x-pack:plugin:esql:compute' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/esql/compute/build.gradle'.
+
+> Configure project :x-pack:plugin:esql:qa
+Evaluating project ':x-pack:plugin:esql:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/esql/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:esql-core:test-fixtures
+Evaluating project ':x-pack:plugin:esql-core:test-fixtures' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/esql-core/test-fixtures/build.gradle'.
+
+> Configure project :x-pack:plugin:fleet:qa
+Evaluating project ':x-pack:plugin:fleet:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/fleet/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:graph:qa
+Evaluating project ':x-pack:plugin:graph:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/graph/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:identity-provider:qa
+Evaluating project ':x-pack:plugin:identity-provider:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/identity-provider/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:ilm:qa
+Evaluating project ':x-pack:plugin:ilm:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ilm/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:inference:qa
+Evaluating project ':x-pack:plugin:inference:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/inference/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:logsdb:qa
+Evaluating project ':x-pack:plugin:logsdb:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/logsdb/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:ml:qa
+Evaluating project ':x-pack:plugin:ml:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ml/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:ql:test-fixtures
+Evaluating project ':x-pack:plugin:ql:test-fixtures' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ql/test-fixtures/build.gradle'.
+
+> Configure project :x-pack:plugin:repositories-metering-api:qa
+Evaluating project ':x-pack:plugin:repositories-metering-api:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/repositories-metering-api/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:searchable-snapshots:qa
+Evaluating project ':x-pack:plugin:searchable-snapshots:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/searchable-snapshots/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:security:cli
+Evaluating project ':x-pack:plugin:security:cli' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/cli/build.gradle'.
+
+> Configure project :x-pack:plugin:security:lib
+Evaluating project ':x-pack:plugin:security:lib' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/lib/build.gradle'.
+
+> Configure project :x-pack:plugin:security:qa
+Evaluating project ':x-pack:plugin:security:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:shutdown:qa
+Evaluating project ':x-pack:plugin:shutdown:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/shutdown/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:slm:qa
+Evaluating project ':x-pack:plugin:slm:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/slm/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:snapshot-based-recoveries:qa
+Evaluating project ':x-pack:plugin:snapshot-based-recoveries:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/snapshot-based-recoveries/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:snapshot-repo-test-kit:qa
+Evaluating project ':x-pack:plugin:snapshot-repo-test-kit:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/snapshot-repo-test-kit/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:jdbc
+Evaluating project ':x-pack:plugin:sql:jdbc' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/jdbc/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:qa
+Evaluating project ':x-pack:plugin:sql:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:sql-action
+Evaluating project ':x-pack:plugin:sql:sql-action' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/sql-action/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:sql-cli
+Evaluating project ':x-pack:plugin:sql:sql-cli' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/sql-cli/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:sql-client
+Evaluating project ':x-pack:plugin:sql:sql-client' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/sql-client/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:sql-proto
+Evaluating project ':x-pack:plugin:sql:sql-proto' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/sql-proto/build.gradle'.
+
+> Configure project :x-pack:plugin:stack:qa
+Evaluating project ':x-pack:plugin:stack:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/stack/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:text-structure:qa
+Evaluating project ':x-pack:plugin:text-structure:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/text-structure/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:transform:qa
+Evaluating project ':x-pack:plugin:transform:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/transform/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:vector-tile:qa
+Evaluating project ':x-pack:plugin:vector-tile:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/vector-tile/qa/build.gradle'.
+
+> Configure project :x-pack:plugin:watcher:qa
+Evaluating project ':x-pack:plugin:watcher:qa' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/watcher/qa/build.gradle'.
+
+> Configure project :x-pack:qa:multi-cluster-search-security:legacy-with-basic-license
+Evaluating project ':x-pack:qa:multi-cluster-search-security:legacy-with-basic-license' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle'.
+
+> Configure project :x-pack:qa:multi-cluster-search-security:legacy-with-full-license
+Evaluating project ':x-pack:qa:multi-cluster-search-security:legacy-with-full-license' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/multi-cluster-search-security/legacy-with-full-license/build.gradle'.
+
+> Configure project :x-pack:qa:multi-cluster-search-security:legacy-with-restricted-trust
+Evaluating project ':x-pack:qa:multi-cluster-search-security:legacy-with-restricted-trust' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/multi-cluster-search-security/legacy-with-restricted-trust/build.gradle'.
+
+> Configure project :x-pack:qa:runtime-fields:core-with-mapped
+Evaluating project ':x-pack:qa:runtime-fields:core-with-mapped' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/runtime-fields/core-with-mapped/build.gradle'.
+
+> Configure project :x-pack:qa:runtime-fields:core-with-search
+Evaluating project ':x-pack:qa:runtime-fields:core-with-search' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/runtime-fields/core-with-search/build.gradle'.
+
+> Configure project :x-pack:qa:runtime-fields:with-security
+Evaluating project ':x-pack:qa:runtime-fields:with-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/runtime-fields/with-security/build.gradle'.
+
+> Configure project :x-pack:qa:third-party:active-directory
+Evaluating project ':x-pack:qa:third-party:active-directory' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/third-party/active-directory/build.gradle'.
+
+> Configure project :x-pack:qa:third-party:jira
+Evaluating project ':x-pack:qa:third-party:jira' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/third-party/jira/build.gradle'.
+
+> Configure project :x-pack:qa:third-party:pagerduty
+Evaluating project ':x-pack:qa:third-party:pagerduty' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/third-party/pagerduty/build.gradle'.
+
+> Configure project :x-pack:qa:third-party:slack
+Evaluating project ':x-pack:qa:third-party:slack' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/qa/third-party/slack/build.gradle'.
+
+> Configure project :x-pack:plugin:async-search:qa:rest
+Evaluating project ':x-pack:plugin:async-search:qa:rest' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/async-search/qa/rest/build.gradle'.
+
+> Configure project :x-pack:plugin:async-search:qa:security
+Evaluating project ':x-pack:plugin:async-search:qa:security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/async-search/qa/security/build.gradle'.
+
+> Configure project :x-pack:plugin:autoscaling:qa:rest
+Evaluating project ':x-pack:plugin:autoscaling:qa:rest' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/autoscaling/qa/rest/build.gradle'.
+
+> Configure project :x-pack:plugin:ccr:qa:downgrade-to-basic-license
+Evaluating project ':x-pack:plugin:ccr:qa:downgrade-to-basic-license' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle'.
+
+> Configure project :x-pack:plugin:ccr:qa:multi-cluster
+Evaluating project ':x-pack:plugin:ccr:qa:multi-cluster' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ccr/qa/multi-cluster/build.gradle'.
+
+> Configure project :x-pack:plugin:ccr:qa:non-compliant-license
+Evaluating project ':x-pack:plugin:ccr:qa:non-compliant-license' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ccr/qa/non-compliant-license/build.gradle'.
+
+> Configure project :x-pack:plugin:ccr:qa:rest
+Evaluating project ':x-pack:plugin:ccr:qa:rest' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ccr/qa/rest/build.gradle'.
+
+> Configure project :x-pack:plugin:ccr:qa:restart
+Evaluating project ':x-pack:plugin:ccr:qa:restart' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ccr/qa/restart/build.gradle'.
+
+> Configure project :x-pack:plugin:ccr:qa:security
+Evaluating project ':x-pack:plugin:ccr:qa:security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ccr/qa/security/build.gradle'.
+
+> Configure project :x-pack:plugin:deprecation:qa:common
+Evaluating project ':x-pack:plugin:deprecation:qa:common' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/deprecation/qa/common/build.gradle'.
+
+> Configure project :x-pack:plugin:deprecation:qa:early-deprecation-rest
+Evaluating project ':x-pack:plugin:deprecation:qa:early-deprecation-rest' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/deprecation/qa/early-deprecation-rest/build.gradle'.
+
+> Configure project :x-pack:plugin:deprecation:qa:rest
+Evaluating project ':x-pack:plugin:deprecation:qa:rest' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/deprecation/qa/rest/build.gradle'.
+
+> Configure project :x-pack:plugin:downsample:qa:mixed-cluster
+Evaluating project ':x-pack:plugin:downsample:qa:mixed-cluster' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle'.
+
+> Configure project :x-pack:plugin:downsample:qa:rest
+Evaluating project ':x-pack:plugin:downsample:qa:rest' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/downsample/qa/rest/build.gradle'.
+
+> Configure project :x-pack:plugin:downsample:qa:with-security
+Evaluating project ':x-pack:plugin:downsample:qa:with-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/downsample/qa/with-security/build.gradle'.
+
+> Configure project :x-pack:plugin:enrich:qa:common
+Evaluating project ':x-pack:plugin:enrich:qa:common' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/enrich/qa/common/build.gradle'.
+
+> Configure project :x-pack:plugin:enrich:qa:rest
+Evaluating project ':x-pack:plugin:enrich:qa:rest' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/enrich/qa/rest/build.gradle'.
+
+> Configure project :x-pack:plugin:enrich:qa:rest-with-advanced-security
+Evaluating project ':x-pack:plugin:enrich:qa:rest-with-advanced-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/enrich/qa/rest-with-advanced-security/build.gradle'.
+
+> Configure project :x-pack:plugin:enrich:qa:rest-with-security
+Evaluating project ':x-pack:plugin:enrich:qa:rest-with-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/enrich/qa/rest-with-security/build.gradle'.
+
+> Configure project :x-pack:plugin:ent-search:qa:full-cluster-restart
+Evaluating project ':x-pack:plugin:ent-search:qa:full-cluster-restart' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ent-search/qa/full-cluster-restart/build.gradle'.
+
+> Configure project :x-pack:plugin:ent-search:qa:rest
+Evaluating project ':x-pack:plugin:ent-search:qa:rest' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ent-search/qa/rest/build.gradle'.
+
+> Configure project :x-pack:plugin:eql:qa:ccs-rolling-upgrade
+Evaluating project ':x-pack:plugin:eql:qa:ccs-rolling-upgrade' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/eql/qa/ccs-rolling-upgrade/build.gradle'.
+
+> Configure project :x-pack:plugin:eql:qa:common
+Evaluating project ':x-pack:plugin:eql:qa:common' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/eql/qa/common/build.gradle'.
+
+> Configure project :x-pack:plugin:eql:qa:correctness
+Evaluating project ':x-pack:plugin:eql:qa:correctness' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/eql/qa/correctness/build.gradle'.
+
+> Configure project :x-pack:plugin:eql:qa:mixed-node
+Evaluating project ':x-pack:plugin:eql:qa:mixed-node' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/eql/qa/mixed-node/build.gradle'.
+
+> Configure project :x-pack:plugin:eql:qa:multi-cluster-with-security
+Evaluating project ':x-pack:plugin:eql:qa:multi-cluster-with-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/eql/qa/multi-cluster-with-security/build.gradle'.
+
+> Configure project :x-pack:plugin:eql:qa:rest
+Evaluating project ':x-pack:plugin:eql:qa:rest' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/eql/qa/rest/build.gradle'.
+
+> Configure project :x-pack:plugin:eql:qa:security
+Evaluating project ':x-pack:plugin:eql:qa:security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/eql/qa/security/build.gradle'.
+
+> Configure project :x-pack:plugin:esql:compute:ann
+Evaluating project ':x-pack:plugin:esql:compute:ann' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/esql/compute/ann/build.gradle'.
+
+> Configure project :x-pack:plugin:esql:compute:gen
+Evaluating project ':x-pack:plugin:esql:compute:gen' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/esql/compute/gen/build.gradle'.
+
+> Configure project :x-pack:plugin:esql:qa:action
+Evaluating project ':x-pack:plugin:esql:qa:action' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/esql/qa/action/build.gradle'.
+
+> Configure project :x-pack:plugin:esql:qa:security
+Evaluating project ':x-pack:plugin:esql:qa:security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/esql/qa/security/build.gradle'.
+
+> Configure project :x-pack:plugin:esql:qa:server
+Evaluating project ':x-pack:plugin:esql:qa:server' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/esql/qa/server/build.gradle'.
+
+> Configure project :x-pack:plugin:esql:qa:testFixtures
+Evaluating project ':x-pack:plugin:esql:qa:testFixtures' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/esql/qa/testFixtures/build.gradle'.
+
+> Configure project :x-pack:plugin:fleet:qa:rest
+Evaluating project ':x-pack:plugin:fleet:qa:rest' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/fleet/qa/rest/build.gradle'.
+
+> Configure project :x-pack:plugin:graph:qa:with-security
+Evaluating project ':x-pack:plugin:graph:qa:with-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/graph/qa/with-security/build.gradle'.
+
+> Configure project :x-pack:plugin:identity-provider:qa:idp-rest-tests
+Evaluating project ':x-pack:plugin:identity-provider:qa:idp-rest-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/identity-provider/qa/idp-rest-tests/build.gradle'.
+
+> Configure project :x-pack:plugin:ilm:qa:multi-cluster
+Evaluating project ':x-pack:plugin:ilm:qa:multi-cluster' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ilm/qa/multi-cluster/build.gradle'.
+
+> Configure project :x-pack:plugin:ilm:qa:multi-node
+Evaluating project ':x-pack:plugin:ilm:qa:multi-node' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ilm/qa/multi-node/build.gradle'.
+
+> Configure project :x-pack:plugin:ilm:qa:rest
+Evaluating project ':x-pack:plugin:ilm:qa:rest' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ilm/qa/rest/build.gradle'.
+
+> Configure project :x-pack:plugin:ilm:qa:with-security
+Evaluating project ':x-pack:plugin:ilm:qa:with-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ilm/qa/with-security/build.gradle'.
+
+> Configure project :x-pack:plugin:inference:qa:inference-service-tests
+Evaluating project ':x-pack:plugin:inference:qa:inference-service-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/inference/qa/inference-service-tests/build.gradle'.
+
+> Configure project :x-pack:plugin:inference:qa:mixed-cluster
+Evaluating project ':x-pack:plugin:inference:qa:mixed-cluster' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/inference/qa/mixed-cluster/build.gradle'.
+
+> Configure project :x-pack:plugin:inference:qa:rolling-upgrade
+Evaluating project ':x-pack:plugin:inference:qa:rolling-upgrade' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/inference/qa/rolling-upgrade/build.gradle'.
+
+> Configure project :x-pack:plugin:inference:qa:test-service-plugin
+Evaluating project ':x-pack:plugin:inference:qa:test-service-plugin' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/inference/qa/test-service-plugin/build.gradle'.
+
+> Configure project :x-pack:plugin:logsdb:qa:with-basic
+Evaluating project ':x-pack:plugin:logsdb:qa:with-basic' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/logsdb/qa/with-basic/build.gradle'.
+
+> Configure project :x-pack:plugin:logsdb:qa:with-custom-cutoff
+Evaluating project ':x-pack:plugin:logsdb:qa:with-custom-cutoff' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/logsdb/qa/with-custom-cutoff/build.gradle'.
+
+> Configure project :x-pack:plugin:ml:qa:basic-multi-node
+Evaluating project ':x-pack:plugin:ml:qa:basic-multi-node' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ml/qa/basic-multi-node/build.gradle'.
+
+> Configure project :x-pack:plugin:ml:qa:disabled
+Evaluating project ':x-pack:plugin:ml:qa:disabled' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ml/qa/disabled/build.gradle'.
+
+> Configure project :x-pack:plugin:ml:qa:ml-inference-service-tests
+Evaluating project ':x-pack:plugin:ml:qa:ml-inference-service-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ml/qa/ml-inference-service-tests/build.gradle'.
+
+> Configure project :x-pack:plugin:ml:qa:ml-with-security
+Evaluating project ':x-pack:plugin:ml:qa:ml-with-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ml/qa/ml-with-security/build.gradle'.
+
+> Configure project :x-pack:plugin:ml:qa:multi-cluster-tests-with-security
+Evaluating project ':x-pack:plugin:ml:qa:multi-cluster-tests-with-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle'.
+
+> Configure project :x-pack:plugin:ml:qa:native-multi-node-tests
+Evaluating project ':x-pack:plugin:ml:qa:native-multi-node-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle'.
+
+> Configure project :x-pack:plugin:ml:qa:no-bootstrap-tests
+Evaluating project ':x-pack:plugin:ml:qa:no-bootstrap-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ml/qa/no-bootstrap-tests/build.gradle'.
+
+> Configure project :x-pack:plugin:ml:qa:single-node-tests
+Evaluating project ':x-pack:plugin:ml:qa:single-node-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/ml/qa/single-node-tests/build.gradle'.
+
+> Configure project :x-pack:plugin:repositories-metering-api:qa:azure
+Evaluating project ':x-pack:plugin:repositories-metering-api:qa:azure' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/repositories-metering-api/qa/azure/build.gradle'.
+
+> Configure project :x-pack:plugin:repositories-metering-api:qa:gcs
+Evaluating project ':x-pack:plugin:repositories-metering-api:qa:gcs' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/repositories-metering-api/qa/gcs/build.gradle'.
+
+> Configure project :x-pack:plugin:repositories-metering-api:qa:s3
+Evaluating project ':x-pack:plugin:repositories-metering-api:qa:s3' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/repositories-metering-api/qa/s3/build.gradle'.
+
+> Configure project :x-pack:plugin:searchable-snapshots:qa:azure
+Evaluating project ':x-pack:plugin:searchable-snapshots:qa:azure' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/searchable-snapshots/qa/azure/build.gradle'.
+
+> Configure project :x-pack:plugin:searchable-snapshots:qa:gcs
+Evaluating project ':x-pack:plugin:searchable-snapshots:qa:gcs' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/searchable-snapshots/qa/gcs/build.gradle'.
+
+> Configure project :x-pack:plugin:searchable-snapshots:qa:hdfs
+Evaluating project ':x-pack:plugin:searchable-snapshots:qa:hdfs' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/searchable-snapshots/qa/hdfs/build.gradle'.
+
+> Configure project :x-pack:plugin:searchable-snapshots:qa:minio
+Evaluating project ':x-pack:plugin:searchable-snapshots:qa:minio' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/searchable-snapshots/qa/minio/build.gradle'.
+
+> Configure project :x-pack:plugin:searchable-snapshots:qa:rest
+Evaluating project ':x-pack:plugin:searchable-snapshots:qa:rest' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/searchable-snapshots/qa/rest/build.gradle'.
+
+> Configure project :x-pack:plugin:searchable-snapshots:qa:s3
+Evaluating project ':x-pack:plugin:searchable-snapshots:qa:s3' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/searchable-snapshots/qa/s3/build.gradle'.
+
+> Configure project :x-pack:plugin:searchable-snapshots:qa:url
+Evaluating project ':x-pack:plugin:searchable-snapshots:qa:url' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/searchable-snapshots/qa/url/build.gradle'.
+
+> Configure project :x-pack:plugin:security:lib:nimbus-jose-jwt-modified
+Evaluating project ':x-pack:plugin:security:lib:nimbus-jose-jwt-modified' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/lib/nimbus-jose-jwt-modified/build.gradle'.
+
+> Configure project :x-pack:plugin:security:lib:nimbus-jose-jwt-modified-part1
+Evaluating project ':x-pack:plugin:security:lib:nimbus-jose-jwt-modified-part1' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part1/build.gradle'.
+
+> Configure project :x-pack:plugin:security:lib:nimbus-jose-jwt-modified-part2
+Evaluating project ':x-pack:plugin:security:lib:nimbus-jose-jwt-modified-part2' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/lib/nimbus-jose-jwt-modified-part2/build.gradle'.
+
+> Configure project :x-pack:plugin:security:qa:audit
+Evaluating project ':x-pack:plugin:security:qa:audit' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/qa/audit/build.gradle'.
+
+> Configure project :x-pack:plugin:security:qa:basic-enable-security
+Evaluating project ':x-pack:plugin:security:qa:basic-enable-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/qa/basic-enable-security/build.gradle'.
+
+> Configure project :x-pack:plugin:security:qa:consistency-checks
+Evaluating project ':x-pack:plugin:security:qa:consistency-checks' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/qa/consistency-checks/build.gradle'.
+
+> Configure project :x-pack:plugin:security:qa:jwt-realm
+Evaluating project ':x-pack:plugin:security:qa:jwt-realm' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/qa/jwt-realm/build.gradle'.
+
+> Configure project :x-pack:plugin:security:qa:multi-cluster
+Evaluating project ':x-pack:plugin:security:qa:multi-cluster' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/qa/multi-cluster/build.gradle'.
+
+> Configure project :x-pack:plugin:security:qa:operator-privileges-tests
+Evaluating project ':x-pack:plugin:security:qa:operator-privileges-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/qa/operator-privileges-tests/build.gradle'.
+
+> Configure project :x-pack:plugin:security:qa:profile
+Evaluating project ':x-pack:plugin:security:qa:profile' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/qa/profile/build.gradle'.
+
+> Configure project :x-pack:plugin:security:qa:saml-rest-tests
+Evaluating project ':x-pack:plugin:security:qa:saml-rest-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/qa/saml-rest-tests/build.gradle'.
+
+> Configure project :x-pack:plugin:security:qa:secondary-auth-actions
+Evaluating project ':x-pack:plugin:security:qa:secondary-auth-actions' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/qa/secondary-auth-actions/build.gradle'.
+
+> Configure project :x-pack:plugin:security:qa:security-basic
+Evaluating project ':x-pack:plugin:security:qa:security-basic' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/qa/security-basic/build.gradle'.
+
+> Configure project :x-pack:plugin:security:qa:security-disabled
+Evaluating project ':x-pack:plugin:security:qa:security-disabled' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/qa/security-disabled/build.gradle'.
+
+> Configure project :x-pack:plugin:security:qa:security-trial
+Evaluating project ':x-pack:plugin:security:qa:security-trial' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/qa/security-trial/build.gradle'.
+
+> Configure project :x-pack:plugin:security:qa:service-account
+Evaluating project ':x-pack:plugin:security:qa:service-account' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/qa/service-account/build.gradle'.
+
+> Configure project :x-pack:plugin:security:qa:smoke-test-all-realms
+Evaluating project ':x-pack:plugin:security:qa:smoke-test-all-realms' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/qa/smoke-test-all-realms/build.gradle'.
+
+> Configure project :x-pack:plugin:security:qa:tls-basic
+Evaluating project ':x-pack:plugin:security:qa:tls-basic' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/security/qa/tls-basic/build.gradle'.
+
+> Configure project :x-pack:plugin:shutdown:qa:full-cluster-restart
+Evaluating project ':x-pack:plugin:shutdown:qa:full-cluster-restart' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/shutdown/qa/full-cluster-restart/build.gradle'.
+
+> Configure project :x-pack:plugin:shutdown:qa:multi-node
+Evaluating project ':x-pack:plugin:shutdown:qa:multi-node' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/shutdown/qa/multi-node/build.gradle'.
+
+> Configure project :x-pack:plugin:shutdown:qa:rolling-upgrade
+Evaluating project ':x-pack:plugin:shutdown:qa:rolling-upgrade' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/shutdown/qa/rolling-upgrade/build.gradle'.
+
+> Configure project :x-pack:plugin:slm:qa:multi-node
+Evaluating project ':x-pack:plugin:slm:qa:multi-node' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/slm/qa/multi-node/build.gradle'.
+
+> Configure project :x-pack:plugin:slm:qa:rest
+Evaluating project ':x-pack:plugin:slm:qa:rest' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/slm/qa/rest/build.gradle'.
+
+> Configure project :x-pack:plugin:slm:qa:with-security
+Evaluating project ':x-pack:plugin:slm:qa:with-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/slm/qa/with-security/build.gradle'.
+
+> Configure project :x-pack:plugin:snapshot-based-recoveries:qa:azure
+Evaluating project ':x-pack:plugin:snapshot-based-recoveries:qa:azure' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/snapshot-based-recoveries/qa/azure/build.gradle'.
+
+> Configure project :x-pack:plugin:snapshot-based-recoveries:qa:fs
+Evaluating project ':x-pack:plugin:snapshot-based-recoveries:qa:fs' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/snapshot-based-recoveries/qa/fs/build.gradle'.
+
+> Configure project :x-pack:plugin:snapshot-based-recoveries:qa:gcs
+Evaluating project ':x-pack:plugin:snapshot-based-recoveries:qa:gcs' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/snapshot-based-recoveries/qa/gcs/build.gradle'.
+
+> Configure project :x-pack:plugin:snapshot-based-recoveries:qa:license-enforcing
+Evaluating project ':x-pack:plugin:snapshot-based-recoveries:qa:license-enforcing' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/snapshot-based-recoveries/qa/license-enforcing/build.gradle'.
+
+> Configure project :x-pack:plugin:snapshot-based-recoveries:qa:s3
+Evaluating project ':x-pack:plugin:snapshot-based-recoveries:qa:s3' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/snapshot-based-recoveries/qa/s3/build.gradle'.
+
+> Configure project :x-pack:plugin:snapshot-repo-test-kit:qa:azure
+Evaluating project ':x-pack:plugin:snapshot-repo-test-kit:qa:azure' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/snapshot-repo-test-kit/qa/azure/build.gradle'.
+
+> Configure project :x-pack:plugin:snapshot-repo-test-kit:qa:gcs
+Evaluating project ':x-pack:plugin:snapshot-repo-test-kit:qa:gcs' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/snapshot-repo-test-kit/qa/gcs/build.gradle'.
+
+> Configure project :x-pack:plugin:snapshot-repo-test-kit:qa:hdfs
+Evaluating project ':x-pack:plugin:snapshot-repo-test-kit:qa:hdfs' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/snapshot-repo-test-kit/qa/hdfs/build.gradle'.
+
+> Configure project :x-pack:plugin:snapshot-repo-test-kit:qa:minio
+Evaluating project ':x-pack:plugin:snapshot-repo-test-kit:qa:minio' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/snapshot-repo-test-kit/qa/minio/build.gradle'.
+
+> Configure project :x-pack:plugin:snapshot-repo-test-kit:qa:rest
+Evaluating project ':x-pack:plugin:snapshot-repo-test-kit:qa:rest' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/snapshot-repo-test-kit/qa/rest/build.gradle'.
+
+> Configure project :x-pack:plugin:snapshot-repo-test-kit:qa:s3
+Evaluating project ':x-pack:plugin:snapshot-repo-test-kit:qa:s3' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/snapshot-repo-test-kit/qa/s3/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:qa:jdbc
+Evaluating project ':x-pack:plugin:sql:qa:jdbc' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/qa/jdbc/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:qa:mixed-node
+Evaluating project ':x-pack:plugin:sql:qa:mixed-node' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/qa/mixed-node/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:qa:server
+Evaluating project ':x-pack:plugin:sql:qa:server' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/qa/server/build.gradle'.
+
+> Configure project :x-pack:plugin:stack:qa:rest
+Evaluating project ':x-pack:plugin:stack:qa:rest' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/stack/qa/rest/build.gradle'.
+
+> Configure project :x-pack:plugin:text-structure:qa:text-structure-with-security
+Evaluating project ':x-pack:plugin:text-structure:qa:text-structure-with-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/text-structure/qa/text-structure-with-security/build.gradle'.
+
+> Configure project :x-pack:plugin:transform:qa:common
+Evaluating project ':x-pack:plugin:transform:qa:common' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/transform/qa/common/build.gradle'.
+
+> Configure project :x-pack:plugin:transform:qa:multi-cluster-tests-with-security
+Evaluating project ':x-pack:plugin:transform:qa:multi-cluster-tests-with-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle'.
+
+> Configure project :x-pack:plugin:transform:qa:multi-node-tests
+Evaluating project ':x-pack:plugin:transform:qa:multi-node-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/transform/qa/multi-node-tests/build.gradle'.
+
+> Configure project :x-pack:plugin:transform:qa:single-node-tests
+Evaluating project ':x-pack:plugin:transform:qa:single-node-tests' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/transform/qa/single-node-tests/build.gradle'.
+
+> Configure project :x-pack:plugin:vector-tile:qa:multi-cluster
+Evaluating project ':x-pack:plugin:vector-tile:qa:multi-cluster' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/vector-tile/qa/multi-cluster/build.gradle'.
+
+> Configure project :x-pack:plugin:watcher:qa:common
+Evaluating project ':x-pack:plugin:watcher:qa:common' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/watcher/qa/common/build.gradle'.
+
+> Configure project :x-pack:plugin:watcher:qa:rest
+Evaluating project ':x-pack:plugin:watcher:qa:rest' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/watcher/qa/rest/build.gradle'.
+
+> Configure project :x-pack:plugin:watcher:qa:with-security
+Evaluating project ':x-pack:plugin:watcher:qa:with-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/watcher/qa/with-security/build.gradle'.
+
+> Configure project :x-pack:plugin:esql:qa:server:mixed-cluster
+Evaluating project ':x-pack:plugin:esql:qa:server:mixed-cluster' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle'.
+
+> Configure project :x-pack:plugin:esql:qa:server:multi-clusters
+Evaluating project ':x-pack:plugin:esql:qa:server:multi-clusters' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle'.
+
+> Configure project :x-pack:plugin:esql:qa:server:multi-node
+Evaluating project ':x-pack:plugin:esql:qa:server:multi-node' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/esql/qa/server/multi-node/build.gradle'.
+
+> Configure project :x-pack:plugin:esql:qa:server:single-node
+Evaluating project ':x-pack:plugin:esql:qa:server:single-node' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/esql/qa/server/single-node/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:qa:jdbc:multi-node
+Evaluating project ':x-pack:plugin:sql:qa:jdbc:multi-node' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/qa/jdbc/multi-node/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:qa:jdbc:no-sql
+Evaluating project ':x-pack:plugin:sql:qa:jdbc:no-sql' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/qa/jdbc/no-sql/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:qa:jdbc:security
+Evaluating project ':x-pack:plugin:sql:qa:jdbc:security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/qa/jdbc/security/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:qa:jdbc:single-node
+Evaluating project ':x-pack:plugin:sql:qa:jdbc:single-node' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/qa/jdbc/single-node/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:qa:server:multi-cluster-with-security
+Evaluating project ':x-pack:plugin:sql:qa:server:multi-cluster-with-security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/qa/server/multi-cluster-with-security/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:qa:server:multi-node
+Evaluating project ':x-pack:plugin:sql:qa:server:multi-node' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/qa/server/multi-node/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:qa:server:security
+Evaluating project ':x-pack:plugin:sql:qa:server:security' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/qa/server/security/build.gradle'.
+
+> Configure project :x-pack:plugin:sql:qa:server:single-node
+Evaluating project ':x-pack:plugin:sql:qa:server:single-node' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/qa/server/single-node/build.gradle'.
+ +> Configure project :x-pack:plugin:sql:qa:jdbc:security:with-ssl +Evaluating project ':x-pack:plugin:sql:qa:jdbc:security:with-ssl' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/qa/jdbc/security/with-ssl/build.gradle'. + +> Configure project :x-pack:plugin:sql:qa:jdbc:security:without-ssl +Evaluating project ':x-pack:plugin:sql:qa:jdbc:security:without-ssl' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/qa/jdbc/security/without-ssl/build.gradle'. + +> Configure project :x-pack:plugin:sql:qa:server:security:with-ssl +Evaluating project ':x-pack:plugin:sql:qa:server:security:with-ssl' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/qa/server/security/with-ssl/build.gradle'. + +> Configure project :x-pack:plugin:sql:qa:server:security:without-ssl +Evaluating project ':x-pack:plugin:sql:qa:server:security:without-ssl' using build file '/Users/rene/dev/elastic/elasticsearch/x-pack/plugin/sql/qa/server/security/without-ssl/build.gradle'. +All projects evaluated. +test#with21 +Task name matched 'cleanTest' +Task name matched 'test' +Selected primary task 'cleanTest' from project :libs:simdvec +Selected primary task 'test' from project :libs:simdvec +Tasks to be executed: [task ':libs:simdvec:cleanTest', task ':libs:logging:compileJava', task ':libs:core:compileJava', task ':libs:native:compileJava', task ':libs:simdvec:compileJava', task ':libs:simdvec:processResources', task ':libs:simdvec:classes', task ':libs:simdvec:compileMain21Java', task ':libs:simdvec:processMain21Resources', task ':libs:simdvec:main21Classes', task ':libs:simdvec:compileMain22Java', task ':libs:simdvec:processMain22Resources', task ':libs:simdvec:main22Classes', task ':libs:simdvec:jar', task ':libs:cli:compileJava', task ':libs:x-content:compileJava', task ':libs:entitlement:bridge:compileJava', task ':libs:entitlement:compileJava', task ':libs:geo:compileJava', task ':libs:grok:compileJava', task ':libs:lz4:compileJava', task ':libs:plugin-api:compileJava', task ':libs:plugin-analysis-api:compileJava', task ':libs:secure-sm:compileJava', task ':libs:tdigest:compileJava', task ':server:compileJava', task ':server:processResources', task ':server:classes', task ':server:generateModulesList', task ':server:generatePluginsList', task ':server:jar', task ':client:rest:compileJava', task ':client:rest:processResources', task ':client:rest:classes', task ':client:rest:jar', task ':libs:cli:processResources', task ':libs:cli:classes', task ':libs:cli:jar', task ':libs:core:processResources', task ':libs:core:classes', task ':libs:core:jar', task ':libs:entitlement:asm-provider:compileJava', task ':libs:entitlement:asm-provider:processResources', task ':libs:entitlement:asm-provider:classes', task ':libs:entitlement:asm-provider:jar', task ':libs:entitlement:generateAsm-providerProviderManifest', task ':libs:entitlement:generateAsm-providerProviderImpl', task ':libs:entitlement:processResources', task ':libs:entitlement:classes', task ':libs:entitlement:bridge:processResources', task ':libs:entitlement:bridge:classes', task ':libs:entitlement:bridge:compileMain23Java', task ':libs:entitlement:bridge:processMain23Resources', task ':libs:entitlement:bridge:main23Classes', task ':libs:entitlement:bridge:java23Jar', task ':libs:entitlement:compileMain23Java', task ':libs:entitlement:processMain23Resources', task ':libs:entitlement:main23Classes', task ':libs:entitlement:jar', task ':libs:geo:processResources', task ':libs:geo:classes', task 
':libs:geo:jar', task ':libs:grok:processResources', task ':libs:grok:classes', task ':libs:grok:jar', task ':libs:logging:processResources', task ':libs:logging:classes', task ':libs:logging:jar', task ':libs:lz4:processResources', task ':libs:lz4:classes', task ':libs:lz4:jar', task ':libs:native:processResources', task ':libs:native:classes', task ':libs:native:compileMain22Java', task ':libs:native:processMain22Resources', task ':libs:native:main22Classes', task ':libs:native:jar', task ':libs:plugin-analysis-api:processResources', task ':libs:plugin-analysis-api:classes', task ':libs:plugin-analysis-api:jar', task ':libs:plugin-api:processResources', task ':libs:plugin-api:classes', task ':libs:plugin-api:jar', task ':libs:secure-sm:processResources', task ':libs:secure-sm:classes', task ':libs:secure-sm:jar', task ':libs:ssl-config:compileJava', task ':modules:transport-netty4:compileJava', task ':test:framework:compileJava', task ':libs:simdvec:compileTestJava', task ':libs:simdvec:processTestResources', task ':libs:simdvec:testClasses', task ':libs:ssl-config:processResources', task ':libs:ssl-config:classes', task ':libs:ssl-config:jar', task ':libs:tdigest:processResources', task ':libs:tdigest:classes', task ':libs:tdigest:jar', task ':libs:x-content:impl:compileJava', task ':libs:x-content:impl:processResources', task ':libs:x-content:impl:classes', task ':libs:x-content:impl:jar', task ':libs:x-content:generateImplProviderManifest', task ':libs:x-content:generateImplProviderImpl', task ':libs:x-content:processResources', task ':libs:x-content:classes', task ':libs:x-content:jar', task ':modules:transport-netty4:processResources', task ':modules:transport-netty4:classes', task ':modules:transport-netty4:jar', task ':test:framework:processResources', task ':test:framework:classes', task ':test:framework:jar', task ':test:immutable-collections-patch:compileJava', task ':test:immutable-collections-patch:processResources', task ':test:immutable-collections-patch:classes', task ':test:immutable-collections-patch:generatePatch', task ':libs:native:native-libraries:extractLibs', task ':libs:simdvec:test'] +Tasks that were excluded: [] +======================================= +Elasticsearch Build Hamster says Hello! + Gradle Version : 8.11.1 + OS Info : Mac OS X 15.2 (aarch64) + JDK Version : 21.0.5+9-LTS-239 (Oracle) + JAVA_HOME : /Users/rene/.sdkman/candidates/java/21.0.5-oracle + Random Testing Seed : 7B469FBE8B6D0C65 + In FIPS 140 mode : false +======================================= +Ignoring listeners of task graph ready event, as this build (:) has already executed work. +Resolve mutations for :libs:simdvec:cleanTest (Thread[#1322,Execution worker,5,main]) started. +work action Unknown value (Thread[#1392,included builds Thread 2,5,main]) started. +work action Unknown value (Thread[#1330,Execution worker Thread 9,5,main]) started. +:libs:simdvec:cleanTest (Thread[#1322,Execution worker,5,main]) started. +destroyer locations for task group 0 (Thread[#1332,Execution worker Thread 11,5,main]) started. +Resolve mutations for :libs:logging:compileJava (Thread[#1332,Execution worker Thread 11,5,main]) started. +Resolve mutations for :libs:simdvec:processResources (Thread[#1326,Execution worker Thread 5,5,main]) started. +Resolve mutations for :libs:simdvec:processMain22Resources (Thread[#1329,Execution worker Thread 8,5,main]) started. +Resolve mutations for :libs:simdvec:processMain21Resources (Thread[#1324,Execution worker Thread 3,5,main]) started. 
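The banner above records the randomized-testing seed for this run. As a minimal sketch (the exact original invocation is not shown in the log; `tests.seed` is the randomizedtesting system property honored by the Elasticsearch build, and `--debug` is what produces this level of Gradle logging), the same task selection could be reproduced from a checkout roughly like so:

```bash
# Hedged sketch, not the verbatim command from this transcript:
# re-run the selected tasks with the seed printed in the banner.
./gradlew :libs:simdvec:cleanTest :libs:simdvec:test \
  -Dtests.seed=7B469FBE8B6D0C65 \
  --debug
```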
+Resolve mutations for :libs:grok:compileJava (Thread[#1327,Execution worker Thread 6,5,main]) started.
+:libs:simdvec:processResources (Thread[#1326,Execution worker Thread 5,5,main]) started.
+:libs:simdvec:processMain21Resources (Thread[#1324,Execution worker Thread 3,5,main]) started.
+:libs:simdvec:processMain22Resources (Thread[#1329,Execution worker Thread 8,5,main]) started.
+Resolve mutations for :libs:geo:compileJava (Thread[#1328,Execution worker Thread 7,5,main]) started.
+Resolve mutations for :libs:secure-sm:compileJava (Thread[#1332,Execution worker Thread 11,5,main]) started.
+Resolve mutations for :libs:entitlement:bridge:compileJava (Thread[#1325,Execution worker Thread 4,5,main]) started.
+Resolve mutations for :server:generateModulesList (Thread[#1392,included builds Thread 2,5,main]) started.
+:libs:secure-sm:compileJava (Thread[#1332,Execution worker Thread 11,5,main]) started.
+Resolve mutations for :server:processResources (Thread[#1330,Execution worker Thread 9,5,main]) started.
+:libs:logging:compileJava (Thread[#1323,Execution worker Thread 2,5,main]) started.
+Resolve mutations for :libs:plugin-api:compileJava (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:server:processResources (Thread[#1330,Execution worker Thread 9,5,main]) started.
+:server:generateModulesList (Thread[#1392,included builds Thread 2,5,main]) started.
+:libs:entitlement:bridge:compileJava (Thread[#1325,Execution worker Thread 4,5,main]) started.
+:libs:geo:compileJava (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:libs:plugin-api:compileJava (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:libs:grok:compileJava (Thread[#1327,Execution worker Thread 6,5,main]) started.
+
+> Task :libs:simdvec:processMain22Resources NO-SOURCE
+Skipping task ':libs:simdvec:processMain22Resources' as it has no source files and no previous output files.
+
+> Task :libs:simdvec:processMain21Resources NO-SOURCE
+Skipping task ':libs:simdvec:processMain21Resources' as it has no source files and no previous output files.
+
+> Task :server:generateModulesList UP-TO-DATE
+Caching disabled for task ':server:generateModulesList' because:
+  Build cache is disabled
+  Gradle would require more information to cache this task
+Skipping task ':server:generateModulesList' as it is up-to-date.
+
+> Task :libs:simdvec:processResources NO-SOURCE
+Skipping task ':libs:simdvec:processResources' as it has no source files and no previous output files.
+Resolve mutations for :client:rest:processResources (Thread[#1392,included builds Thread 2,5,main]) started.
+
+> Task :libs:grok:compileJava UP-TO-DATE
+Caching disabled for task ':libs:grok:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:grok:compileJava' as it is up-to-date.
+:client:rest:processResources (Thread[#1392,included builds Thread 2,5,main]) started.
+Resolve mutations for :libs:cli:processResources (Thread[#1326,Execution worker Thread 5,5,main]) started.
+Resolve mutations for :client:rest:compileJava (Thread[#1324,Execution worker Thread 3,5,main]) started.
+
+> Task :libs:plugin-api:compileJava UP-TO-DATE
+Caching disabled for task ':libs:plugin-api:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:plugin-api:compileJava' as it is up-to-date.
+No compile result for :libs:plugin-api:compileJava
+No compile result for :libs:plugin-api:compileJava
+Resolve mutations for :server:generatePluginsList (Thread[#1329,Execution worker Thread 8,5,main]) started.
+
+> Task :libs:secure-sm:compileJava UP-TO-DATE
+Caching disabled for task ':libs:secure-sm:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:secure-sm:compileJava' as it is up-to-date.
+No compile result for :libs:secure-sm:compileJava
+No compile result for :libs:secure-sm:compileJava
+No compile result for :libs:plugin-api:compileJava
+No compile result for :libs:plugin-api:compileJava
+:libs:cli:processResources (Thread[#1326,Execution worker Thread 5,5,main]) started.
+
+> Task :libs:geo:compileJava UP-TO-DATE
+Caching disabled for task ':libs:geo:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:geo:compileJava' as it is up-to-date.
+
+> Task :libs:entitlement:bridge:compileJava UP-TO-DATE
+Caching disabled for task ':libs:entitlement:bridge:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:entitlement:bridge:compileJava' as it is up-to-date.
+No compile result for :libs:geo:compileJava
+No compile result for :libs:geo:compileJava
+No compile result for :libs:grok:compileJava
+No compile result for :libs:grok:compileJava
+
+> Task :server:processResources UP-TO-DATE
+Caching disabled for task ':server:processResources' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':server:processResources' as it is up-to-date.
+No compile result for :libs:grok:compileJava
+work action resolve main (project :libs:plugin-api) (Thread[#1330,Execution worker Thread 9,5,main]) started.
+No compile result for :libs:geo:compileJava
+No compile result for :libs:geo:compileJava
+No compile result for :libs:entitlement:bridge:compileJava
+No compile result for :libs:entitlement:bridge:compileJava
+No compile result for :libs:entitlement:bridge:compileJava
+No compile result for :libs:entitlement:bridge:compileJava
+
+> Task :libs:logging:compileJava UP-TO-DATE
+Caching disabled for task ':libs:logging:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:logging:compileJava' as it is up-to-date.
+work action resolve main (project :libs:entitlement:bridge) (Thread[#1325,Execution worker Thread 4,5,main]) started.
+work action resolve main (project :libs:plugin-api) (Thread[#1331,Execution worker Thread 10,5,main]) started.
+No compile result for :libs:secure-sm:compileJava
+:server:generatePluginsList (Thread[#1329,Execution worker Thread 8,5,main]) started.
+:client:rest:compileJava (Thread[#1324,Execution worker Thread 3,5,main]) started.
+No compile result for :libs:secure-sm:compileJava
+Resolve mutations for :libs:plugin-analysis-api:compileJava (Thread[#1331,Execution worker Thread 10,5,main]) started.
+work action resolve main (project :libs:entitlement:bridge) (Thread[#1325,Execution worker Thread 4,5,main]) started.
+No compile result for :libs:logging:compileJava
+work action resolve main (project :libs:geo) (Thread[#1328,Execution worker Thread 7,5,main]) started.
+Resolve mutations for :libs:core:processResources (Thread[#1330,Execution worker Thread 9,5,main]) started.
+No compile result for :libs:grok:compileJava
+work action resolve main (project :libs:grok) (Thread[#1330,Execution worker Thread 9,5,main]) started.
+work action resolve main (project :libs:grok) (Thread[#1327,Execution worker Thread 6,5,main]) started.
+:libs:plugin-analysis-api:compileJava (Thread[#1331,Execution worker Thread 10,5,main]) started.
+Resolve mutations for :libs:entitlement:asm-provider:processResources (Thread[#1327,Execution worker Thread 6,5,main]) started.
+work action resolve main (project :libs:secure-sm) (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:libs:entitlement:asm-provider:processResources (Thread[#1327,Execution worker Thread 6,5,main]) started.
+No compile result for :libs:logging:compileJava
+work action resolve main (project :libs:geo) (Thread[#1325,Execution worker Thread 4,5,main]) started.
+work action resolve main (project :libs:secure-sm) (Thread[#1332,Execution worker Thread 11,5,main]) started.
+Resolve mutations for :libs:entitlement:bridge:processResources (Thread[#1325,Execution worker Thread 4,5,main]) started.
+No compile result for :libs:logging:compileJava
+No compile result for :libs:logging:compileJava
+:libs:entitlement:bridge:processResources (Thread[#1325,Execution worker Thread 4,5,main]) started.
+work action resolve main (project :libs:logging) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+Resolve mutations for :libs:entitlement:processResources (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:libs:entitlement:processResources (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :libs:cli:processResources NO-SOURCE
+Skipping task ':libs:cli:processResources' as it has no source files and no previous output files.
+:libs:core:processResources (Thread[#1330,Execution worker Thread 9,5,main]) started.
+Resolve mutations for :libs:entitlement:processMain23Resources (Thread[#1326,Execution worker Thread 5,5,main]) started.
+
+> Task :server:generatePluginsList UP-TO-DATE
+Caching disabled for task ':server:generatePluginsList' because:
+  Build cache is disabled
+  Gradle would require more information to cache this task
+Skipping task ':server:generatePluginsList' as it is up-to-date.
+Resolve mutations for :libs:geo:processResources (Thread[#1329,Execution worker Thread 8,5,main]) started.
+:libs:entitlement:processMain23Resources (Thread[#1326,Execution worker Thread 5,5,main]) started.
+work action resolve main (project :libs:logging) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+Resolve mutations for :libs:core:compileJava (Thread[#1323,Execution worker Thread 2,5,main]) started.
+Resolve mutations for :libs:entitlement:bridge:processMain23Resources (Thread[#1332,Execution worker Thread 11,5,main]) started.
+
+> Task :libs:entitlement:processResources NO-SOURCE
+Skipping task ':libs:entitlement:processResources' as it has no source files and no previous output files.
+Resolve mutations for :libs:grok:processResources (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :libs:entitlement:bridge:processResources NO-SOURCE
+Skipping task ':libs:entitlement:bridge:processResources' as it has no source files and no previous output files.
+
+> Task :libs:plugin-analysis-api:compileJava UP-TO-DATE
+Caching disabled for task ':libs:plugin-analysis-api:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:plugin-analysis-api:compileJava' as it is up-to-date.
+:libs:grok:processResources (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :libs:entitlement:processMain23Resources NO-SOURCE
+Skipping task ':libs:entitlement:processMain23Resources' as it has no source files and no previous output files.
+:libs:geo:processResources (Thread[#1329,Execution worker Thread 8,5,main]) started.
+
+> Task :client:rest:processResources UP-TO-DATE
+Caching disabled for task ':client:rest:processResources' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':client:rest:processResources' as it is up-to-date.
+:libs:core:compileJava (Thread[#1323,Execution worker Thread 2,5,main]) started.
+
+> Task :client:rest:compileJava UP-TO-DATE
+Caching disabled for task ':client:rest:compileJava' because:
+  Build cache is disabled
+Skipping task ':client:rest:compileJava' as it is up-to-date.
+No compile result for :client:rest:compileJava
+No compile result for :libs:plugin-analysis-api:compileJava
+Resolve mutations for :libs:entitlement:bridge:classes (Thread[#1325,Execution worker Thread 4,5,main]) started.
+:libs:entitlement:bridge:processMain23Resources (Thread[#1332,Execution worker Thread 11,5,main]) started.
+No compile result for :libs:plugin-analysis-api:compileJava
+Resolve mutations for :libs:lz4:processResources (Thread[#1326,Execution worker Thread 5,5,main]) started.
+Resolve mutations for :libs:logging:processResources (Thread[#1392,included builds Thread 2,5,main]) started.
+No compile result for :client:rest:compileJava
+:libs:logging:processResources (Thread[#1392,included builds Thread 2,5,main]) started.
+
+> Task :libs:geo:processResources NO-SOURCE
+Skipping task ':libs:geo:processResources' as it has no source files and no previous output files.
+
+> Task :libs:core:processResources NO-SOURCE
+Skipping task ':libs:core:processResources' as it has no source files and no previous output files.
+No compile result for :client:rest:compileJava
+:libs:lz4:processResources (Thread[#1326,Execution worker Thread 5,5,main]) started.
+No compile result for :libs:plugin-analysis-api:compileJava
+
+> Task :libs:entitlement:asm-provider:processResources UP-TO-DATE
+Caching disabled for task ':libs:entitlement:asm-provider:processResources' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:entitlement:asm-provider:processResources' as it is up-to-date.
+
+> Task :libs:grok:processResources UP-TO-DATE
+Caching disabled for task ':libs:grok:processResources' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:grok:processResources' as it is up-to-date.
+:libs:entitlement:bridge:classes (Thread[#1325,Execution worker Thread 4,5,main]) started.
+Resolve mutations for :libs:native:processMain22Resources (Thread[#1327,Execution worker Thread 6,5,main]) started.
+Resolve mutations for :libs:grok:classes (Thread[#1328,Execution worker Thread 7,5,main]) started.
+No compile result for :libs:plugin-analysis-api:compileJava
+
+> Task :libs:entitlement:bridge:processMain23Resources NO-SOURCE
+Skipping task ':libs:entitlement:bridge:processMain23Resources' as it has no source files and no previous output files.
+:libs:grok:classes (Thread[#1328,Execution worker Thread 7,5,main]) started.
+work action resolve main (project :libs:plugin-analysis-api) (Thread[#1331,Execution worker Thread 10,5,main]) started.
+work action resolve main (project :libs:plugin-analysis-api) (Thread[#1332,Execution worker Thread 11,5,main]) started.
+Resolve mutations for :libs:plugin-analysis-api:processResources (Thread[#1331,Execution worker Thread 10,5,main]) started.
+Resolve mutations for :libs:plugin-api:processResources (Thread[#1332,Execution worker Thread 11,5,main]) started.
+
+> Task :libs:logging:processResources NO-SOURCE
+Skipping task ':libs:logging:processResources' as it has no source files and no previous output files.
+No compile result for :client:rest:compileJava
+:libs:plugin-analysis-api:processResources (Thread[#1331,Execution worker Thread 10,5,main]) started.
+Resolve mutations for :client:rest:classes (Thread[#1324,Execution worker Thread 3,5,main]) started.
+Resolve mutations for :libs:logging:classes (Thread[#1392,included builds Thread 2,5,main]) started.
+:client:rest:classes (Thread[#1324,Execution worker Thread 3,5,main]) started.
+Resolve mutations for :libs:native:processResources (Thread[#1330,Execution worker Thread 9,5,main]) started.
+:libs:logging:classes (Thread[#1392,included builds Thread 2,5,main]) started.
+Resolve mutations for :libs:geo:classes (Thread[#1329,Execution worker Thread 8,5,main]) started.
+:libs:native:processResources (Thread[#1330,Execution worker Thread 9,5,main]) started.
+:libs:plugin-api:processResources (Thread[#1332,Execution worker Thread 11,5,main]) started.
+
+> Task :libs:plugin-analysis-api:processResources NO-SOURCE
+Skipping task ':libs:plugin-analysis-api:processResources' as it has no source files and no previous output files.
+:libs:native:processMain22Resources (Thread[#1327,Execution worker Thread 6,5,main]) started.
+Resolve mutations for :libs:plugin-analysis-api:classes (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:libs:plugin-analysis-api:classes (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :libs:grok:classes UP-TO-DATE
+Skipping task ':libs:grok:classes' as it has no actions.
+
+> Task :libs:core:compileJava UP-TO-DATE
+Caching disabled for task ':libs:core:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:core:compileJava' as it is up-to-date.
+No compile result for :libs:core:compileJava
+:libs:geo:classes (Thread[#1329,Execution worker Thread 8,5,main]) started.
+
+> Task :libs:logging:classes UP-TO-DATE
+Skipping task ':libs:logging:classes' as it has no actions.
+
+> Task :client:rest:classes UP-TO-DATE
+Skipping task ':client:rest:classes' as it has no actions.
+
+> Task :libs:geo:classes UP-TO-DATE
+Skipping task ':libs:geo:classes' as it has no actions.
+Resolve mutations for :client:rest:jar (Thread[#1324,Execution worker Thread 3,5,main]) started.
+Resolve mutations for :libs:logging:jar (Thread[#1392,included builds Thread 2,5,main]) started.
+No compile result for :libs:core:compileJava
+
+> Task :libs:native:processMain22Resources NO-SOURCE
+Skipping task ':libs:native:processMain22Resources' as it has no source files and no previous output files.
+No compile result for :libs:core:compileJava
+:client:rest:jar (Thread[#1324,Execution worker Thread 3,5,main]) started.
+Resolve mutations for :libs:grok:jar (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :libs:native:processResources NO-SOURCE
+Skipping task ':libs:native:processResources' as it has no source files and no previous output files.
+work action resolve main (project :client:rest) (Thread[#1330,Execution worker Thread 9,5,main]) started.
+Resolve mutations for :libs:simdvec:processTestResources (Thread[#1330,Execution worker Thread 9,5,main]) started.
+
+> Task :libs:entitlement:bridge:classes UP-TO-DATE
+Skipping task ':libs:entitlement:bridge:classes' as it has no actions.
+
+> Task :libs:plugin-analysis-api:classes UP-TO-DATE
+Skipping task ':libs:plugin-analysis-api:classes' as it has no actions.
+
+> Task :libs:lz4:processResources NO-SOURCE
+Skipping task ':libs:lz4:processResources' as it has no source files and no previous output files.
+:libs:simdvec:processTestResources (Thread[#1330,Execution worker Thread 9,5,main]) started.
+Resolve mutations for :libs:entitlement:bridge:compileMain23Java (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:libs:grok:jar (Thread[#1328,Execution worker Thread 7,5,main]) started.
+Resolve mutations for :libs:ssl-config:processResources (Thread[#1326,Execution worker Thread 5,5,main]) started.
+Resolve mutations for :libs:secure-sm:processResources (Thread[#1327,Execution worker Thread 6,5,main]) started.
+:libs:logging:jar (Thread[#1392,included builds Thread 2,5,main]) started.
+
+> Task :libs:simdvec:processTestResources NO-SOURCE
+Skipping task ':libs:simdvec:processTestResources' as it has no source files and no previous output files.
+No compile result for :libs:core:compileJava
+
+> Task :libs:plugin-api:processResources NO-SOURCE
+Skipping task ':libs:plugin-api:processResources' as it has no source files and no previous output files.
+Resolve mutations for :libs:geo:jar (Thread[#1329,Execution worker Thread 8,5,main]) started.
+work action resolve main (project :libs:core) (Thread[#1332,Execution worker Thread 11,5,main]) started.
+work action resolve main (project :libs:core) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+Resolve mutations for :libs:tdigest:processResources (Thread[#1330,Execution worker Thread 9,5,main]) started.
+:libs:secure-sm:processResources (Thread[#1327,Execution worker Thread 6,5,main]) started.
+:libs:ssl-config:processResources (Thread[#1326,Execution worker Thread 5,5,main]) started.
+:libs:entitlement:bridge:compileMain23Java (Thread[#1331,Execution worker Thread 10,5,main]) started.
+Resolve mutations for :libs:plugin-analysis-api:jar (Thread[#1325,Execution worker Thread 4,5,main]) started.
+Resolve mutations for :libs:lz4:compileJava (Thread[#1325,Execution worker Thread 4,5,main]) started.
+Resolve mutations for :libs:x-content:compileJava (Thread[#1330,Execution worker Thread 9,5,main]) started.
+Resolve mutations for :libs:cli:compileJava (Thread[#1323,Execution worker Thread 2,5,main]) started.
+Resolve mutations for :libs:native:compileJava (Thread[#1329,Execution worker Thread 8,5,main]) started.
+Resolve mutations for :libs:core:classes (Thread[#1332,Execution worker Thread 11,5,main]) started.
+Resolve mutations for :libs:tdigest:compileJava (Thread[#1332,Execution worker Thread 11,5,main]) started.
+:libs:lz4:compileJava (Thread[#1325,Execution worker Thread 4,5,main]) started.
+
+> Task :libs:simdvec:cleanTest
+Caching disabled for task ':libs:simdvec:cleanTest' because:
+  Build cache is disabled
+Task ':libs:simdvec:cleanTest' is not up-to-date because:
+  Task has not declared any outputs despite executing actions.
+:libs:cli:compileJava (Thread[#1323,Execution worker Thread 2,5,main]) started.
+
+> Task :libs:ssl-config:processResources NO-SOURCE
+Skipping task ':libs:ssl-config:processResources' as it has no source files and no previous output files.
+:libs:tdigest:compileJava (Thread[#1332,Execution worker Thread 11,5,main]) started.
+:libs:native:compileJava (Thread[#1329,Execution worker Thread 8,5,main]) started.
+:libs:x-content:compileJava (Thread[#1330,Execution worker Thread 9,5,main]) started.
+
+> Task :libs:secure-sm:processResources NO-SOURCE
+Skipping task ':libs:secure-sm:processResources' as it has no source files and no previous output files.
+:libs:geo:jar (Thread[#1326,Execution worker Thread 5,5,main]) started.
+:libs:core:classes (Thread[#1322,Execution worker,5,main]) started.
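Nearly every task above reports "Build cache is disabled", so all the UP-TO-DATE hits come from Gradle's incremental-build checks alone, not from the cache. As a hedged sketch (these are stock Gradle mechanisms, not anything specific to this transcript), the local build cache could be turned on for a single run or persistently:

```bash
# One-off: enable the build cache for this invocation only.
./gradlew :libs:simdvec:test --build-cache

# Persistent: enable it for all builds on this machine.
echo 'org.gradle.caching=true' >> ~/.gradle/gradle.properties
```

Note that some tasks would still skip the cache for the other reasons logged here ("Not worth caching", "Gradle would require more information to cache this task").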
+:libs:plugin-analysis-api:jar (Thread[#1327,Execution worker Thread 6,5,main]) started.
+
+> Task :libs:entitlement:bridge:compileMain23Java UP-TO-DATE
+Caching disabled for task ':libs:entitlement:bridge:compileMain23Java' because:
+  Build cache is disabled
+Skipping task ':libs:entitlement:bridge:compileMain23Java' as it is up-to-date.
+
+> Task :libs:lz4:compileJava UP-TO-DATE
+Caching disabled for task ':libs:lz4:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:lz4:compileJava' as it is up-to-date.
+No compile result for :libs:lz4:compileJava
+No compile result for :libs:lz4:compileJava
+No compile result for :libs:lz4:compileJava
+No compile result for :libs:entitlement:bridge:compileMain23Java
+
+> Task :libs:cli:compileJava UP-TO-DATE
+Caching disabled for task ':libs:cli:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:cli:compileJava' as it is up-to-date.
+No compile result for :libs:cli:compileJava
+No compile result for :libs:cli:compileJava
+No compile result for :libs:cli:compileJava
+No compile result for :libs:cli:compileJava
+work action resolve main (project :libs:cli) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+No compile result for :libs:entitlement:bridge:compileMain23Java
+No compile result for :libs:entitlement:bridge:compileMain23Java
+No compile result for :libs:lz4:compileJava
+work action resolve main (project :libs:lz4) (Thread[#1325,Execution worker Thread 4,5,main]) started.
+No compile result for :libs:entitlement:bridge:compileMain23Java
+
+> Task :libs:native:compileJava UP-TO-DATE
+Caching disabled for task ':libs:native:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:native:compileJava' as it is up-to-date.
+No compile result for :libs:native:compileJava
+Resolve mutations for :libs:cli:classes (Thread[#1331,Execution worker Thread 10,5,main]) started.
+work action resolve main (project :libs:cli) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+No compile result for :libs:native:compileJava
+work action resolve main (project :libs:lz4) (Thread[#1325,Execution worker Thread 4,5,main]) started.
+
+> Task :libs:core:classes UP-TO-DATE
+Skipping task ':libs:core:classes' as it has no actions.
+No compile result for :libs:native:compileJava
+:libs:cli:classes (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :libs:tdigest:compileJava UP-TO-DATE
+Caching disabled for task ':libs:tdigest:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:tdigest:compileJava' as it is up-to-date.
+Resolve mutations for :libs:entitlement:bridge:main23Classes (Thread[#1323,Execution worker Thread 2,5,main]) started.
+No compile result for :libs:tdigest:compileJava
+:libs:entitlement:bridge:main23Classes (Thread[#1323,Execution worker Thread 2,5,main]) started.
+
+> Task :libs:x-content:compileJava UP-TO-DATE
+Caching disabled for task ':libs:x-content:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:x-content:compileJava' as it is up-to-date.
+No compile result for :libs:native:compileJava
+
+> Task :libs:entitlement:bridge:main23Classes UP-TO-DATE
+Skipping task ':libs:entitlement:bridge:main23Classes' as it has no actions.
+Resolve mutations for :libs:core:jar (Thread[#1322,Execution worker,5,main]) started.
+work action resolve main (project :libs:native) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+Resolve mutations for :libs:lz4:classes (Thread[#1325,Execution worker Thread 4,5,main]) started.
+Resolve mutations for :libs:entitlement:bridge:java23Jar (Thread[#1323,Execution worker Thread 2,5,main]) started.
+:libs:lz4:classes (Thread[#1325,Execution worker Thread 4,5,main]) started.
+work action resolve main (project :libs:native) (Thread[#1329,Execution worker Thread 8,5,main]) started.
+No compile result for :libs:x-content:compileJava
+:libs:entitlement:bridge:java23Jar (Thread[#1323,Execution worker Thread 2,5,main]) started.
+No compile result for :libs:tdigest:compileJava
+No compile result for :libs:tdigest:compileJava
+No compile result for :libs:tdigest:compileJava
+No compile result for :libs:x-content:compileJava
+Resolve mutations for :libs:simdvec:compileJava (Thread[#1329,Execution worker Thread 8,5,main]) started.
+:libs:core:jar (Thread[#1322,Execution worker,5,main]) started.
+
+> Task :libs:lz4:classes UP-TO-DATE
+Skipping task ':libs:lz4:classes' as it has no actions.
+No compile result for :libs:x-content:compileJava
+work action resolve main (project :libs:tdigest) (Thread[#1332,Execution worker Thread 11,5,main]) started.
+
+> Task :libs:cli:classes UP-TO-DATE
+Skipping task ':libs:cli:classes' as it has no actions.
+Resolve mutations for :libs:lz4:jar (Thread[#1332,Execution worker Thread 11,5,main]) started.
+No compile result for :libs:x-content:compileJava
+work action resolve main (project :libs:x-content) (Thread[#1330,Execution worker Thread 9,5,main]) started.
+work action resolve main (project :libs:tdigest) (Thread[#1325,Execution worker Thread 4,5,main]) started.
+:libs:lz4:jar (Thread[#1332,Execution worker Thread 11,5,main]) started.
+Resolve mutations for :libs:native:classes (Thread[#1325,Execution worker Thread 4,5,main]) started.
+:libs:simdvec:compileJava (Thread[#1329,Execution worker Thread 8,5,main]) started.
+:libs:native:classes (Thread[#1325,Execution worker Thread 4,5,main]) started.
+work action resolve main (project :libs:x-content) (Thread[#1330,Execution worker Thread 9,5,main]) started.
+Resolve mutations for :libs:cli:jar (Thread[#1331,Execution worker Thread 10,5,main]) started.
+Resolve mutations for :libs:entitlement:compileJava (Thread[#1330,Execution worker Thread 9,5,main]) started.
+:libs:cli:jar (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :libs:native:classes UP-TO-DATE
+Skipping task ':libs:native:classes' as it has no actions.
+Resolve mutations for :libs:native:compileMain22Java (Thread[#1325,Execution worker Thread 4,5,main]) started.
+:libs:entitlement:compileJava (Thread[#1330,Execution worker Thread 9,5,main]) started.
+
+> Task :libs:entitlement:bridge:java23Jar UP-TO-DATE
+Caching disabled for task ':libs:entitlement:bridge:java23Jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:entitlement:bridge:java23Jar' as it is up-to-date.
+work action resolve elasticsearch-entitlement-bridge-9.0.0-SNAPSHOT.jar (project :libs:entitlement:bridge) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+:libs:native:compileMain22Java (Thread[#1325,Execution worker Thread 4,5,main]) started.
+Resolve mutations for :libs:plugin-api:classes (Thread[#1323,Execution worker Thread 2,5,main]) started.
+:libs:plugin-api:classes (Thread[#1323,Execution worker Thread 2,5,main]) started.
+
+> Task :libs:plugin-api:classes UP-TO-DATE
+Skipping task ':libs:plugin-api:classes' as it has no actions.
+Resolve mutations for :libs:plugin-api:jar (Thread[#1323,Execution worker Thread 2,5,main]) started.
+:libs:plugin-api:jar (Thread[#1323,Execution worker Thread 2,5,main]) started.
+
+> Task :libs:simdvec:compileJava UP-TO-DATE
+Caching disabled for task ':libs:simdvec:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:simdvec:compileJava' as it is up-to-date.
+No compile result for :libs:simdvec:compileJava
+No compile result for :libs:simdvec:compileJava
+No compile result for :libs:simdvec:compileJava
+No compile result for :libs:simdvec:compileJava
+Resolve mutations for :libs:simdvec:classes (Thread[#1329,Execution worker Thread 8,5,main]) started.
+:libs:simdvec:classes (Thread[#1329,Execution worker Thread 8,5,main]) started.
+
+> Task :libs:entitlement:compileJava UP-TO-DATE
+Caching disabled for task ':libs:entitlement:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:entitlement:compileJava' as it is up-to-date.
+No compile result for :libs:entitlement:compileJava
+
+> Task :libs:native:compileMain22Java UP-TO-DATE
+Caching disabled for task ':libs:native:compileMain22Java' because:
+  Build cache is disabled
+Skipping task ':libs:native:compileMain22Java' as it is up-to-date.
+No compile result for :libs:entitlement:compileJava
+No compile result for :libs:native:compileMain22Java
+No compile result for :libs:native:compileMain22Java
+No compile result for :libs:entitlement:compileJava
+No compile result for :libs:native:compileMain22Java
+No compile result for :libs:native:compileMain22Java
+No compile result for :libs:entitlement:compileJava
+work action resolve main (project :libs:simdvec) (Thread[#1325,Execution worker Thread 4,5,main]) started.
+work action resolve main (project :libs:entitlement) (Thread[#1330,Execution worker Thread 9,5,main]) started.
+
+> Task :libs:simdvec:classes UP-TO-DATE
+Skipping task ':libs:simdvec:classes' as it has no actions.
+work action resolve main (project :libs:simdvec) (Thread[#1330,Execution worker Thread 9,5,main]) started.
+work action resolve main (project :libs:entitlement) (Thread[#1325,Execution worker Thread 4,5,main]) started.
+Resolve mutations for :libs:simdvec:compileMain21Java (Thread[#1329,Execution worker Thread 8,5,main]) started.
+Resolve mutations for :libs:native:main22Classes (Thread[#1330,Execution worker Thread 9,5,main]) started.
+Resolve mutations for :server:compileJava (Thread[#1325,Execution worker Thread 4,5,main]) started.
+Resolve mutations for :libs:entitlement:asm-provider:compileJava (Thread[#1330,Execution worker Thread 9,5,main]) started.
+:libs:simdvec:compileMain21Java (Thread[#1329,Execution worker Thread 8,5,main]) started.
+:server:compileJava (Thread[#1325,Execution worker Thread 4,5,main]) started.
+:libs:entitlement:asm-provider:compileJava (Thread[#1330,Execution worker Thread 9,5,main]) started.
+
+> Task :client:rest:jar UP-TO-DATE
+Caching disabled for task ':client:rest:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':client:rest:jar' as it is up-to-date.
+:libs:native:main22Classes (Thread[#1324,Execution worker Thread 3,5,main]) started.
+
+> Task :libs:native:main22Classes UP-TO-DATE
+Skipping task ':libs:native:main22Classes' as it has no actions.
+Resolve mutations for :libs:native:jar (Thread[#1324,Execution worker Thread 3,5,main]) started.
+:libs:native:jar (Thread[#1324,Execution worker Thread 3,5,main]) started.
+
+> Task :libs:simdvec:compileMain21Java UP-TO-DATE
+Caching disabled for task ':libs:simdvec:compileMain21Java' because:
+  Build cache is disabled
+Skipping task ':libs:simdvec:compileMain21Java' as it is up-to-date.
+No compile result for :libs:simdvec:compileMain21Java
+No compile result for :libs:simdvec:compileMain21Java
+
+> Task :libs:entitlement:asm-provider:compileJava UP-TO-DATE
+Caching disabled for task ':libs:entitlement:asm-provider:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:entitlement:asm-provider:compileJava' as it is up-to-date.
+No compile result for :libs:simdvec:compileMain21Java
+No compile result for :libs:entitlement:asm-provider:compileJava
+No compile result for :libs:simdvec:compileMain21Java
+No compile result for :libs:entitlement:asm-provider:compileJava
+Resolve mutations for :libs:simdvec:main21Classes (Thread[#1329,Execution worker Thread 8,5,main]) started.
+No compile result for :libs:entitlement:asm-provider:compileJava
+No compile result for :libs:entitlement:asm-provider:compileJava
+:libs:simdvec:main21Classes (Thread[#1329,Execution worker Thread 8,5,main]) started.
+Resolve mutations for :libs:entitlement:asm-provider:classes (Thread[#1330,Execution worker Thread 9,5,main]) started.
+:libs:entitlement:asm-provider:classes (Thread[#1330,Execution worker Thread 9,5,main]) started.
+
+> Task :libs:simdvec:main21Classes UP-TO-DATE
+Skipping task ':libs:simdvec:main21Classes' as it has no actions.
+
+> Task :libs:entitlement:asm-provider:classes UP-TO-DATE
+Skipping task ':libs:entitlement:asm-provider:classes' as it has no actions.
+Resolve mutations for :libs:simdvec:compileMain22Java (Thread[#1329,Execution worker Thread 8,5,main]) started.
+Resolve mutations for :libs:entitlement:asm-provider:jar (Thread[#1330,Execution worker Thread 9,5,main]) started.
+:libs:entitlement:asm-provider:jar (Thread[#1330,Execution worker Thread 9,5,main]) started.
+:libs:simdvec:compileMain22Java (Thread[#1329,Execution worker Thread 8,5,main]) started.
+
+> Task :libs:simdvec:compileMain22Java UP-TO-DATE
+Caching disabled for task ':libs:simdvec:compileMain22Java' because:
+  Build cache is disabled
+Skipping task ':libs:simdvec:compileMain22Java' as it is up-to-date.
+No compile result for :libs:simdvec:compileMain22Java
+No compile result for :libs:simdvec:compileMain22Java
+No compile result for :libs:simdvec:compileMain22Java
+No compile result for :libs:simdvec:compileMain22Java
+Resolve mutations for :libs:simdvec:main22Classes (Thread[#1329,Execution worker Thread 8,5,main]) started.
+:libs:simdvec:main22Classes (Thread[#1329,Execution worker Thread 8,5,main]) started.
+
+> Task :libs:simdvec:main22Classes UP-TO-DATE
+Skipping task ':libs:simdvec:main22Classes' as it has no actions.
+Resolve mutations for :libs:simdvec:jar (Thread[#1329,Execution worker Thread 8,5,main]) started.
+:libs:simdvec:jar (Thread[#1329,Execution worker Thread 8,5,main]) started.
+
+> Task :libs:logging:jar UP-TO-DATE
+Caching disabled for task ':libs:logging:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:logging:jar' as it is up-to-date.
+Resolve mutations for :libs:secure-sm:classes (Thread[#1392,included builds Thread 2,5,main]) started.
+:libs:secure-sm:classes (Thread[#1392,included builds Thread 2,5,main]) started.
+
+> Task :libs:secure-sm:classes UP-TO-DATE
+Skipping task ':libs:secure-sm:classes' as it has no actions.
+Resolve mutations for :libs:secure-sm:jar (Thread[#1392,included builds Thread 2,5,main]) started.
+:libs:secure-sm:jar (Thread[#1392,included builds Thread 2,5,main]) started.
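The `compileMain21Java`/`compileMain22Java` (and, for other modules, `main23`) tasks above are per-Java-version source sets that feed multi-release jars. As a hedged sketch of how one might verify that (the jar path below is illustrative, not taken from the log), the versioned class entries live under `META-INF/versions/` in the resulting artifact:

```bash
# List the multi-release entries of the simdvec jar; adjust the path to
# wherever your build actually places the jar (assumption, not from the log).
jar tf libs/simdvec/build/distributions/elasticsearch-simdvec-9.0.0-SNAPSHOT.jar \
  | grep '^META-INF/versions/'
```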
+
+> Task :server:compileJava UP-TO-DATE
+Caching disabled for task ':server:compileJava' because:
+  Build cache is disabled
+Skipping task ':server:compileJava' as it is up-to-date.
+No compile result for :server:compileJava
+No compile result for :server:compileJava
+No compile result for :server:compileJava
+No compile result for :server:compileJava
+Resolve mutations for :server:classes (Thread[#1325,Execution worker Thread 4,5,main]) started.
+:server:classes (Thread[#1325,Execution worker Thread 4,5,main]) started.
+
+> Task :server:classes UP-TO-DATE
+Skipping task ':server:classes' as it has no actions.
+
+> Task :libs:grok:jar UP-TO-DATE
+Caching disabled for task ':libs:grok:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:grok:jar' as it is up-to-date.
+Resolve mutations for :server:jar (Thread[#1325,Execution worker Thread 4,5,main]) started.
+Resolve mutations for :libs:ssl-config:compileJava (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:server:jar (Thread[#1325,Execution worker Thread 4,5,main]) started.
+:libs:ssl-config:compileJava (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :libs:ssl-config:compileJava UP-TO-DATE
+Caching disabled for task ':libs:ssl-config:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:ssl-config:compileJava' as it is up-to-date.
+No compile result for :libs:ssl-config:compileJava
+No compile result for :libs:ssl-config:compileJava
+No compile result for :libs:ssl-config:compileJava
+No compile result for :libs:ssl-config:compileJava
+work action resolve main (project :server) (Thread[#1328,Execution worker Thread 7,5,main]) started.
+work action resolve main (project :libs:ssl-config) (Thread[#1328,Execution worker Thread 7,5,main]) started.
+work action resolve main (project :server) (Thread[#1328,Execution worker Thread 7,5,main]) started.
+work action resolve main (project :libs:ssl-config) (Thread[#1328,Execution worker Thread 7,5,main]) started.
+Resolve mutations for :modules:transport-netty4:compileJava (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:modules:transport-netty4:compileJava (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :libs:geo:jar UP-TO-DATE
+Caching disabled for task ':libs:geo:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:geo:jar' as it is up-to-date.
+Resolve mutations for :libs:ssl-config:classes (Thread[#1326,Execution worker Thread 5,5,main]) started.
+:libs:ssl-config:classes (Thread[#1326,Execution worker Thread 5,5,main]) started.
+
+> Task :libs:ssl-config:classes UP-TO-DATE
+Skipping task ':libs:ssl-config:classes' as it has no actions.
+
+> Task :modules:transport-netty4:compileJava UP-TO-DATE
+Caching disabled for task ':modules:transport-netty4:compileJava' because:
+  Build cache is disabled
+Skipping task ':modules:transport-netty4:compileJava' as it is up-to-date.
+No compile result for :modules:transport-netty4:compileJava
+Resolve mutations for :libs:ssl-config:jar (Thread[#1326,Execution worker Thread 5,5,main]) started.
+No compile result for :modules:transport-netty4:compileJava
+No compile result for :modules:transport-netty4:compileJava
+No compile result for :modules:transport-netty4:compileJava
+:libs:ssl-config:jar (Thread[#1326,Execution worker Thread 5,5,main]) started.
+work action resolve main (project :modules:transport-netty4) (Thread[#1328,Execution worker Thread 7,5,main]) started.
+Resolve mutations for :test:framework:compileJava (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:test:framework:compileJava (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :test:framework:compileJava UP-TO-DATE
+Caching disabled for task ':test:framework:compileJava' because:
+  Build cache is disabled
+Skipping task ':test:framework:compileJava' as it is up-to-date.
+No compile result for :test:framework:compileJava
+No compile result for :test:framework:compileJava
+No compile result for :test:framework:compileJava
+No compile result for :test:framework:compileJava
+work action resolve main (project :test:framework) (Thread[#1328,Execution worker Thread 7,5,main]) started.
+Resolve mutations for :libs:simdvec:compileTestJava (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:libs:simdvec:compileTestJava (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :libs:plugin-analysis-api:jar UP-TO-DATE
+Caching disabled for task ':libs:plugin-analysis-api:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:plugin-analysis-api:jar' as it is up-to-date.
+:libs:tdigest:processResources (Thread[#1327,Execution worker Thread 6,5,main]) started.
+
+> Task :libs:tdigest:processResources NO-SOURCE
+Skipping task ':libs:tdigest:processResources' as it has no source files and no previous output files.
+Resolve mutations for :libs:tdigest:classes (Thread[#1327,Execution worker Thread 6,5,main]) started.
+:libs:tdigest:classes (Thread[#1327,Execution worker Thread 6,5,main]) started.
+
+> Task :libs:tdigest:classes UP-TO-DATE
+Skipping task ':libs:tdigest:classes' as it has no actions.
+Resolve mutations for :libs:tdigest:jar (Thread[#1327,Execution worker Thread 6,5,main]) started.
+:libs:tdigest:jar (Thread[#1327,Execution worker Thread 6,5,main]) started.
+
+> Task :libs:simdvec:compileTestJava UP-TO-DATE
+Caching disabled for task ':libs:simdvec:compileTestJava' because:
+  Build cache is disabled
+Skipping task ':libs:simdvec:compileTestJava' as it is up-to-date.
+No compile result for :libs:simdvec:compileTestJava
+No compile result for :libs:simdvec:compileTestJava
+No compile result for :libs:simdvec:compileTestJava
+No compile result for :libs:simdvec:compileTestJava
+Resolve mutations for :libs:simdvec:testClasses (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:libs:simdvec:testClasses (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :libs:simdvec:testClasses UP-TO-DATE
+Skipping task ':libs:simdvec:testClasses' as it has no actions.
+Resolve mutations for :libs:x-content:impl:compileJava (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:libs:x-content:impl:compileJava (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :libs:x-content:impl:compileJava UP-TO-DATE
+Caching disabled for task ':libs:x-content:impl:compileJava' because:
+  Build cache is disabled
+Skipping task ':libs:x-content:impl:compileJava' as it is up-to-date.
+No compile result for :libs:x-content:impl:compileJava
+No compile result for :libs:x-content:impl:compileJava
+No compile result for :libs:x-content:impl:compileJava
+No compile result for :libs:x-content:impl:compileJava
+Resolve mutations for :libs:x-content:impl:processResources (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:libs:x-content:impl:processResources (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :libs:x-content:impl:processResources UP-TO-DATE
+Caching disabled for task ':libs:x-content:impl:processResources' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:x-content:impl:processResources' as it is up-to-date.
+Resolve mutations for :libs:x-content:impl:classes (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:libs:x-content:impl:classes (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :libs:x-content:impl:classes UP-TO-DATE
+Skipping task ':libs:x-content:impl:classes' as it has no actions.
+Resolve mutations for :libs:x-content:impl:jar (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:libs:x-content:impl:jar (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :libs:lz4:jar UP-TO-DATE
+Caching disabled for task ':libs:lz4:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:lz4:jar' as it is up-to-date.
+Resolve mutations for :libs:x-content:processResources (Thread[#1332,Execution worker Thread 11,5,main]) started.
+:libs:x-content:processResources (Thread[#1332,Execution worker Thread 11,5,main]) started.
+
+> Task :libs:x-content:processResources NO-SOURCE
+Skipping task ':libs:x-content:processResources' as it has no source files and no previous output files.
+Resolve mutations for :modules:transport-netty4:processResources (Thread[#1332,Execution worker Thread 11,5,main]) started.
+:modules:transport-netty4:processResources (Thread[#1332,Execution worker Thread 11,5,main]) started.
+
+> Task :modules:transport-netty4:processResources NO-SOURCE
+Skipping task ':modules:transport-netty4:processResources' as it has no source files and no previous output files.
+Resolve mutations for :modules:transport-netty4:classes (Thread[#1332,Execution worker Thread 11,5,main]) started.
+:modules:transport-netty4:classes (Thread[#1332,Execution worker Thread 11,5,main]) started.
+
+> Task :modules:transport-netty4:classes UP-TO-DATE
+Skipping task ':modules:transport-netty4:classes' as it has no actions.
+Resolve mutations for :modules:transport-netty4:jar (Thread[#1332,Execution worker Thread 11,5,main]) started.
+:modules:transport-netty4:jar (Thread[#1332,Execution worker Thread 11,5,main]) started.
+
+> Task :libs:core:jar UP-TO-DATE
+Caching disabled for task ':libs:core:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:core:jar' as it is up-to-date.
+Resolve mutations for :test:framework:processResources (Thread[#1322,Execution worker,5,main]) started.
+:test:framework:processResources (Thread[#1322,Execution worker,5,main]) started.
+
+> Task :test:framework:processResources UP-TO-DATE
+Caching disabled for task ':test:framework:processResources' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':test:framework:processResources' as it is up-to-date.
+Resolve mutations for :test:framework:classes (Thread[#1322,Execution worker,5,main]) started.
+:test:framework:classes (Thread[#1322,Execution worker,5,main]) started.
+
+> Task :test:framework:classes UP-TO-DATE
+Skipping task ':test:framework:classes' as it has no actions.
+Resolve mutations for :test:framework:jar (Thread[#1322,Execution worker,5,main]) started.
+:test:framework:jar (Thread[#1322,Execution worker,5,main]) started.
+
+> Task :libs:cli:jar UP-TO-DATE
+Caching disabled for task ':libs:cli:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:cli:jar' as it is up-to-date.
+Resolve mutations for :test:immutable-collections-patch:compileJava (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:test:immutable-collections-patch:compileJava (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :test:immutable-collections-patch:compileJava UP-TO-DATE
+Caching disabled for task ':test:immutable-collections-patch:compileJava' because:
+  Build cache is disabled
+Skipping task ':test:immutable-collections-patch:compileJava' as it is up-to-date.
+No compile result for :test:immutable-collections-patch:compileJava
+No compile result for :test:immutable-collections-patch:compileJava
+No compile result for :test:immutable-collections-patch:compileJava
+No compile result for :test:immutable-collections-patch:compileJava
+Resolve mutations for :test:immutable-collections-patch:processResources (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:test:immutable-collections-patch:processResources (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :test:immutable-collections-patch:processResources NO-SOURCE
+Skipping task ':test:immutable-collections-patch:processResources' as it has no source files and no previous output files.
+Resolve mutations for :test:immutable-collections-patch:classes (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:test:immutable-collections-patch:classes (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :test:immutable-collections-patch:classes UP-TO-DATE
+Skipping task ':test:immutable-collections-patch:classes' as it has no actions.
+Resolve mutations for :test:immutable-collections-patch:generatePatch (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:test:immutable-collections-patch:generatePatch (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :test:immutable-collections-patch:generatePatch UP-TO-DATE
+Caching disabled for task ':test:immutable-collections-patch:generatePatch' because:
+  Build cache is disabled
+  Gradle would require more information to cache this task
+Skipping task ':test:immutable-collections-patch:generatePatch' as it is up-to-date.
+Resolve mutations for :libs:native:native-libraries:extractLibs (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:libs:native:native-libraries:extractLibs (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :libs:plugin-api:jar UP-TO-DATE
+Caching disabled for task ':libs:plugin-api:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:plugin-api:jar' as it is up-to-date.
+work action resolve jdk-patches (project :test:immutable-collections-patch) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+work action resolve elasticsearch-grok-9.0.0-SNAPSHOT.jar (project :libs:grok) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+work action resolve elasticsearch-plugin-api-9.0.0-SNAPSHOT.jar (project :libs:plugin-api) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+work action resolve elasticsearch-geo-9.0.0-SNAPSHOT.jar (project :libs:geo) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+work action resolve elasticsearch-rest-client-9.0.0-SNAPSHOT.jar (project :client:rest) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+work action resolve elasticsearch-core-9.0.0-SNAPSHOT.jar (project :libs:core) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+work action resolve elasticsearch-lz4-9.0.0-SNAPSHOT.jar (project :libs:lz4) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+work action resolve elasticsearch-cli-9.0.0-SNAPSHOT.jar (project :libs:cli) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+work action resolve elasticsearch-plugin-analysis-api-9.0.0-SNAPSHOT.jar (project :libs:plugin-analysis-api) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+work action resolve elasticsearch-logging-9.0.0-SNAPSHOT.jar (project :libs:logging) (Thread[#1323,Execution worker Thread 2,5,main]) started.
+
+> Task :libs:native:native-libraries:extractLibs UP-TO-DATE
+Transforming zstd-1.5.5-darwin-aarch64.jar (org.elasticsearch:zstd:1.5.5) with UnzipTransform
+Transforming zstd-1.5.5-darwin-x86-64.jar (org.elasticsearch:zstd:1.5.5) with UnzipTransform
+Transforming zstd-1.5.5-linux-aarch64.jar (org.elasticsearch:zstd:1.5.5) with UnzipTransform
+Transforming zstd-1.5.5-linux-x86-64.jar (org.elasticsearch:zstd:1.5.5) with UnzipTransform
+Transforming zstd-1.5.5-windows-x86-64.jar (org.elasticsearch:zstd:1.5.5) with UnzipTransform
+Transforming vec-1.0.10.zip (org.elasticsearch:vec:1.0.10) with UnzipTransform
+Caching disabled for task ':libs:native:native-libraries:extractLibs' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:native:native-libraries:extractLibs' as it is up-to-date.
+work action resolve platform (project :libs:native:native-libraries) (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :libs:native:jar UP-TO-DATE
+Caching disabled for task ':libs:native:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:native:jar' as it is up-to-date.
+work action resolve elasticsearch-native-9.0.0-SNAPSHOT.jar (project :libs:native) (Thread[#1324,Execution worker Thread 3,5,main]) started.
+
+> Task :libs:entitlement:asm-provider:jar UP-TO-DATE
+Caching disabled for task ':libs:entitlement:asm-provider:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:entitlement:asm-provider:jar' as it is up-to-date.
+work action resolve elasticsearch-entitlement-asm-provider-9.0.0-SNAPSHOT.jar (project :libs:entitlement:asm-provider) (Thread[#1330,Execution worker Thread 9,5,main]) started.
+UnzipTransform (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Transform elasticsearch-entitlement-asm-provider-9.0.0-SNAPSHOT.jar (project :libs:entitlement:asm-provider) with UnzipTransform
+Transforming elasticsearch-entitlement-asm-provider-9.0.0-SNAPSHOT.jar (project :libs:entitlement:asm-provider) with UnzipTransform
+Resolve mutations for :libs:entitlement:generateAsm-providerProviderManifest (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:libs:entitlement:generateAsm-providerProviderManifest (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :libs:entitlement:generateAsm-providerProviderManifest UP-TO-DATE
+Transforming asm-9.7.1.jar (org.ow2.asm:asm:9.7.1) with UnzipTransform
+Caching disabled for task ':libs:entitlement:generateAsm-providerProviderManifest' because:
+  Build cache is disabled
+  Caching has not been enabled for the task
+Skipping task ':libs:entitlement:generateAsm-providerProviderManifest' as it is up-to-date.
+Resolve mutations for :libs:entitlement:generateAsm-providerProviderImpl (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:libs:entitlement:generateAsm-providerProviderImpl (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :libs:entitlement:generateAsm-providerProviderImpl UP-TO-DATE
+Caching disabled for task ':libs:entitlement:generateAsm-providerProviderImpl' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:entitlement:generateAsm-providerProviderImpl' as it is up-to-date.
+Resolve mutations for :libs:entitlement:classes (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:libs:entitlement:classes (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :libs:entitlement:classes UP-TO-DATE
+Skipping task ':libs:entitlement:classes' as it has no actions.
+Resolve mutations for :libs:entitlement:compileMain23Java (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:libs:entitlement:compileMain23Java (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :libs:entitlement:compileMain23Java UP-TO-DATE
+Caching disabled for task ':libs:entitlement:compileMain23Java' because:
+  Build cache is disabled
+Skipping task ':libs:entitlement:compileMain23Java' as it is up-to-date.
+No compile result for :libs:entitlement:compileMain23Java
+No compile result for :libs:entitlement:compileMain23Java
+No compile result for :libs:entitlement:compileMain23Java
+
+> Task :libs:simdvec:jar UP-TO-DATE
+Caching disabled for task ':libs:simdvec:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:simdvec:jar' as it is up-to-date.
+No compile result for :libs:entitlement:compileMain23Java
+Resolve mutations for :libs:entitlement:main23Classes (Thread[#1331,Execution worker Thread 10,5,main]) started.
+work action resolve elasticsearch-simdvec-9.0.0-SNAPSHOT.jar (project :libs:simdvec) (Thread[#1329,Execution worker Thread 8,5,main]) started.
+:libs:entitlement:main23Classes (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :libs:entitlement:main23Classes UP-TO-DATE
+Skipping task ':libs:entitlement:main23Classes' as it has no actions.
+Resolve mutations for :libs:entitlement:jar (Thread[#1331,Execution worker Thread 10,5,main]) started.
+:libs:entitlement:jar (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :libs:secure-sm:jar UP-TO-DATE
+Caching disabled for task ':libs:secure-sm:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:secure-sm:jar' as it is up-to-date.
+work action resolve elasticsearch-secure-sm-9.0.0-SNAPSHOT.jar (project :libs:secure-sm) (Thread[#1392,included builds Thread 2,5,main]) started.
+
+> Task :server:jar UP-TO-DATE
+Caching disabled for task ':server:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':server:jar' as it is up-to-date.
+work action resolve elasticsearch-9.0.0-SNAPSHOT.jar (project :server) (Thread[#1325,Execution worker Thread 4,5,main]) started.
+
+> Task :libs:ssl-config:jar UP-TO-DATE
+Caching disabled for task ':libs:ssl-config:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:ssl-config:jar' as it is up-to-date.
+work action resolve elasticsearch-ssl-config-9.0.0-SNAPSHOT.jar (project :libs:ssl-config) (Thread[#1326,Execution worker Thread 5,5,main]) started.
+
+> Task :libs:tdigest:jar UP-TO-DATE
+Caching disabled for task ':libs:tdigest:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:tdigest:jar' as it is up-to-date.
+work action resolve elasticsearch-tdigest-9.0.0-SNAPSHOT.jar (project :libs:tdigest) (Thread[#1327,Execution worker Thread 6,5,main]) started.
+
+> Task :libs:x-content:impl:jar UP-TO-DATE
+Caching disabled for task ':libs:x-content:impl:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:x-content:impl:jar' as it is up-to-date.
+work action resolve x-content-impl-9.0.0-SNAPSHOT.jar (project :libs:x-content:impl) (Thread[#1328,Execution worker Thread 7,5,main]) started.
+UnzipTransform (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Transform x-content-impl-9.0.0-SNAPSHOT.jar (project :libs:x-content:impl) with UnzipTransform
+Transforming x-content-impl-9.0.0-SNAPSHOT.jar (project :libs:x-content:impl) with UnzipTransform
+Resolve mutations for :libs:x-content:generateImplProviderManifest (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:libs:x-content:generateImplProviderManifest (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :libs:x-content:generateImplProviderManifest UP-TO-DATE
+Transforming jackson-core-2.17.2.jar (com.fasterxml.jackson.core:jackson-core:2.17.2) with UnzipTransform
+Transforming jackson-dataformat-smile-2.17.2.jar (com.fasterxml.jackson.dataformat:jackson-dataformat-smile:2.17.2) with UnzipTransform
+Transforming jackson-dataformat-yaml-2.17.2.jar (com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:2.17.2) with UnzipTransform
+Transforming jackson-dataformat-cbor-2.17.2.jar (com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:2.17.2) with UnzipTransform
+Transforming snakeyaml-2.0.jar (org.yaml:snakeyaml:2.0) with UnzipTransform
+Caching disabled for task ':libs:x-content:generateImplProviderManifest' because:
+  Build cache is disabled
+  Caching has not been enabled for the task
+Skipping task ':libs:x-content:generateImplProviderManifest' as it is up-to-date.
+Resolve mutations for :libs:x-content:generateImplProviderImpl (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:libs:x-content:generateImplProviderImpl (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :libs:x-content:generateImplProviderImpl UP-TO-DATE
+Caching disabled for task ':libs:x-content:generateImplProviderImpl' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:x-content:generateImplProviderImpl' as it is up-to-date.
+Resolve mutations for :libs:x-content:classes (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:libs:x-content:classes (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :libs:x-content:classes UP-TO-DATE
+Skipping task ':libs:x-content:classes' as it has no actions.
+Resolve mutations for :libs:x-content:jar (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:libs:x-content:jar (Thread[#1328,Execution worker Thread 7,5,main]) started.
+
+> Task :modules:transport-netty4:jar UP-TO-DATE
+Caching disabled for task ':modules:transport-netty4:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':modules:transport-netty4:jar' as it is up-to-date.
+work action resolve transport-netty4-9.0.0-SNAPSHOT.jar (project :modules:transport-netty4) (Thread[#1332,Execution worker Thread 11,5,main]) started.
+
+> Task :test:framework:jar UP-TO-DATE
+Caching disabled for task ':test:framework:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':test:framework:jar' as it is up-to-date.
+work action resolve framework-9.0.0-SNAPSHOT.jar (project :test:framework) (Thread[#1322,Execution worker,5,main]) started.
+
+> Task :libs:entitlement:jar UP-TO-DATE
+Caching disabled for task ':libs:entitlement:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:entitlement:jar' as it is up-to-date.
+work action resolve elasticsearch-entitlement-9.0.0-SNAPSHOT.jar (project :libs:entitlement) (Thread[#1331,Execution worker Thread 10,5,main]) started.
+
+> Task :libs:x-content:jar UP-TO-DATE
+Caching disabled for task ':libs:x-content:jar' because:
+  Build cache is disabled
+  Not worth caching
+Skipping task ':libs:x-content:jar' as it is up-to-date.
+work action resolve elasticsearch-x-content-9.0.0-SNAPSHOT.jar (project :libs:x-content) (Thread[#1328,Execution worker Thread 7,5,main]) started.
+Resolve mutations for :libs:simdvec:test (Thread[#1328,Execution worker Thread 7,5,main]) started.
+:libs:simdvec:test (Thread[#1328,Execution worker Thread 7,5,main]) started.
+Gradle Test Executor 18 started executing tests.
+Gradle Test Executor 17 started executing tests.
+Gradle Test Executor 19 started executing tests.
+Gradle Test Executor 19 finished executing tests.
+WARNING: A terminally deprecated method in java.lang.System has been called
+WARNING: System::setSecurityManager has been called by org.gradle.api.internal.tasks.testing.worker.TestWorker (file:/Users/rene/.gradle/wrapper/dists/gradle-8.11.1-all/2qik7nd48slq1ooc2496ixf4i/gradle-8.11.1/lib/plugins/gradle-testing-base-infrastructure-8.11.1.jar)
+WARNING: Please consider reporting this to the maintainers of org.gradle.api.internal.tasks.testing.worker.TestWorker
+WARNING: System::setSecurityManager will be removed in a future release
+Gradle Test Executor 18 finished executing tests.
+WARNING: A terminally deprecated method in java.lang.System has been called
+WARNING: System::setSecurityManager has been called by org.gradle.api.internal.tasks.testing.worker.TestWorker (file:/Users/rene/.gradle/wrapper/dists/gradle-8.11.1-all/2qik7nd48slq1ooc2496ixf4i/gradle-8.11.1/lib/plugins/gradle-testing-base-infrastructure-8.11.1.jar)
+WARNING: Please consider reporting this to the maintainers of org.gradle.api.internal.tasks.testing.worker.TestWorker
+WARNING: System::setSecurityManager will be removed in a future release
+Gradle Test Executor 17 finished executing tests.
+
+> Task :libs:simdvec:test
+Caching disabled for task ':libs:simdvec:test' because:
+  Build cache is disabled
+Task ':libs:simdvec:test' is not up-to-date because:
+  Output property 'binaryResultsDirectory' file /Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/test-results/test/binary has been removed.
+  Output property 'binaryResultsDirectory' file /Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/test-results/test/binary/output.bin has been removed.
+  Output property 'binaryResultsDirectory' file /Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/test-results/test/binary/output.bin.idx has been removed.
+  and more...
+  and more...
+Starting process 'Gradle Test Executor 17'.
Working directory: /Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/testrun/test Command: /Users/rene/.gradle/jdks/oracle_corporation-23-aarch64-os_x.2/jdk-23.jdk/Contents/Home/bin/java -Des.nativelibs.path=/Users/rene/dev/elastic/elasticsearch/libs/native/libraries/build/platform/darwin-aarch64 -Des.scripting.update.ctx_in_params=false -Des.search.rewrite_sort=true -Des.transport.cname_in_publish_address=true -Dgradle.dist.lib=/Users/rene/.gradle/wrapper/dists/gradle-8.11.1-all/2qik7nd48slq1ooc2496ixf4i/gradle-8.11.1/lib -Dgradle.user.home=/Users/rene/.gradle -Dgradle.worker.jar=/Users/rene/.gradle/caches/8.11.1/workerMain/gradle-worker.jar -Dio.netty.noKeySetOptimization=true -Dio.netty.noUnsafe=true -Dio.netty.recycler.maxCapacityPerThread=0 -Djava.awt.headless=true -Djava.locale.providers=CLDR -Djava.security.manager=allow -Djna.nosys=true -Dorg.gradle.internal.worker.tmpdir=/Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/tmp/test/work -Dtests.artifact=simdvec -Dtests.gradle=true -Dtests.hackImmutableCollections=true -Dtests.logger.level=WARN -Dtests.security.manager=true -Dtests.seed=7B469FBE8B6D0C65 -Dtests.task=:libs:simdvec:test -Dtests.testfeatures.enabled=true -Dworkspace.dir=/Users/rene/dev/elastic/elasticsearch --add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/java.security.cert=ALL-UNNAMED --add-opens=java.base/java.nio.channels=ALL-UNNAMED --add-opens=java.base/java.net=ALL-UNNAMED --add-opens=java.base/javax.net.ssl=ALL-UNNAMED --add-opens=java.base/java.nio.file=ALL-UNNAMED --add-opens=java.base/java.time=ALL-UNNAMED --add-opens=java.management/java.lang.management=ALL-UNNAMED -XX:+HeapDumpOnOutOfMemoryError -esa --add-modules=jdk.incubator.vector -XX:HeapDumpPath=/Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/heapdump --patch-module=java.base=/Users/rene/dev/elastic/elasticsearch/test/immutable-collections-patch/build/jdk-patches/java.base --add-opens=java.base/java.util=ALL-UNNAMED @/Users/rene/.gradle/.tmp/gradle-worker-classpath15834082604113316666txt -Xms512m -Xmx512m -Dfile.encoding=UTF-8 -Djava.io.tmpdir=/Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/testrun/test/temp -Duser.country=DE -Duser.language=en -Duser.variant -ea worker.org.gradle.process.internal.worker.GradleWorkerMain 'Gradle Test Executor 17' +Starting process 'Gradle Test Executor 19'. 
Working directory: /Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/testrun/test Command: /Users/rene/.gradle/jdks/oracle_corporation-23-aarch64-os_x.2/jdk-23.jdk/Contents/Home/bin/java -Des.nativelibs.path=/Users/rene/dev/elastic/elasticsearch/libs/native/libraries/build/platform/darwin-aarch64 -Des.scripting.update.ctx_in_params=false -Des.search.rewrite_sort=true -Des.transport.cname_in_publish_address=true -Dgradle.dist.lib=/Users/rene/.gradle/wrapper/dists/gradle-8.11.1-all/2qik7nd48slq1ooc2496ixf4i/gradle-8.11.1/lib -Dgradle.user.home=/Users/rene/.gradle -Dgradle.worker.jar=/Users/rene/.gradle/caches/8.11.1/workerMain/gradle-worker.jar -Dio.netty.noKeySetOptimization=true -Dio.netty.noUnsafe=true -Dio.netty.recycler.maxCapacityPerThread=0 -Djava.awt.headless=true -Djava.locale.providers=CLDR -Djava.security.manager=allow -Djna.nosys=true -Dorg.gradle.internal.worker.tmpdir=/Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/tmp/test/work -Dtests.artifact=simdvec -Dtests.gradle=true -Dtests.hackImmutableCollections=true -Dtests.logger.level=WARN -Dtests.security.manager=true -Dtests.seed=7B469FBE8B6D0C65 -Dtests.task=:libs:simdvec:test -Dtests.testfeatures.enabled=true -Dworkspace.dir=/Users/rene/dev/elastic/elasticsearch --add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/java.security.cert=ALL-UNNAMED --add-opens=java.base/java.nio.channels=ALL-UNNAMED --add-opens=java.base/java.net=ALL-UNNAMED --add-opens=java.base/javax.net.ssl=ALL-UNNAMED --add-opens=java.base/java.nio.file=ALL-UNNAMED --add-opens=java.base/java.time=ALL-UNNAMED --add-opens=java.management/java.lang.management=ALL-UNNAMED -XX:+HeapDumpOnOutOfMemoryError -esa --add-modules=jdk.incubator.vector -XX:HeapDumpPath=/Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/heapdump --patch-module=java.base=/Users/rene/dev/elastic/elasticsearch/test/immutable-collections-patch/build/jdk-patches/java.base --add-opens=java.base/java.util=ALL-UNNAMED @/Users/rene/.gradle/.tmp/gradle-worker-classpath8760626708071365177txt -Xms512m -Xmx512m -Dfile.encoding=UTF-8 -Djava.io.tmpdir=/Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/testrun/test/temp -Duser.country=DE -Duser.language=en -Duser.variant -ea worker.org.gradle.process.internal.worker.GradleWorkerMain 'Gradle Test Executor 19' +Starting process 'Gradle Test Executor 18'. 
Working directory: /Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/testrun/test Command: /Users/rene/.gradle/jdks/oracle_corporation-23-aarch64-os_x.2/jdk-23.jdk/Contents/Home/bin/java -Des.nativelibs.path=/Users/rene/dev/elastic/elasticsearch/libs/native/libraries/build/platform/darwin-aarch64 -Des.scripting.update.ctx_in_params=false -Des.search.rewrite_sort=true -Des.transport.cname_in_publish_address=true -Dgradle.dist.lib=/Users/rene/.gradle/wrapper/dists/gradle-8.11.1-all/2qik7nd48slq1ooc2496ixf4i/gradle-8.11.1/lib -Dgradle.user.home=/Users/rene/.gradle -Dgradle.worker.jar=/Users/rene/.gradle/caches/8.11.1/workerMain/gradle-worker.jar -Dio.netty.noKeySetOptimization=true -Dio.netty.noUnsafe=true -Dio.netty.recycler.maxCapacityPerThread=0 -Djava.awt.headless=true -Djava.locale.providers=CLDR -Djava.security.manager=allow -Djna.nosys=true -Dorg.gradle.internal.worker.tmpdir=/Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/tmp/test/work -Dtests.artifact=simdvec -Dtests.gradle=true -Dtests.hackImmutableCollections=true -Dtests.logger.level=WARN -Dtests.security.manager=true -Dtests.seed=7B469FBE8B6D0C65 -Dtests.task=:libs:simdvec:test -Dtests.testfeatures.enabled=true -Dworkspace.dir=/Users/rene/dev/elastic/elasticsearch --add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/java.security.cert=ALL-UNNAMED --add-opens=java.base/java.nio.channels=ALL-UNNAMED --add-opens=java.base/java.net=ALL-UNNAMED --add-opens=java.base/javax.net.ssl=ALL-UNNAMED --add-opens=java.base/java.nio.file=ALL-UNNAMED --add-opens=java.base/java.time=ALL-UNNAMED --add-opens=java.management/java.lang.management=ALL-UNNAMED -XX:+HeapDumpOnOutOfMemoryError -esa --add-modules=jdk.incubator.vector -XX:HeapDumpPath=/Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/heapdump --patch-module=java.base=/Users/rene/dev/elastic/elasticsearch/test/immutable-collections-patch/build/jdk-patches/java.base --add-opens=java.base/java.util=ALL-UNNAMED @/Users/rene/.gradle/.tmp/gradle-worker-classpath6774643281130261042txt -Xms512m -Xmx512m -Dfile.encoding=UTF-8 -Djava.io.tmpdir=/Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/testrun/test/temp -Duser.country=DE -Duser.language=en -Duser.variant -ea worker.org.gradle.process.internal.worker.GradleWorkerMain 'Gradle Test Executor 18' +Successfully started process 'Gradle Test Executor 17' +Successfully started process 'Gradle Test Executor 18' +Successfully started process 'Gradle Test Executor 19' +WARNING: Using incubator modules: jdk.incubator.vector +WARNING: Using incubator modules: jdk.incubator.vector +WARNING: Using incubator modules: jdk.incubator.vector + +VectorScorerFactoryTests STANDARD_ERROR + WARNING: A restricted method in + java.lang.foreign.Linker + has been called + WARNING: + java.lang.foreign.Linker::downcallHandle + has been called by + org.elasticsearch.nativeaccess.jdk.LinkerHelper + in + an unnamed module + + WARNING: Use --enable-native-access= + ALL-UNNAMED + to avoid a warning for callers in this module + WARNING: Restricted methods will be blocked in a future release unless native access is enabled + + +ESVectorUtilTests STANDARD_ERROR + WARNING: A restricted method in + java.lang.foreign.Linker + has been called + WARNING: + java.lang.foreign.Linker::downcallHandle + has been called by + org.elasticsearch.nativeaccess.jdk.LinkerHelper + in + an unnamed module + + WARNING: Use --enable-native-access= + ALL-UNNAMED + to avoid a warning for callers in this module + WARNING: Restricted methods will be blocked in a 
future release unless native access is enabled + + +VectorScorerFactoryTests STANDARD_OUT + [2024-12-23T09:52:40,834][INFO ][o.e.n.j.JdkVectorLibrary ] [[SUITE-VectorScorerFactoryTests-seed#[7B469FBE8B6D0C65]]] vec_caps=1 + [2024-12-23T09:52:40,837][INFO ][o.e.n.NativeAccess ] [[SUITE-VectorScorerFactoryTests-seed#[7B469FBE8B6D0C65]]] Using native vector library; to disable start with -Dorg.elasticsearch.nativeaccess.enableVectorLibrary=false + +ESVectorUtilTests STANDARD_OUT + [2024-12-23T09:52:40,837][INFO ][o.e.n.j.JdkVectorLibrary ] [[SUITE-ESVectorUtilTests-seed#[7B469FBE8B6D0C65]]] vec_caps=1 + [2024-12-23T09:52:40,840][INFO ][o.e.n.NativeAccess ] [[SUITE-ESVectorUtilTests-seed#[7B469FBE8B6D0C65]]] Using native vector library; to disable start with -Dorg.elasticsearch.nativeaccess.enableVectorLibrary=false + +VectorScorerFactoryTests STANDARD_OUT + [2024-12-23T09:52:40,842][INFO ][o.e.n.NativeAccess ] [[SUITE-VectorScorerFactoryTests-seed#[7B469FBE8B6D0C65]]] Using [jdk] native provider and native methods for [MacOS] + +ESVectorUtilTests STANDARD_OUT + [2024-12-23T09:52:40,844][INFO ][o.e.n.NativeAccess ] [[SUITE-ESVectorUtilTests-seed#[7B469FBE8B6D0C65]]] Using [jdk] native provider and native methods for [MacOS] + [2024-12-23T09:52:41,180][INFO ][o.e.s.i.v.ESVectorizationProvider] [[SUITE-ESVectorUtilTests-seed#[7B469FBE8B6D0C65]]] Java vector incubator API enabled; uses preferredBitSize=128 + +ESVectorUtilTests STANDARD_ERROR + Dec 23, 2024 9:52:41 AM org.apache.lucene.internal.vectorization.PanamaVectorizationProvider + INFO: Java vector incubator API enabled; uses preferredBitSize=128 + +VectorScorerFactoryTests STANDARD_ERROR + Dec 23, 2024 9:52:41 AM org.apache.lucene.internal.vectorization.PanamaVectorizationProvider + INFO: Java vector incubator API enabled; uses preferredBitSize=128 + +VectorScorerFactoryTests > testRandomScorerMax STANDARD_OUT + [2024-12-23T04:52:41,272][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorerMax] JDK=23, os=Mac OS X, arch=aarch64 + +ESVectorUtilTests > testBitAndCount STANDARD_OUT + [2024-12-23T20:52:41,275][INFO ][o.e.s.ESVectorUtilTests ] [testBitAndCount] before test + [2024-12-23T20:52:41,282][INFO ][o.e.s.i.v.ESVectorizationProvider] [testBitAndCount] Java vector incubator API enabled; uses preferredBitSize=128 + +VectorScorerFactoryTests > testRandomScorerMax STANDARD_OUT + [2024-12-23T04:52:41,282][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorerMax] before test + +ESVectorUtilTests > testBitAndCount STANDARD_OUT + [2024-12-23T20:52:41,293][INFO ][o.e.s.ESVectorUtilTests ] [testBitAndCount] after test + +VectorScorerFactoryTests > testRandomScorerMax STANDARD_OUT + [2024-12-23T04:52:41,295][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorerMax] Testing testRandom-COSINE-2168.vex + +ESVectorUtilTests > testIpByteBin STANDARD_OUT + [2024-12-23T20:52:41,298][INFO ][o.e.s.ESVectorUtilTests ] [testIpByteBin] before test + +VectorScorerFactoryTests > testRandomScorerMax STANDARD_OUT + [2024-12-23T04:52:41,494][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorerMax] Testing testRandom-DOT_PRODUCT-1685.vex + +ESVectorUtilTests > testIpByteBin STANDARD_OUT + [2024-12-23T20:52:41,494][INFO ][o.e.s.ESVectorUtilTests ] [testIpByteBin] after test + +ESVectorUtilTests > testIpByteBinInvariants STANDARD_OUT + [2024-12-23T20:52:41,498][INFO ][o.e.s.ESVectorUtilTests ] [testIpByteBinInvariants] before test + [2024-12-23T20:52:41,499][INFO ][o.e.s.ESVectorUtilTests ] [testIpByteBinInvariants] after test + +ESVectorUtilTests > 
testBasicIpByteBin STANDARD_OUT + [2024-12-23T20:52:41,502][INFO ][o.e.s.ESVectorUtilTests ] [testBasicIpByteBin] before test + [2024-12-23T20:52:41,502][INFO ][o.e.s.ESVectorUtilTests ] [testBasicIpByteBin] after test + +ESVectorUtilTests > testIpByteBit STANDARD_OUT + [2024-12-23T20:52:41,505][INFO ][o.e.s.ESVectorUtilTests ] [testIpByteBit] before test + [2024-12-23T20:52:41,505][INFO ][o.e.s.ESVectorUtilTests ] [testIpByteBit] after test + +ESVectorUtilTests > testIpFloatBit STANDARD_OUT + [2024-12-23T20:52:41,507][INFO ][o.e.s.ESVectorUtilTests ] [testIpFloatBit] before test + [2024-12-23T20:52:41,508][INFO ][o.e.s.ESVectorUtilTests ] [testIpFloatBit] after test + +VectorScorerFactoryTests > testRandomScorerMax STANDARD_OUT + [2024-12-23T04:52:41,517][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorerMax] Testing testRandom-EUCLIDEAN-723.vex + [2024-12-23T04:52:41,592][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorerMax] Testing testRandom-MAXIMUM_INNER_PRODUCT-2519.vex + [2024-12-23T04:52:41,628][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorerMax] after test + +VectorScorerFactoryTests > testSimpleMaxChunkSizeSmall STANDARD_OUT + [2024-12-23T04:52:41,629][INFO ][o.e.s.VectorScorerFactoryTests] [testSimpleMaxChunkSizeSmall] JDK=23, os=Mac OS X, arch=aarch64 + [2024-12-23T04:52:41,632][INFO ][o.e.s.VectorScorerFactoryTests] [testSimpleMaxChunkSizeSmall] before test + [2024-12-23T04:52:41,632][INFO ][o.e.s.VectorScorerFactoryTests] [testSimpleMaxChunkSizeSmall] maxChunkSize=14 + [2024-12-23T04:52:41,645][INFO ][o.e.s.VectorScorerFactoryTests] [testSimpleMaxChunkSizeSmall] after test + +VectorScorerFactoryTests > testRace STANDARD_OUT + [2024-12-23T04:52:41,645][INFO ][o.e.s.VectorScorerFactoryTests] [testRace] JDK=23, os=Mac OS X, arch=aarch64 + [2024-12-23T04:52:41,648][INFO ][o.e.s.VectorScorerFactoryTests] [testRace] before test + [2024-12-23T04:52:41,659][INFO ][o.e.s.VectorScorerFactoryTests] [testRace] after test + +VectorScorerFactoryTests > testNonNegativeDotProduct STANDARD_OUT + [2024-12-23T04:52:41,659][INFO ][o.e.s.VectorScorerFactoryTests] [testNonNegativeDotProduct] JDK=23, os=Mac OS X, arch=aarch64 + [2024-12-23T04:52:41,662][INFO ][o.e.s.VectorScorerFactoryTests] [testNonNegativeDotProduct] before test + [2024-12-23T04:52:41,663][INFO ][o.e.s.VectorScorerFactoryTests] [testNonNegativeDotProduct] after test + +VectorScorerFactoryTests > testRandomSlice STANDARD_OUT + [2024-12-23T04:52:41,664][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] JDK=23, os=Mac OS X, arch=aarch64 + [2024-12-23T04:52:41,666][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] before test + [2024-12-23T04:52:41,667][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-0-30 + [2024-12-23T04:52:41,669][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-1-30 + [2024-12-23T04:52:41,671][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-2-30 + [2024-12-23T04:52:41,672][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-3-30 + [2024-12-23T04:52:41,674][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-4-30 + [2024-12-23T04:52:41,675][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-5-30 + [2024-12-23T04:52:41,676][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-6-30 + [2024-12-23T04:52:41,677][INFO 
][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-7-30 + [2024-12-23T04:52:41,679][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-8-30 + [2024-12-23T04:52:41,681][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-9-30 + [2024-12-23T04:52:41,682][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-10-30 + [2024-12-23T04:52:41,683][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-11-30 + [2024-12-23T04:52:41,684][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-12-30 + [2024-12-23T04:52:41,685][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-13-30 + [2024-12-23T04:52:41,687][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-14-30 + [2024-12-23T04:52:41,689][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-15-30 + [2024-12-23T04:52:41,690][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-16-30 + [2024-12-23T04:52:41,691][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-17-30 + [2024-12-23T04:52:41,692][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-18-30 + [2024-12-23T04:52:41,693][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-19-30 + [2024-12-23T04:52:41,694][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-20-30 + [2024-12-23T04:52:41,696][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-21-30 + [2024-12-23T04:52:41,697][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-22-30 + [2024-12-23T04:52:41,698][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-23-30 + [2024-12-23T04:52:41,699][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-24-30 + [2024-12-23T04:52:41,700][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-25-30 + [2024-12-23T04:52:41,702][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-26-30 + [2024-12-23T04:52:41,703][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-27-30 + [2024-12-23T04:52:41,703][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-28-30 + [2024-12-23T04:52:41,705][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-29-30 + [2024-12-23T04:52:41,706][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-30-30 + [2024-12-23T04:52:41,707][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-31-30 + [2024-12-23T04:52:41,708][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-32-30 + [2024-12-23T04:52:41,710][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-33-30 + [2024-12-23T04:52:41,711][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-34-30 + [2024-12-23T04:52:41,712][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-35-30 + [2024-12-23T04:52:41,713][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-36-30 + 
[2024-12-23T04:52:41,715][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-37-30 + [2024-12-23T04:52:41,716][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-38-30 + [2024-12-23T04:52:41,717][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-39-30 + [2024-12-23T04:52:41,718][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-40-30 + [2024-12-23T04:52:41,719][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-41-30 + [2024-12-23T04:52:41,720][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-42-30 + [2024-12-23T04:52:41,722][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-43-30 + [2024-12-23T04:52:41,723][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-44-30 + [2024-12-23T04:52:41,724][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-45-30 + [2024-12-23T04:52:41,725][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-46-30 + [2024-12-23T04:52:41,727][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-47-30 + [2024-12-23T04:52:41,727][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-48-30 + [2024-12-23T04:52:41,728][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-49-30 + [2024-12-23T04:52:41,729][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-50-30 + [2024-12-23T04:52:41,731][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-51-30 + [2024-12-23T04:52:41,732][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-52-30 + [2024-12-23T04:52:41,733][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-53-30 + [2024-12-23T04:52:41,734][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-54-30 + [2024-12-23T04:52:41,735][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-55-30 + [2024-12-23T04:52:41,736][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-56-30 + [2024-12-23T04:52:41,738][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-57-30 + [2024-12-23T04:52:41,739][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-58-30 + [2024-12-23T04:52:41,740][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-59-30 + [2024-12-23T04:52:41,741][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-60-30 + [2024-12-23T04:52:41,742][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-61-30 + [2024-12-23T04:52:41,743][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-62-30 + [2024-12-23T04:52:41,744][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-63-30 + [2024-12-23T04:52:41,746][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-64-30 + [2024-12-23T04:52:41,746][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-65-30 + [2024-12-23T04:52:41,747][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing 
testRandomSliceImpl-66-30 + [2024-12-23T04:52:41,749][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-67-30 + [2024-12-23T04:52:41,750][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-68-30 + [2024-12-23T04:52:41,751][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-69-30 + [2024-12-23T04:52:41,752][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-70-30 + [2024-12-23T04:52:41,753][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-71-30 + [2024-12-23T04:52:41,755][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-72-30 + [2024-12-23T04:52:41,755][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-73-30 + [2024-12-23T04:52:41,757][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-74-30 + [2024-12-23T04:52:41,758][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-75-30 + [2024-12-23T04:52:41,759][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-76-30 + [2024-12-23T04:52:41,760][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-77-30 + [2024-12-23T04:52:41,761][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-78-30 + [2024-12-23T04:52:41,761][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-79-30 + [2024-12-23T04:52:41,762][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-80-30 + [2024-12-23T04:52:41,764][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-81-30 + [2024-12-23T04:52:41,764][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-82-30 + [2024-12-23T04:52:41,766][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-83-30 + [2024-12-23T04:52:41,767][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-84-30 + [2024-12-23T04:52:41,768][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-85-30 + [2024-12-23T04:52:41,769][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-86-30 + [2024-12-23T04:52:41,770][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-87-30 + [2024-12-23T04:52:41,771][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-88-30 + [2024-12-23T04:52:41,772][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-89-30 + [2024-12-23T04:52:41,772][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-90-30 + [2024-12-23T04:52:41,774][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-91-30 + [2024-12-23T04:52:41,774][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-92-30 + [2024-12-23T04:52:41,775][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-93-30 + [2024-12-23T04:52:41,776][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-94-30 + [2024-12-23T04:52:41,777][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-95-30 + [2024-12-23T04:52:41,778][INFO ][o.e.s.VectorScorerFactoryTests] 
[testRandomSlice] Testing testRandomSliceImpl-96-30 + [2024-12-23T04:52:41,779][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-97-30 + [2024-12-23T04:52:41,780][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-98-30 + [2024-12-23T04:52:41,781][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] Testing testRandomSliceImpl-99-30 + [2024-12-23T04:52:41,782][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomSlice] after test + +VectorScorerFactoryTests > testRandomMin STANDARD_OUT + [2024-12-23T04:52:41,783][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomMin] JDK=23, os=Mac OS X, arch=aarch64 + [2024-12-23T04:52:41,785][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomMin] before test + [2024-12-23T04:52:41,786][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomMin] Testing testRandom-279 + [2024-12-23T04:52:41,799][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomMin] after test + +VectorScorerFactoryTests > testRandomMaxChunkSizeSmall STANDARD_OUT + [2024-12-23T04:52:41,799][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomMaxChunkSizeSmall] JDK=23, os=Mac OS X, arch=aarch64 + [2024-12-23T04:52:41,801][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomMaxChunkSizeSmall] before test + [2024-12-23T04:52:41,802][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomMaxChunkSizeSmall] maxChunkSize=118 + [2024-12-23T04:52:41,802][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomMaxChunkSizeSmall] Testing testRandom-3935 + [2024-12-23T04:52:41,916][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomMaxChunkSizeSmall] after test + +VectorScorerFactoryTests > testLarge SKIPPED + +VectorScorerFactoryTests > testRandomMax STANDARD_OUT + [2024-12-23T04:52:41,917][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomMax] JDK=23, os=Mac OS X, arch=aarch64 + [2024-12-23T04:52:41,919][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomMax] before test + [2024-12-23T04:52:41,920][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomMax] Testing testRandom-3531 + [2024-12-23T04:52:41,946][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomMax] after test + +VectorScorerFactoryTests > testRandom STANDARD_OUT + [2024-12-23T04:52:41,946][INFO ][o.e.s.VectorScorerFactoryTests] [testRandom] JDK=23, os=Mac OS X, arch=aarch64 + [2024-12-23T04:52:41,949][INFO ][o.e.s.VectorScorerFactoryTests] [testRandom] before test + [2024-12-23T04:52:41,949][INFO ][o.e.s.VectorScorerFactoryTests] [testRandom] Testing testRandom-1840 + [2024-12-23T04:52:41,980][INFO ][o.e.s.VectorScorerFactoryTests] [testRandom] after test + +VectorScorerFactoryTests > testRandomScorer STANDARD_OUT + [2024-12-23T04:52:41,981][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorer] JDK=23, os=Mac OS X, arch=aarch64 + [2024-12-23T04:52:41,985][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorer] before test + [2024-12-23T04:52:41,986][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorer] Testing testRandom-COSINE-199.vex + [2024-12-23T04:52:41,989][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorer] Testing testRandom-DOT_PRODUCT-3144.vex + [2024-12-23T04:52:41,992][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorer] Testing testRandom-EUCLIDEAN-3017.vex + [2024-12-23T04:52:42,020][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorer] Testing testRandom-MAXIMUM_INNER_PRODUCT-1915.vex + [2024-12-23T04:52:42,025][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorer] after test + +VectorScorerFactoryTests > testSimple STANDARD_OUT + 
[2024-12-23T04:52:42,025][INFO ][o.e.s.VectorScorerFactoryTests] [testSimple] JDK=23, os=Mac OS X, arch=aarch64 + [2024-12-23T04:52:42,027][INFO ][o.e.s.VectorScorerFactoryTests] [testSimple] before test + [2024-12-23T04:52:42,032][INFO ][o.e.s.VectorScorerFactoryTests] [testSimple] after test + +VectorScorerFactoryTests > testRandomScorerChunkSizeSmall STANDARD_OUT + [2024-12-23T04:52:42,032][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorerChunkSizeSmall] JDK=23, os=Mac OS X, arch=aarch64 + [2024-12-23T04:52:42,034][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorerChunkSizeSmall] before test + [2024-12-23T04:52:42,034][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorerChunkSizeSmall] maxChunkSize=76 + [2024-12-23T04:52:42,035][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorerChunkSizeSmall] Testing testRandom-COSINE-2044.vex + [2024-12-23T04:52:42,061][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorerChunkSizeSmall] Testing testRandom-DOT_PRODUCT-159.vex + [2024-12-23T04:52:42,063][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorerChunkSizeSmall] Testing testRandom-EUCLIDEAN-455.vex + [2024-12-23T04:52:42,069][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorerChunkSizeSmall] Testing testRandom-MAXIMUM_INNER_PRODUCT-1088.vex + [2024-12-23T04:52:42,071][INFO ][o.e.s.VectorScorerFactoryTests] [testRandomScorerChunkSizeSmall] after test + +VectorScorerFactoryTests > testSupport STANDARD_OUT + [2024-12-23T04:52:42,072][INFO ][o.e.s.VectorScorerFactoryTests] [testSupport] JDK=23, os=Mac OS X, arch=aarch64 + [2024-12-23T04:52:42,074][INFO ][o.e.s.VectorScorerFactoryTests] [testSupport] before test + [2024-12-23T04:52:42,074][INFO ][o.e.s.VectorScorerFactoryTests] [testSupport] after test +Finished generating test XML results (0.001 secs) into: /Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/test-results/test +Generating HTML test report... +Finished generating test html results (0.002 secs) into: /Users/rene/dev/elastic/elasticsearch/libs/simdvec/build/reports/tests/test + +0 problems were found storing the configuration cache. + +See the complete report at file:///Users/rene/dev/elastic/elasticsearch/build/reports/configuration-cache/bf67ihcjtxveqczwzl7s4owb2/9o6bg5x67n4fyxqy871utzrqc/configuration-cache-report.html + +BUILD SUCCESSFUL in 4s +86 actionable tasks: 2 executed, 84 up-to-date +Watched directory hierarchies: [/Users/rene/dev/elastic/elasticsearch] + +Publishing build scan... +https://gradle-enterprise.elastic.co/s/s75qfwti22ejk + +Configuration cache entry stored. 
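The restricted-method warnings in the log above come from org.elasticsearch.nativeaccess.jdk.LinkerHelper calling into the Foreign Function & Memory API. A minimal, self-contained sketch of such a downcall (DowncallDemo is a hypothetical class for illustration; getpid is POSIX-only) — running it without --enable-native-access=ALL-UNNAMED produces the same warning:

import java.lang.foreign.FunctionDescriptor;
import java.lang.foreign.Linker;
import java.lang.invoke.MethodHandle;

import static java.lang.foreign.ValueLayout.JAVA_INT;

public class DowncallDemo {
    public static void main(String[] args) throws Throwable {
        // Linker::downcallHandle is the restricted method named in the log.
        Linker linker = Linker.nativeLinker();
        MethodHandle getpid = linker.downcallHandle(
            linker.defaultLookup().find("getpid").orElseThrow(),
            FunctionDescriptor.of(JAVA_INT)
        );
        System.out.println("pid = " + (int) getpid.invokeExact());
    }
}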
diff --git a/libs/tdigest/build.gradle b/libs/tdigest/build.gradle
index 2713df701fb44..b79a6ce0a486a 100644
--- a/libs/tdigest/build.gradle
+++ b/libs/tdigest/build.gradle
@@ -36,7 +36,7 @@ tasks.named('forbiddenApisMain').configure {
   replaceSignatureFiles 'jdk-signatures'
 }
 
-ext.projectLicenses.set(['The Apache Software License, Version 2.0': 'http://www.apache.org/licenses/LICENSE-2.0'])
+ext.projectLicenses.set(['The Apache Software License, Version 2.0': providers.provider(() -> 'http://www.apache.org/licenses/LICENSE-2.0')])
 licenseFile.set(rootProject.file('licenses/APACHE-LICENSE-2.0.txt'))
 
 tasks.withType(LicenseHeadersTask.class).configureEach {
diff --git a/libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java b/libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java
index 06724b049f821..61bec8e41b163 100644
--- a/libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java
+++ b/libs/tdigest/src/main/java/org/elasticsearch/tdigest/MergingDigest.java
@@ -111,7 +111,7 @@ public class MergingDigest extends AbstractTDigest {
     // based on accumulated k-index. This can be much faster since we
     // scale functions are more expensive than the corresponding
     // weight limits.
-    public static boolean useWeightLimit = true;
+    public static final boolean useWeightLimit = true;
 
     static MergingDigest create(TDigestArrays arrays, double compression) {
         arrays.adjustBreaker(SHALLOW_SIZE);
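The MergingDigest hunk is a small hygiene fix: a mutable public static flag can be reassigned by any caller at any time, so two digests built moments apart could behave differently. A hypothetical illustration (not the actual MergingDigest code) of why final is preferable:

class WeightLimitFlagDemo {
    // A static final boolean is a compile-time constant: the JIT can fold
    // away the dead branch, and no caller can flip the behavior at runtime.
    public static final boolean USE_WEIGHT_LIMIT = true;

    static double mergeLimit(double kIndex) {
        return USE_WEIGHT_LIMIT ? kIndex + 1.0 : 2.0 * kIndex;
    }
}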
diff --git a/modules/aggregations/build.gradle b/modules/aggregations/build.gradle
index 3b5fb6ddecde9..d65b6e8fd2ddd 100644
--- a/modules/aggregations/build.gradle
+++ b/modules/aggregations/build.gradle
@@ -11,8 +11,8 @@ apply plugin: 'elasticsearch.yaml-rest-compat-test'
 apply plugin: 'elasticsearch.internal-cluster-test'
 
 esplugin {
-  description 'Adds "built in" aggregations to Elasticsearch.'
-  classname 'org.elasticsearch.aggregations.AggregationsPlugin'
+  description = 'Adds "built in" aggregations to Elasticsearch.'
+  classname = 'org.elasticsearch.aggregations.AggregationsPlugin'
   extendedPlugins = ['lang-painless']
 }
 
diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java
index 5249077bdfdbb..d225ccc9d173f 100644
--- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java
+++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/SearchCancellationIT.java
@@ -19,6 +19,7 @@
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.aggregations.AggregationsPlugin;
 import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder;
+import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.index.IndexMode;
 import org.elasticsearch.index.IndexSettings;
@@ -96,8 +97,12 @@ public void testCancellationDuringTimeSeriesAggregation() throws Exception {
         }
 
         logger.info("Executing search");
+        // we have to explicitly set error_trace=true for the later exception check for `TimeSeriesIndexSearcher`
+        Client client = client();
+        client.threadPool().getThreadContext().putHeader("error_trace", "true");
         TimeSeriesAggregationBuilder timeSeriesAggregationBuilder = new TimeSeriesAggregationBuilder("test_agg");
-        ActionFuture<SearchResponse> searchResponse = prepareSearch("test").setQuery(matchAllQuery())
+        ActionFuture<SearchResponse> searchResponse = client.prepareSearch("test")
+            .setQuery(matchAllQuery())
             .addAggregation(
                 timeSeriesAggregationBuilder.subAggregation(
                     new ScriptedMetricAggregationBuilder("sub_agg").initScript(
diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java
index eb78101c6181a..9380610d730bf 100644
--- a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java
+++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesTsidHashCardinalityIT.java
@@ -99,17 +99,12 @@ public void setUp() throws Exception {
             .setSettings(
                 settings.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersions.NEW_INDEXVERSION_FORMAT).build()
             )
-            .setMapping(mapping)
-            .get()
-        );
-
-        assertAcked(
+                .setMapping(mapping),
             indicesAdmin().prepareCreate(afterIndex)
                 .setSettings(
                     settings.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersions.TIME_SERIES_ID_HASHING).build()
                 )
                 .setMapping(mapping)
-                .get()
         );
 
         final TimeSeriesDataset timeSeriesDataset = new TimeSeriesDataset();
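The SearchCancellationIT change works because Elasticsearch carries request headers such as error_trace in the thread context rather than on the request object, so the header must be set before the request is built on the same thread. A rough, self-contained model of that idea (MiniThreadContext is a hypothetical stand-in, not the real org.elasticsearch ThreadContext):

import java.util.HashMap;
import java.util.Map;

final class MiniThreadContext {
    private static final ThreadLocal<Map<String, String>> HEADERS = ThreadLocal.withInitial(HashMap::new);

    static void putHeader(String key, String value) {
        HEADERS.get().put(key, value);
    }

    static String getHeader(String key) {
        return HEADERS.get().get(key);
    }

    public static void main(String[] args) {
        // Mirrors the test: set the header first, then anything that runs on
        // this thread (e.g. building and sending the search) can read it back.
        putHeader("error_trace", "true");
        System.out.println(getHeader("error_trace")); // prints: true
    }
}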
diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle
index f4f7e787d2b7b..173e1eeef60a2 100644
--- a/modules/analysis-common/build.gradle
+++ b/modules/analysis-common/build.gradle
@@ -13,8 +13,8 @@ apply plugin: 'elasticsearch.yaml-rest-compat-test'
 apply plugin: 'elasticsearch.internal-cluster-test'
 
 esplugin {
-  description 'Adds "built in" analyzers to Elasticsearch.'
-  classname 'org.elasticsearch.analysis.common.CommonAnalysisPlugin'
+  description = 'Adds "built in" analyzers to Elasticsearch.'
+  classname = 'org.elasticsearch.analysis.common.CommonAnalysisPlugin'
   extendedPlugins = ['lang-painless']
 }
 
@@ -37,3 +37,16 @@ artifacts {
 tasks.named("yamlRestCompatTestTransform").configure { task ->
   task.replaceValueInMatch("tokens.0.token", "absenț", "romanian")
 }
+
+tasks.named("yamlRestTest").configure {
+  if (buildParams.getRuntimeJavaVersion().map { it.majorVersion.toInteger() }.get() >= 24
+    || "-Des.entitlements.enabled=true".equals(System.getProperty("tests.jvm.argline"))) {
+    systemProperty 'tests.rest.blacklist',
+      [
+        // AWAITSFIX: this test relies on security manager, which doesn't exist in JDK 24,
+        // and entitlements don't yet replace the functionality.
+        // see https://github.com/elastic/elasticsearch/issues/119130
+        'analysis-common/40_token_filters/stemmer_override file access',
+      ].join(',')
+  }
+}
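The same JDK gate the build script applies can be expressed in plain Java; a hedged sketch (JdkGateDemo is hypothetical) using Runtime.version().feature(), which reports the major feature release, e.g. 24 on JDK 24, where the Security Manager is no longer available:

public class JdkGateDemo {
    public static void main(String[] args) {
        // Mirrors the condition in the Gradle block above.
        boolean skipSecurityManagerTests = Runtime.version().feature() >= 24
            || "-Des.entitlements.enabled=true".equals(System.getProperty("tests.jvm.argline"));
        System.out.println("skip SecurityManager-dependent tests: " + skipSecurityManagerTests);
    }
}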
diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java
index a97154fd4d1ff..c980aaba71444 100644
--- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java
+++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java
@@ -101,7 +101,12 @@
 import org.apache.lucene.analysis.tr.TurkishAnalyzer;
 import org.apache.lucene.analysis.util.ElisionFilter;
 import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.common.logging.DeprecationCategory;
+import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.index.analysis.AnalyzerProvider;
 import org.elasticsearch.index.analysis.CharFilterFactory;
@@ -134,6 +139,8 @@
 
 public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, ScriptPlugin {
 
+    private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(CommonAnalysisPlugin.class);
+
     private final SetOnce<ScriptService> scriptServiceHolder = new SetOnce<>();
     private final SetOnce<SynonymsManagementAPIService> synonymsManagementServiceHolder = new SetOnce<>();
@@ -224,6 +231,28 @@ public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
         filters.put("dictionary_decompounder", requiresAnalysisSettings(DictionaryCompoundWordTokenFilterFactory::new));
         filters.put("dutch_stem", DutchStemTokenFilterFactory::new);
         filters.put("edge_ngram", EdgeNGramTokenFilterFactory::new);
+        filters.put("edgeNGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> {
+            return new EdgeNGramTokenFilterFactory(indexSettings, environment, name, settings) {
+                @Override
+                public TokenStream create(TokenStream tokenStream) {
+                    if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_8_0_0)) {
+                        throw new IllegalArgumentException(
+                            "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. "
+                                + "Please change the filter name to [edge_ngram] instead."
+                        );
+                    } else {
+                        deprecationLogger.warn(
+                            DeprecationCategory.ANALYSIS,
+                            "edgeNGram_deprecation",
+                            "The [edgeNGram] token filter name is deprecated and will be removed in a future version. "
+                                + "Please change the filter name to [edge_ngram] instead."
+                        );
+                    }
+                    return super.create(tokenStream);
+                }
+
+            };
+        });
         filters.put("elision", requiresAnalysisSettings(ElisionTokenFilterFactory::new));
         filters.put("fingerprint", FingerprintTokenFilterFactory::new);
         filters.put("flatten_graph", FlattenGraphTokenFilterFactory::new);
@@ -243,6 +272,28 @@ public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
         filters.put("min_hash", MinHashTokenFilterFactory::new);
         filters.put("multiplexer", MultiplexerTokenFilterFactory::new);
         filters.put("ngram", NGramTokenFilterFactory::new);
+        filters.put("nGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> {
+            return new NGramTokenFilterFactory(indexSettings, environment, name, settings) {
+                @Override
+                public TokenStream create(TokenStream tokenStream) {
+                    if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_8_0_0)) {
+                        throw new IllegalArgumentException(
+                            "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. "
+                                + "Please change the filter name to [ngram] instead."
+                        );
+                    } else {
+                        deprecationLogger.warn(
+                            DeprecationCategory.ANALYSIS,
+                            "nGram_deprecation",
+                            "The [nGram] token filter name is deprecated and will be removed in a future version. "
+                                + "Please change the filter name to [ngram] instead."
+                        );
+                    }
+                    return super.create(tokenStream);
+                }
+
+            };
+        });
         filters.put("pattern_capture", requiresAnalysisSettings(PatternCaptureGroupTokenFilterFactory::new));
         filters.put("pattern_replace", requiresAnalysisSettings(PatternReplaceTokenFilterFactory::new));
         filters.put("persian_normalization", PersianNormalizationFilterFactory::new);
@@ -294,7 +345,39 @@ public Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() {
         tokenizers.put("simple_pattern", SimplePatternTokenizerFactory::new);
         tokenizers.put("simple_pattern_split", SimplePatternSplitTokenizerFactory::new);
         tokenizers.put("thai", ThaiTokenizerFactory::new);
+        tokenizers.put("nGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> {
+            if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_8_0_0)) {
+                throw new IllegalArgumentException(
+                    "The [nGram] tokenizer name was deprecated in 7.6. "
+                        + "Please use the tokenizer name to [ngram] for indices created in versions 8 or higher instead."
+                );
+            } else if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_7_6_0)) {
+                deprecationLogger.warn(
+                    DeprecationCategory.ANALYSIS,
+                    "nGram_tokenizer_deprecation",
+                    "The [nGram] tokenizer name is deprecated and will be removed in a future version. "
+                        + "Please change the tokenizer name to [ngram] instead."
+                );
+            }
+            return new NGramTokenizerFactory(indexSettings, environment, name, settings);
+        });
         tokenizers.put("ngram", NGramTokenizerFactory::new);
+        tokenizers.put("edgeNGram", (IndexSettings indexSettings, Environment environment, String name, Settings settings) -> {
+            if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_8_0_0)) {
+                throw new IllegalArgumentException(
+                    "The [edgeNGram] tokenizer name was deprecated in 7.6. "
+                        + "Please use the tokenizer name to [edge_nGram] for indices created in versions 8 or higher instead."
+                );
+            } else if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.V_7_6_0)) {
+                deprecationLogger.warn(
+                    DeprecationCategory.ANALYSIS,
+                    "edgeNGram_tokenizer_deprecation",
+                    "The [edgeNGram] tokenizer name is deprecated and will be removed in a future version. "
+                        + "Please change the tokenizer name to [edge_ngram] instead."
+                );
+            }
+            return new EdgeNGramTokenizerFactory(indexSettings, environment, name, settings);
+        });
         tokenizers.put("edge_ngram", EdgeNGramTokenizerFactory::new);
         tokenizers.put("char_group", CharGroupTokenizerFactory::new);
         tokenizers.put("classic", ClassicTokenizerFactory::new);
@@ -505,17 +588,53 @@ public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() {
         tokenizers.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new));
         tokenizers.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new));
         tokenizers.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new));
-        tokenizers.add(
-            PreConfiguredTokenizer.indexVersion(
-                "edge_ngram",
-                (version) -> new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE)
-            )
-        );
+        tokenizers.add(PreConfiguredTokenizer.indexVersion("edge_ngram", (version) -> {
+            if (version.onOrAfter(IndexVersions.V_7_3_0)) {
+                return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
+            }
+            return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE);
+        }));
         tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1)));
         tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new));
         // TODO deprecate and remove in API
         // This is already broken with normalization, so backwards compat isn't necessary?
         tokenizers.add(PreConfiguredTokenizer.singleton("lowercase", XLowerCaseTokenizer::new));
+
+        tokenizers.add(PreConfiguredTokenizer.indexVersion("nGram", (version) -> {
+            if (version.onOrAfter(IndexVersions.V_8_0_0)) {
+                throw new IllegalArgumentException(
+                    "The [nGram] tokenizer name was deprecated in 7.6. "
+                        + "Please use the tokenizer name to [ngram] for indices created in versions 8 or higher instead."
+                );
+            } else if (version.onOrAfter(IndexVersions.V_7_6_0)) {
+                deprecationLogger.warn(
+                    DeprecationCategory.ANALYSIS,
+                    "nGram_tokenizer_deprecation",
+                    "The [nGram] tokenizer name is deprecated and will be removed in a future version. "
+                        + "Please change the tokenizer name to [ngram] instead."
+                );
+            }
+            return new NGramTokenizer();
+        }));
+        tokenizers.add(PreConfiguredTokenizer.indexVersion("edgeNGram", (version) -> {
+            if (version.onOrAfter(IndexVersions.V_8_0_0)) {
+                throw new IllegalArgumentException(
+                    "The [edgeNGram] tokenizer name was deprecated in 7.6. "
+                        + "Please use the tokenizer name to [edge_ngram] for indices created in versions 8 or higher instead."
+                );
+            } else if (version.onOrAfter(IndexVersions.V_7_6_0)) {
+                deprecationLogger.warn(
+                    DeprecationCategory.ANALYSIS,
+                    "edgeNGram_tokenizer_deprecation",
+                    "The [edgeNGram] tokenizer name is deprecated and will be removed in a future version. "
+                        + "Please change the tokenizer name to [edge_ngram] instead."
+                );
+            }
+            if (version.onOrAfter(IndexVersions.V_7_3_0)) {
+                return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
+            }
+            return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE);
+        }));
         tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new));
 
         return tokenizers;
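The same guard appears four times in the hunks above: reject the legacy camel-case name outright for indices created on or after the removal version, and emit only a deprecation warning for older indices. A distilled, self-contained model of that gate (all names hypothetical; integer versions stand in for IndexVersions constants):

final class LegacyNameGate {
    static final int DEPRECATED_IN = 7_06_00; // stands in for IndexVersions.V_7_6_0
    static final int REMOVED_IN = 8_00_00;    // stands in for IndexVersions.V_8_0_0

    static void check(int indexCreatedVersion, String legacy, String replacement) {
        if (indexCreatedVersion >= REMOVED_IN) {
            throw new IllegalArgumentException("[" + legacy + "] cannot be used in new indices; use [" + replacement + "]");
        } else if (indexCreatedVersion >= DEPRECATED_IN) {
            System.err.println("deprecation: [" + legacy + "] will be removed; use [" + replacement + "]");
        }
    }

    public static void main(String[] args) {
        check(7_09_00, "nGram", "ngram"); // old index: warning only
        try {
            check(8_01_00, "edgeNGram", "edge_ngram"); // new index: rejected
        } catch (IllegalArgumentException e) {
            System.err.println("rejected: " + e.getMessage());
        }
    }
}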
" diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java index cb5c01621f45f..04481515384a8 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SynonymTokenFilterFactory.java @@ -13,7 +13,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.synonym.SynonymFilter; import org.apache.lucene.analysis.synonym.SynonymMap; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexService.IndexCreationContext; @@ -130,8 +129,6 @@ public static SynonymsSource fromSettings(Settings settings) { } } - private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(SynonymTokenFilterFactory.class); - private final String format; private final boolean expand; private final boolean lenient; diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java new file mode 100644 index 0000000000000..9972d58b2dcc1 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java @@ -0,0 +1,292 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.Tokenizer; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.analysis.TokenizerFactory; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.index.IndexVersionUtils; + +import java.io.IOException; +import java.util.Map; + +public class CommonAnalysisPluginTests extends ESTestCase { + + /** + * Check that the deprecated "nGram" filter throws exception for indices created since 7.0.0 and + * logs a warning for earlier indices when the filter is used as a custom filter + */ + public void testNGramFilterInCustomAnalyzerDeprecationError() throws IOException { + final Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put( + IndexMetadata.SETTING_VERSION_CREATED, + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()) + ) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram") + .put("index.analysis.filter.my_ngram.type", "nGram") + .build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settings), settings, commonAnalysisPlugin) + ); + assertEquals( + "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [ngram] instead.", + ex.getMessage() + ); + } + + final Settings settingsPre7 = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put( + IndexMetadata.SETTING_VERSION_CREATED, + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_6_0) + ) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram") + .put("index.analysis.filter.my_ngram.type", "nGram") + .build(); + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settingsPre7), settingsPre7, commonAnalysisPlugin); + assertWarnings( + "The [nGram] token filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [ngram] instead." 
+ ); + } + } + + /** + * Check that the deprecated "edgeNGram" filter throws exception for indices created since 7.0.0 and + * logs a warning for earlier indices when the filter is used as a custom filter + */ + public void testEdgeNGramFilterInCustomAnalyzerDeprecationError() throws IOException { + final Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put( + IndexMetadata.SETTING_VERSION_CREATED, + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()) + ) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram") + .put("index.analysis.filter.my_ngram.type", "edgeNGram") + .build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settings), settings, commonAnalysisPlugin) + ); + assertEquals( + "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [edge_ngram] instead.", + ex.getMessage() + ); + } + + final Settings settingsPre7 = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put( + IndexMetadata.SETTING_VERSION_CREATED, + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_6_0) + ) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram") + .put("index.analysis.filter.my_ngram.type", "edgeNGram") + .build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settingsPre7), settingsPre7, commonAnalysisPlugin); + assertWarnings( + "The [edgeNGram] token filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [edge_ngram] instead." 
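
Taken together, the two filter tests above (which close just below) pin down a single policy: a camel-case filter name is a hard error for indices created on or after 8.0 and only a deprecation warning for 7.x-created indices. A hedged sketch of that gate, reusing only the version constants and messages visible in the tests (the helper class itself is hypothetical):

    import org.elasticsearch.index.IndexVersion;
    import org.elasticsearch.index.IndexVersions;

    final class DeprecatedNameGateSketch {
        // Throws for 8.0+ indices; returns normally (warn-only) for older ones.
        static void check(String deprecated, String replacement, IndexVersion created) {
            if (created.onOrAfter(IndexVersions.V_8_0_0)) {
                throw new IllegalArgumentException(
                    "The ["
                        + deprecated
                        + "] token filter name was deprecated in 6.4 and cannot be used in new indices. "
                        + "Please change the filter name to ["
                        + replacement
                        + "] instead."
                );
            }
            // pre-8.0: the filter still resolves, but a deprecation warning is logged
        }

        private DeprecatedNameGateSketch() {}
    }
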
+ ); + } + } + + /** + * Check that we log a deprecation warning for "nGram" and "edgeNGram" tokenizer names with 7.6 and + * disallow usages for indices created after 8.0 + */ + public void testNGramTokenizerDeprecation() throws IOException { + // tests for prebuilt tokenizer + doTestPrebuiltTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), + false + ); + doTestPrebuiltTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), + false + ); + doTestPrebuiltTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.V_7_6_0, + IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) + ), + true + ); + doTestPrebuiltTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.V_7_6_0, + IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) + ), + true + ); + expectThrows( + IllegalArgumentException.class, + () -> doTestPrebuiltTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()), + true + ) + ); + expectThrows( + IllegalArgumentException.class, + () -> doTestPrebuiltTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()), + true + ) + ); + + // same batch of tests for custom tokenizer definition in the settings + doTestCustomTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), + false + ); + doTestCustomTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), + false + ); + doTestCustomTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.V_7_6_0, + IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) + ), + true + ); + doTestCustomTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.V_7_6_0, + IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) + ), + true + ); + expectThrows( + IllegalArgumentException.class, + () -> doTestCustomTokenizerDeprecation( + "nGram", + "ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()), + true + ) + ); + expectThrows( + IllegalArgumentException.class, + () -> doTestCustomTokenizerDeprecation( + "edgeNGram", + "edge_ngram", + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_8_0_0, IndexVersion.current()), + true + ) + ); + } + + public void doTestPrebuiltTokenizerDeprecation(String deprecatedName, String replacement, IndexVersion version, boolean expectWarning) + throws IOException { + final Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetadata.SETTING_VERSION_CREATED, version) + .build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + Map tokenizers = createTestAnalysis( + IndexSettingsModule.newIndexSettings("index", 
settings), + settings, + commonAnalysisPlugin + ).tokenizer; + TokenizerFactory tokenizerFactory = tokenizers.get(deprecatedName); + + Tokenizer tokenizer = tokenizerFactory.create(); + assertNotNull(tokenizer); + if (expectWarning) { + assertWarnings( + "The [" + + deprecatedName + + "] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [" + + replacement + + "] instead." + ); + } + } + } + + public void doTestCustomTokenizerDeprecation(String deprecatedName, String replacement, IndexVersion version, boolean expectWarning) + throws IOException { + final Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetadata.SETTING_VERSION_CREATED, version) + .put("index.analysis.analyzer.custom_analyzer.type", "custom") + .put("index.analysis.analyzer.custom_analyzer.tokenizer", "my_tokenizer") + .put("index.analysis.tokenizer.my_tokenizer.type", deprecatedName) + .build(); + + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settings), settings, commonAnalysisPlugin); + + if (expectWarning) { + assertWarnings( + "The [" + + deprecatedName + + "] tokenizer name is deprecated and will be removed in a future version. " + + "Please change the tokenizer name to [" + + replacement + + "] instead." + ); + } + } + } +} diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java index 11d1653439e59..c998e927e25a8 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java @@ -34,7 +34,7 @@ public class EdgeNGramTokenizerTests extends ESTokenStreamTestCase { - private static IndexAnalyzers buildAnalyzers(IndexVersion version, String tokenizer) throws IOException { + private IndexAnalyzers buildAnalyzers(IndexVersion version, String tokenizer) throws IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); Settings indexSettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, version) @@ -54,6 +54,7 @@ public void testPreConfiguredTokenizer() throws IOException { assertNotNull(analyzer); assertAnalyzesTo(analyzer, "test", new String[] { "t", "te" }); } + } public void testCustomTokenChars() throws IOException { diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java index 8c365a1362f85..35c01b5b9296f 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java @@ -161,7 +161,7 @@ public void testBackwardsCompatibilityEdgeNgramTokenFilter() throws Exception { for (int i = 0; i < iters; i++) { final Index index = new Index("test", "_na_"); final String name = "ngr"; - IndexVersion v = IndexVersionUtils.randomVersion(random()); + IndexVersion v = IndexVersionUtils.randomVersion(); Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 
3); boolean reverse = random().nextBoolean(); if (reverse) { diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PersianAnalyzerProviderTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PersianAnalyzerProviderTests.java index 7b962538c2a10..153c3e9549285 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PersianAnalyzerProviderTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PersianAnalyzerProviderTests.java @@ -56,7 +56,7 @@ public void testPersianAnalyzerPostLucene10() throws IOException { public void testPersianAnalyzerPreLucene10() throws IOException { IndexVersion preLucene10Version = IndexVersionUtils.randomVersionBetween( random(), - IndexVersionUtils.getFirstVersion(), + IndexVersionUtils.getLowestReadCompatibleVersion(), IndexVersionUtils.getPreviousVersion(IndexVersions.UPGRADE_TO_LUCENE_10_0_0) ); Settings settings = ESTestCase.indexSettings(1, 1) diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/RomanianAnalyzerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/RomanianAnalyzerTests.java index 1af44bc71f35d..29e27e62e3164 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/RomanianAnalyzerTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/RomanianAnalyzerTests.java @@ -57,7 +57,7 @@ public void testRomanianAnalyzerPostLucene10() throws IOException { public void testRomanianAnalyzerPreLucene10() throws IOException { IndexVersion preLucene10Version = IndexVersionUtils.randomVersionBetween( random(), - IndexVersionUtils.getFirstVersion(), + IndexVersionUtils.getLowestReadCompatibleVersion(), IndexVersionUtils.getPreviousVersion(IndexVersions.UPGRADE_TO_LUCENE_10_0_0) ); Settings settings = ESTestCase.indexSettings(1, 1) diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactoryTests.java index bb06c221873b5..4e774d92e3d62 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactoryTests.java @@ -39,7 +39,7 @@ public class StemmerTokenFilterFactoryTests extends ESTokenStreamTestCase { public void testEnglishFilterFactory() throws IOException { int iters = scaledRandomIntBetween(20, 100); for (int i = 0; i < iters; i++) { - IndexVersion v = IndexVersionUtils.randomVersion(random()); + IndexVersion v = IndexVersionUtils.randomVersion(); Settings settings = Settings.builder() .put("index.analysis.filter.my_english.type", "stemmer") .put("index.analysis.filter.my_english.language", "english") @@ -66,7 +66,7 @@ public void testPorter2FilterFactory() throws IOException { int iters = scaledRandomIntBetween(20, 100); for (int i = 0; i < iters; i++) { - IndexVersion v = IndexVersionUtils.randomVersion(random()); + IndexVersion v = IndexVersionUtils.randomVersion(); Settings settings = Settings.builder() .put("index.analysis.filter.my_porter2.type", "stemmer") .put("index.analysis.filter.my_porter2.language", "porter2") @@ -90,7 +90,7 @@ public void testPorter2FilterFactory() throws IOException { } public void testMultipleLanguagesThrowsException() throws IOException { - IndexVersion v = 
IndexVersionUtils.randomVersion(random()); + IndexVersion v = IndexVersionUtils.randomVersion(); Settings settings = Settings.builder() .put("index.analysis.filter.my_english.type", "stemmer") .putList("index.analysis.filter.my_english.language", "english", "light_english") @@ -142,7 +142,7 @@ private static Analyzer createGermanStemmer(String variant, IndexVersion v) thro } public void testKpDeprecation() throws IOException { - IndexVersion v = IndexVersionUtils.randomVersion(random()); + IndexVersion v = IndexVersionUtils.randomVersion(); Settings settings = Settings.builder() .put("index.analysis.filter.my_kp.type", "stemmer") .put("index.analysis.filter.my_kp.language", "kp") @@ -155,7 +155,7 @@ public void testKpDeprecation() throws IOException { } public void testLovinsDeprecation() throws IOException { - IndexVersion v = IndexVersionUtils.randomVersion(random()); + IndexVersion v = IndexVersionUtils.randomVersion(); Settings settings = Settings.builder() .put("index.analysis.filter.my_lovins.type", "stemmer") .put("index.analysis.filter.my_lovins.language", "lovins") diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java index 4fc6ca96b5f08..af57b8270ff02 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java @@ -118,7 +118,7 @@ public void testSynonymWordDeleteByAnalyzer() throws IOException { // Test with an index version where lenient should always be false by default IndexVersion randomNonLenientIndexVersion = IndexVersionUtils.randomVersionBetween( random(), - IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.INDEX_SORTING_ON_NESTED ); assertIsNotLenient.accept(randomNonLenientIndexVersion, false); @@ -177,7 +177,7 @@ public void testSynonymWordDeleteByAnalyzerFromFile() throws IOException { // Test with an index version where lenient should always be false by default IndexVersion randomNonLenientIndexVersion = IndexVersionUtils.randomVersionBetween( random(), - IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.INDEX_SORTING_ON_NESTED ); assertIsNotLenient.accept(randomNonLenientIndexVersion, false); @@ -231,7 +231,7 @@ public void testExpandSynonymWordDeleteByAnalyzer() throws IOException { // Test with an index version where lenient should always be false by default IndexVersion randomNonLenientIndexVersion = IndexVersionUtils.randomVersionBetween( random(), - IndexVersions.MINIMUM_COMPATIBLE, + IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersions.INDEX_SORTING_ON_NESTED ); assertIsNotLenient.accept(randomNonLenientIndexVersion, false); @@ -338,7 +338,7 @@ public void testShingleFilters() { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .put("index.analysis.filter.synonyms.type", "synonym") @@ -392,7 +392,7 @@ public void testPreconfiguredTokenFilters() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - 
IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .build(); @@ -424,7 +424,7 @@ public void testDisallowedTokenFilters() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .putList("common_words", "a", "b") diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java index 6bec8dc1ebc62..d30e9d3c68cc9 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/UniqueTokenFilterTests.java @@ -124,11 +124,7 @@ public void testOldVersionGetXUniqueTokenFilter() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.MINIMUM_COMPATIBLE, - IndexVersionUtils.getPreviousVersion(IndexVersions.UNIQUE_TOKEN_FILTER_POS_FIX) - ) + IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.UNIQUE_TOKEN_FILTER_POS_FIX) ) .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); diff --git a/modules/apm/build.gradle b/modules/apm/build.gradle index b510e2403e933..07e6c7a042135 100644 --- a/modules/apm/build.gradle +++ b/modules/apm/build.gradle @@ -7,9 +7,9 @@ apply plugin: 'elasticsearch.internal-es-plugin' esplugin { - name 'apm' - description 'Provides APM integration for Elasticsearch' - classname 'org.elasticsearch.telemetry.apm.APM' + name = 'apm' + description = 'Provides APM integration for Elasticsearch' + classname = 'org.elasticsearch.telemetry.apm.APM' } def otelVersion = '1.31.0' diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java index 339a4ec24ca13..43447cfa21a62 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java @@ -92,14 +92,7 @@ public List<Setting<?>> getSettings() { APMAgentSettings.TELEMETRY_TRACING_ENABLED_SETTING, APMAgentSettings.TELEMETRY_TRACING_NAMES_INCLUDE_SETTING, APMAgentSettings.TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING, - APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES, - // The settings below are deprecated and are currently kept as fallback.
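
The remainder of this hunk and the APMAgentSettings.java diff below remove the deprecated tracing.apm.* fallbacks, leaving each telemetry.* setting as a plain definition. For orientation, a minimal sketch of the after-state of one such setting, using only the key, properties, and factory method visible in the hunk (the wrapper class is hypothetical):

    import java.util.List;

    import org.elasticsearch.common.settings.Setting;

    import static org.elasticsearch.common.settings.Setting.Property.NodeScope;
    import static org.elasticsearch.common.settings.Setting.Property.OperatorDynamic;

    final class TelemetrySettingSketch {
        // No fallback setting and no DeprecatedWarning property any more:
        // legacy tracing.apm.* keys are now simply unknown and rejected.
        static final Setting<List<String>> TRACING_NAMES_INCLUDE = Setting.stringListSetting(
            "telemetry.tracing.names.include",
            OperatorDynamic,
            NodeScope
        );

        private TelemetrySettingSketch() {}
    }
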
- APMAgentSettings.TRACING_APM_SECRET_TOKEN_SETTING, - APMAgentSettings.TRACING_APM_API_KEY_SETTING, - APMAgentSettings.TRACING_APM_ENABLED_SETTING, - APMAgentSettings.TRACING_APM_NAMES_INCLUDE_SETTING, - APMAgentSettings.TRACING_APM_NAMES_EXCLUDE_SETTING, - APMAgentSettings.TRACING_APM_SANITIZE_FIELD_NAMES + APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES ); } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java index 99b2a4510bf93..8647761e2defe 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java @@ -25,9 +25,7 @@ import java.util.List; import java.util.Objects; import java.util.Set; -import java.util.function.Function; -import static org.elasticsearch.common.settings.Setting.Property.DeprecatedWarning; import static org.elasticsearch.common.settings.Setting.Property.NodeScope; import static org.elasticsearch.common.settings.Setting.Property.OperatorDynamic; @@ -101,9 +99,6 @@ public void setAgentSetting(String key, String value) { private static final String TELEMETRY_SETTING_PREFIX = "telemetry."; - // The old legacy prefix - private static final String LEGACY_TRACING_APM_SETTING_PREFIX = "tracing.apm."; - /** * Allow-list of APM agent config keys users are permitted to configure. * @see APM Java Agent Configuration */ @@ -248,56 +243,24 @@ private static Setting<String> concreteAgentSetting(String namespace, String qua public static final Setting.AffixSetting<String> APM_AGENT_SETTINGS = Setting.prefixKeySetting( TELEMETRY_SETTING_PREFIX + "agent.", - LEGACY_TRACING_APM_SETTING_PREFIX + "agent.", - (namespace, qualifiedKey) -> qualifiedKey.startsWith(LEGACY_TRACING_APM_SETTING_PREFIX) - ? concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic, DeprecatedWarning) - : concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic) + null, // no fallback + (namespace, qualifiedKey) -> concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic) ); - /** - * @deprecated in favor of TELEMETRY_TRACING_NAMES_INCLUDE_SETTING. - */ - @Deprecated - public static final Setting<List<String>> TRACING_APM_NAMES_INCLUDE_SETTING = Setting.stringListSetting( - LEGACY_TRACING_APM_SETTING_PREFIX + "names.include", - OperatorDynamic, - NodeScope, - DeprecatedWarning - ); - - public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.listSetting( + public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.stringListSetting( TELEMETRY_SETTING_PREFIX + "tracing.names.include", - TRACING_APM_NAMES_INCLUDE_SETTING, - Function.identity(), OperatorDynamic, NodeScope ); - /** - * @deprecated in favor of TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING. - */ - @Deprecated - public static final Setting<List<String>> TRACING_APM_NAMES_EXCLUDE_SETTING = Setting.stringListSetting( - LEGACY_TRACING_APM_SETTING_PREFIX + "names.exclude", - OperatorDynamic, - NodeScope, - DeprecatedWarning - ); - - public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.listSetting( + public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.stringListSetting( TELEMETRY_SETTING_PREFIX + "tracing.names.exclude", - TRACING_APM_NAMES_EXCLUDE_SETTING, - Function.identity(), OperatorDynamic, NodeScope ); - /** - * @deprecated in favor of TELEMETRY_TRACING_SANITIZE_FIELD_NAMES.
- */ - @Deprecated - public static final Setting<List<String>> TRACING_APM_SANITIZE_FIELD_NAMES = Setting.stringListSetting( - LEGACY_TRACING_APM_SETTING_PREFIX + "sanitize_field_names", + public static final Setting<List<String>> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.stringListSetting( + TELEMETRY_SETTING_PREFIX + "tracing.sanitize_field_names", List.of( "password", "passwd", @@ -313,33 +276,12 @@ private static Setting<String> concreteAgentSetting(String namespace, String qua "set-cookie" ), OperatorDynamic, - NodeScope, - DeprecatedWarning - ); - - public static final Setting<List<String>> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.listSetting( - TELEMETRY_SETTING_PREFIX + "tracing.sanitize_field_names", - TRACING_APM_SANITIZE_FIELD_NAMES, - Function.identity(), - OperatorDynamic, NodeScope ); - /** - * @deprecated in favor of TELEMETRY_TRACING_ENABLED_SETTING. - */ - @Deprecated - public static final Setting<Boolean> TRACING_APM_ENABLED_SETTING = Setting.boolSetting( - LEGACY_TRACING_APM_SETTING_PREFIX + "enabled", - false, - OperatorDynamic, - NodeScope, - DeprecatedWarning - ); - public static final Setting<Boolean> TELEMETRY_TRACING_ENABLED_SETTING = Setting.boolSetting( TELEMETRY_SETTING_PREFIX + "tracing.enabled", - TRACING_APM_ENABLED_SETTING, + false, OperatorDynamic, NodeScope ); @@ -351,33 +293,13 @@ private static Setting<String> concreteAgentSetting(String namespace, String qua NodeScope ); - /** - * @deprecated in favor of TELEMETRY_SECRET_TOKEN_SETTING. - */ - @Deprecated - public static final Setting<SecureString> TRACING_APM_SECRET_TOKEN_SETTING = SecureSetting.secureString( - LEGACY_TRACING_APM_SETTING_PREFIX + "secret_token", - null, - DeprecatedWarning - ); - public static final Setting<SecureString> TELEMETRY_SECRET_TOKEN_SETTING = SecureSetting.secureString( TELEMETRY_SETTING_PREFIX + "secret_token", - TRACING_APM_SECRET_TOKEN_SETTING - ); - - /** - * @deprecated in favor of TELEMETRY_API_KEY_SETTING. - */ - @Deprecated - public static final Setting<SecureString> TRACING_APM_API_KEY_SETTING = SecureSetting.secureString( - LEGACY_TRACING_APM_SETTING_PREFIX + "api_key", - null, - DeprecatedWarning + null ); public static final Setting<SecureString> TELEMETRY_API_KEY_SETTING = SecureSetting.secureString( TELEMETRY_SETTING_PREFIX + "api_key", - TRACING_APM_API_KEY_SETTING + null ); } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java index cb74d62137815..f60179d533950 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; +import org.elasticsearch.lucene.util.automaton.MinimizationOperations; import org.elasticsearch.tasks.Task; import org.elasticsearch.telemetry.apm.internal.APMAgentSettings; import org.elasticsearch.telemetry.tracing.TraceContext; @@ -439,7 +440,7 @@ private static CharacterRunAutomaton buildAutomaton(List<String> includePatterns ?
includeAutomaton : Operations.minus(includeAutomaton, excludeAutomaton, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT); - return new CharacterRunAutomaton(Operations.determinize(finalAutomaton, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT)); + return new CharacterRunAutomaton(MinimizationOperations.minimize(finalAutomaton, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT)); } private static Automaton patternsToAutomaton(List patterns) { diff --git a/modules/apm/src/main/plugin-metadata/entitlement-policy.yaml b/modules/apm/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 0000000000000..9c10bafca42f9 --- /dev/null +++ b/modules/apm/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,4 @@ +org.elasticsearch.telemetry.apm: + - create_class_loader +elastic.apm.agent: + - set_https_connection_properties diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java index a60048c82a3c9..5516672420924 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java @@ -11,8 +11,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.mockito.Mockito; @@ -21,21 +19,13 @@ import java.util.Set; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.APM_AGENT_SETTINGS; -import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_API_KEY_SETTING; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING; -import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_SECRET_TOKEN_SETTING; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_ENABLED_SETTING; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_NAMES_INCLUDE_SETTING; import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES; -import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_API_KEY_SETTING; -import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_ENABLED_SETTING; -import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_NAMES_EXCLUDE_SETTING; -import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_NAMES_INCLUDE_SETTING; -import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_SANITIZE_FIELD_NAMES; -import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_SECRET_TOKEN_SETTING; -import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasItem; import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.mock; @@ -70,14 +60,6 @@ public void testEnableTracing() { } } - public void 
testEnableTracingUsingLegacySetting() { - Settings settings = Settings.builder().put(TRACING_APM_ENABLED_SETTING.getKey(), true).build(); - apmAgentSettings.initAgentSystemProperties(settings); - - verify(apmAgentSettings).setAgentSetting("recording", "true"); - assertWarnings("[tracing.apm.enabled] setting was deprecated in Elasticsearch and will be removed in a future release."); - } - public void testEnableMetrics() { for (boolean tracingEnabled : List.of(true, false)) { clearInvocations(apmAgentSettings, apmTelemetryProvider.getMeterService()); @@ -121,14 +103,6 @@ public void testDisableTracing() { } } - public void testDisableTracingUsingLegacySetting() { - Settings settings = Settings.builder().put(TRACING_APM_ENABLED_SETTING.getKey(), false).build(); - apmAgentSettings.initAgentSystemProperties(settings); - - verify(apmAgentSettings).setAgentSetting("recording", "false"); - assertWarnings("[tracing.apm.enabled] setting was deprecated in Elasticsearch and will be removed in a future release."); - } - public void testDisableMetrics() { for (boolean tracingEnabled : List.of(true, false)) { clearInvocations(apmAgentSettings, apmTelemetryProvider.getMeterService()); @@ -181,70 +155,18 @@ public void testSetAgentSettings() { verify(apmAgentSettings).setAgentSetting("span_compression_enabled", "true"); } - public void testSetAgentsSettingsWithLegacyPrefix() { - Settings settings = Settings.builder() - .put(TELEMETRY_TRACING_ENABLED_SETTING.getKey(), true) - .put("tracing.apm.agent.span_compression_enabled", "true") - .build(); - apmAgentSettings.initAgentSystemProperties(settings); - - verify(apmAgentSettings).setAgentSetting("recording", "true"); - verify(apmAgentSettings).setAgentSetting("span_compression_enabled", "true"); - assertWarnings( - "[tracing.apm.agent.span_compression_enabled] setting was deprecated in Elasticsearch and will be removed in a future release." - ); - } - /** * Check that invalid or forbidden APM agent settings are rejected. */ public void testRejectForbiddenOrUnknownAgentSettings() { - List prefixes = List.of(APM_AGENT_SETTINGS.getKey(), "tracing.apm.agent."); - for (String prefix : prefixes) { - Settings settings = Settings.builder().put(prefix + "unknown", "true").build(); - Exception exception = expectThrows(IllegalArgumentException.class, () -> APM_AGENT_SETTINGS.getAsMap(settings)); - assertThat(exception.getMessage(), containsString("[" + prefix + "unknown]")); - } - // though, accept / ignore nested global_labels - for (String prefix : prefixes) { - Settings settings = Settings.builder().put(prefix + "global_labels.abc", "123").build(); - APMAgentSettings.APM_AGENT_SETTINGS.getAsMap(settings); - - if (prefix.startsWith("tracing.apm.agent.")) { - assertWarnings( - "[tracing.apm.agent.global_labels.abc] setting was deprecated in Elasticsearch and will be removed in a future release." 
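
With the legacy prefix gone, the surviving validation test (continued below) covers only telemetry.agent.* keys: unknown keys are rejected, while nested global_labels entries pass through. A hedged usage sketch mirroring those assertions (the wrapper class and method are hypothetical):

    import org.elasticsearch.common.settings.Settings;

    import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.APM_AGENT_SETTINGS;

    final class AgentSettingsValidationSketch {
        static void demo() {
            // Nested global_labels entries are accepted and surfaced in the map...
            Settings labels = Settings.builder().put("telemetry.agent.global_labels.abc", "123").build();
            assert "123".equals(APM_AGENT_SETTINGS.getAsMap(labels).get("global_labels.abc"));

            // ...while any other unknown agent key fails validation.
            Settings unknown = Settings.builder().put("telemetry.agent.unknown", "true").build();
            try {
                APM_AGENT_SETTINGS.getAsMap(unknown);
            } catch (IllegalArgumentException expected) {
                // the message names the offending key, e.g. [telemetry.agent.unknown]
            }
        }

        private AgentSettingsValidationSketch() {}
    }
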
- ); - } - } - } - - public void testTelemetryTracingNamesIncludeFallback() { - Settings settings = Settings.builder().put(TRACING_APM_NAMES_INCLUDE_SETTING.getKey(), "abc,xyz").build(); - - List included = TELEMETRY_TRACING_NAMES_INCLUDE_SETTING.get(settings); - - assertThat(included, containsInAnyOrder("abc", "xyz")); - assertWarnings("[tracing.apm.names.include] setting was deprecated in Elasticsearch and will be removed in a future release."); - } - - public void testTelemetryTracingNamesExcludeFallback() { - Settings settings = Settings.builder().put(TRACING_APM_NAMES_EXCLUDE_SETTING.getKey(), "abc,xyz").build(); - - List included = TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING.get(settings); - - assertThat(included, containsInAnyOrder("abc", "xyz")); - assertWarnings("[tracing.apm.names.exclude] setting was deprecated in Elasticsearch and will be removed in a future release."); - } - - public void testTelemetryTracingSanitizeFieldNamesFallback() { - Settings settings = Settings.builder().put(TRACING_APM_SANITIZE_FIELD_NAMES.getKey(), "abc,xyz").build(); - - List included = TELEMETRY_TRACING_SANITIZE_FIELD_NAMES.get(settings); + String prefix = APM_AGENT_SETTINGS.getKey(); + Settings settings = Settings.builder().put(prefix + "unknown", "true").build(); + Exception exception = expectThrows(IllegalArgumentException.class, () -> APM_AGENT_SETTINGS.getAsMap(settings)); + assertThat(exception.getMessage(), containsString("[" + prefix + "unknown]")); - assertThat(included, containsInAnyOrder("abc", "xyz")); - assertWarnings( - "[tracing.apm.sanitize_field_names] setting was deprecated in Elasticsearch and will be removed in a future release." - ); + // though, accept / ignore nested global_labels + var map = APMAgentSettings.APM_AGENT_SETTINGS.getAsMap(Settings.builder().put(prefix + "global_labels.abc", "123").build()); + assertThat(map, hasEntry("global_labels.abc", "123")); } public void testTelemetryTracingSanitizeFieldNamesFallbackDefault() { @@ -252,28 +174,6 @@ public void testTelemetryTracingSanitizeFieldNamesFallbackDefault() { assertThat(included, hasItem("password")); // and more defaults } - public void testTelemetrySecretTokenFallback() { - MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(TRACING_APM_SECRET_TOKEN_SETTING.getKey(), "verysecret"); - Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); - - try (SecureString secureString = TELEMETRY_SECRET_TOKEN_SETTING.get(settings)) { - assertEquals("verysecret", secureString.toString()); - } - assertWarnings("[tracing.apm.secret_token] setting was deprecated in Elasticsearch and will be removed in a future release."); - } - - public void testTelemetryApiKeyFallback() { - MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(TRACING_APM_API_KEY_SETTING.getKey(), "abc"); - Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); - - try (SecureString secureString = TELEMETRY_API_KEY_SETTING.get(settings)) { - assertEquals("abc", secureString.toString()); - } - assertWarnings("[tracing.apm.api_key] setting was deprecated in Elasticsearch and will be removed in a future release."); - } - /** * Check that invalid or forbidden APM agent settings are rejected if their last part resembles an allowed setting. 
*/ diff --git a/modules/data-streams/build.gradle b/modules/data-streams/build.gradle index 8ae56101ef01e..60bc8d1dc6a92 100644 --- a/modules/data-streams/build.gradle +++ b/modules/data-streams/build.gradle @@ -7,8 +7,8 @@ apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { - description 'Elasticsearch Expanded Pack Plugin - Data Streams' - classname 'org.elasticsearch.datastreams.DataStreamsPlugin' + description = 'Elasticsearch Expanded Pack Plugin - Data Streams' + classname = 'org.elasticsearch.datastreams.DataStreamsPlugin' } restResources { @@ -20,6 +20,7 @@ restResources { dependencies { testImplementation project(path: ':test:test-clusters') + testImplementation project(":modules:mapper-extras") internalClusterTestImplementation project(":modules:mapper-extras") } @@ -70,4 +71,16 @@ tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("data_stream/200_rollover_failure_store/Lazily roll over a data stream's failure store after a shard failure", "Configuring the failure store via data stream templates is not supported anymore.") task.skipTest("data_stream/200_rollover_failure_store/Don't roll over a data stream's failure store when conditions aren't met", "Configuring the failure store via data stream templates is not supported anymore.") task.skipTest("data_stream/200_rollover_failure_store/Roll over a data stream's failure store with conditions", "Configuring the failure store via data stream templates is not supported anymore.") + + task.skipTest("data_stream/200_rollover_failure_store/Rolling over a failure store on a data stream without the failure store enabled should work", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/200_rollover_failure_store/Rolling over an uninitialized failure store should initialize it", "Rolling over a data stream using target_failure_store is no longer supported.") + + task.skipTest("data_stream/210_rollover_failure_store/A failure store marked for lazy rollover should only be rolled over when there is a failure", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Don't roll over a data stream's failure store when conditions aren't met", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Rolling over a failure store on a data stream without the failure store enabled should work", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Rolling over an uninitialized failure store should initialize it", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Roll over a data stream's failure store with conditions", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Lazily roll over a data stream's failure store after an ingest failure", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Lazily roll over a data stream's failure store after a shard failure", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Roll over a data
stream's failure store without conditions", "Rolling over a data stream using target_failure_store is no longer supported.") }) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java index ac73385a97d70..91f18ad3573fd 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java @@ -493,7 +493,7 @@ private static ShardStats getShardStats(IndexMetadata indexMeta, int shardIndex, CommonStats stats = new CommonStats(); stats.docs = new DocsStats(100, 0, randomByteSizeValue().getBytes()); stats.store = new StoreStats(); - stats.indexing = new IndexingStats(new IndexingStats.Stats(1, 1, 1, 1, 1, 1, 1, 1, false, 1, targetWriteLoad, 1)); + stats.indexing = new IndexingStats(new IndexingStats.Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, false, 1, targetWriteLoad, 1)); return new ShardStats(shardRouting, new ShardPath(false, path, path, shardId), stats, null, null, null, false, 0); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index 0e03045a090f8..2739eb51376ea 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -588,11 +588,15 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { verifyResolvability(dataStreamName, indicesAdmin().prepareForceMerge(dataStreamName), false); verifyResolvability(dataStreamName, indicesAdmin().prepareValidateQuery(dataStreamName), false); verifyResolvability(dataStreamName, indicesAdmin().prepareRecoveries(dataStreamName), false); - verifyResolvability(dataStreamName, indicesAdmin().prepareGetAliases("dummy").setIndices(dataStreamName), false); + verifyResolvability( + dataStreamName, + indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "dummy").setIndices(dataStreamName), + false + ); verifyResolvability(dataStreamName, indicesAdmin().prepareGetFieldMappings(dataStreamName), false); verifyResolvability(dataStreamName, indicesAdmin().preparePutMapping(dataStreamName).setSource(""" {"_doc":{"properties": {"my_field":{"type":"keyword"}}}}""", XContentType.JSON), false); - verifyResolvability(dataStreamName, indicesAdmin().prepareGetMappings(dataStreamName), false); + verifyResolvability(dataStreamName, indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, dataStreamName), false); verifyResolvability( dataStreamName, indicesAdmin().prepareUpdateSettings(dataStreamName).setSettings(Settings.builder().put("index.number_of_replicas", 0)), @@ -635,11 +639,11 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { verifyResolvability(wildcardExpression, indicesAdmin().prepareForceMerge(wildcardExpression), false); verifyResolvability(wildcardExpression, indicesAdmin().prepareValidateQuery(wildcardExpression), false); verifyResolvability(wildcardExpression, indicesAdmin().prepareRecoveries(wildcardExpression), false); - verifyResolvability(wildcardExpression, indicesAdmin().prepareGetAliases(wildcardExpression), false); + verifyResolvability(wildcardExpression, 
indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, wildcardExpression), false); verifyResolvability(wildcardExpression, indicesAdmin().prepareGetFieldMappings(wildcardExpression), false); verifyResolvability(wildcardExpression, indicesAdmin().preparePutMapping(wildcardExpression).setSource(""" {"_doc":{"properties": {"my_field":{"type":"keyword"}}}}""", XContentType.JSON), false); - verifyResolvability(wildcardExpression, indicesAdmin().prepareGetMappings(wildcardExpression), false); + verifyResolvability(wildcardExpression, indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, wildcardExpression), false); verifyResolvability(wildcardExpression, indicesAdmin().prepareGetSettings(wildcardExpression), false); verifyResolvability( wildcardExpression, @@ -718,7 +722,7 @@ public void testCannotDeleteComposableTemplateUsedByDataStream() throws Exceptio TransportDeleteComposableIndexTemplateAction.Request deleteRequest = new TransportDeleteComposableIndexTemplateAction.Request("id"); client().execute(TransportDeleteComposableIndexTemplateAction.TYPE, deleteRequest).get(); - GetComposableIndexTemplateAction.Request getReq = new GetComposableIndexTemplateAction.Request("id"); + GetComposableIndexTemplateAction.Request getReq = new GetComposableIndexTemplateAction.Request(TEST_REQUEST_TIMEOUT, "id"); Exception e3 = expectThrows(Exception.class, client().execute(GetComposableIndexTemplateAction.INSTANCE, getReq)); maybeE = ExceptionsHelper.unwrapCausesAndSuppressed(e3, err -> err.getMessage().contains("index template matching [id] not found")); assertTrue(maybeE.isPresent()); @@ -738,7 +742,7 @@ public void testAliasActionsOnDataStreams() throws Exception { IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(addAction); assertAcked(indicesAdmin().aliases(aliasesAddRequest).actionGet()); - GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest()).actionGet(); + GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertThat( response.getDataStreamAliases(), equalTo(Map.of("metrics-foo", List.of(new DataStreamAlias("foo", List.of("metrics-foo"), null, null)))) @@ -764,7 +768,7 @@ public void testDataSteamAliasWithFilter() throws Exception { IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(addAction); assertAcked(indicesAdmin().aliases(aliasesAddRequest).actionGet()); - GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest()).actionGet(); + GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertThat( response.getDataStreamAliases(), equalTo( @@ -794,7 +798,7 @@ public void testDataSteamAliasWithFilter() throws Exception { aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(addAction); assertAcked(indicesAdmin().aliases(aliasesAddRequest).actionGet()); - response = indicesAdmin().getAliases(new GetAliasesRequest()).actionGet(); + response = indicesAdmin().getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertThat( response.getDataStreamAliases(), equalTo( @@ -840,7 +844,7 @@ public void testSearchFilteredAndUnfilteredAlias() throws Exception { aliasesAddRequest.addAliasAction(addFilteredAliasAction); aliasesAddRequest.addAliasAction(addUnfilteredAliasAction); assertAcked(indicesAdmin().aliases(aliasesAddRequest).actionGet()); - GetAliasesResponse response = 
indicesAdmin().getAliases(new GetAliasesRequest()).actionGet(); + GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertThat(response.getDataStreamAliases(), hasKey("logs-foobar")); assertThat( response.getDataStreamAliases().get("logs-foobar"), @@ -884,7 +888,7 @@ public void testRandomDataSteamAliasesUpdate() throws Exception { addAction = new AliasActions(AliasActions.Type.ADD).aliases(alias).indices(dataStreams[0]).filter(indexFilters).writeIndex(true); assertAcked(indicesAdmin().aliases(new IndicesAliasesRequest().addAliasAction(addAction)).actionGet()); - GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest()).actionGet(); + GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertThat(response.getDataStreamAliases().size(), equalTo(dataStreams.length)); List result = response.getDataStreamAliases() .values() @@ -933,7 +937,7 @@ public void testDataSteamAliasWithMalformedFilter() throws Exception { indicesAdmin().aliases(new IndicesAliasesRequest().addAliasAction(addAction)) ); assertThat(e.getMessage(), equalTo("failed to parse filter for alias [" + alias + "]")); - GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest()).actionGet(); + GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertThat(response.getDataStreamAliases(), anEmptyMap()); } @@ -998,7 +1002,7 @@ public void testRemoveDataStreamAliasesMixedExpression() throws Exception { aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("metrics-foo").aliases("my-alias1")); aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("metrics-myindex").aliases("my-alias2")); assertAcked(indicesAdmin().aliases(aliasesAddRequest).actionGet()); - GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest()).actionGet(); + GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertThat( response.getDataStreamAliases(), equalTo(Map.of("metrics-foo", List.of(new DataStreamAlias("my-alias1", List.of("metrics-foo"), null, null)))) @@ -1013,7 +1017,7 @@ public void testRemoveDataStreamAliasesMixedExpression() throws Exception { aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.REMOVE).index("_all").aliases("my-*")); } assertAcked(indicesAdmin().aliases(aliasesAddRequest).actionGet()); - response = indicesAdmin().getAliases(new GetAliasesRequest()).actionGet(); + response = indicesAdmin().getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertThat(response.getDataStreamAliases(), anEmptyMap()); assertThat(response.getAliases().get("metrics-myindex").size(), equalTo(0)); assertThat(response.getAliases().size(), equalTo(1)); @@ -1034,7 +1038,7 @@ public void testUpdateDataStreamsWithWildcards() throws Exception { new AliasActions(AliasActions.Type.ADD).index("metrics-foo").aliases("my-alias1", "my-alias2") ); assertAcked(indicesAdmin().aliases(aliasesAddRequest).actionGet()); - GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest()).actionGet(); + GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertThat(response.getDataStreamAliases().keySet(), containsInAnyOrder("metrics-foo")); assertThat( response.getDataStreamAliases().get("metrics-foo"), @@ 
-1060,7 +1064,7 @@ public void testUpdateDataStreamsWithWildcards() throws Exception { aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.REMOVE).index("_all").aliases("_all")); } assertAcked(indicesAdmin().aliases(aliasesAddRequest).actionGet()); - GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest()).actionGet(); + GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertThat(response.getDataStreamAliases(), anEmptyMap()); assertThat(response.getAliases().size(), equalTo(0)); } @@ -1176,7 +1180,7 @@ public void testUpdateMappingViaDataStream() throws Exception { DataStreamTimestampFieldMapper.NAME, Map.of("enabled", true) ); - GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("logs-foobar").get(); + GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "logs-foobar").get(); assertThat(getMappingsResponse.getMappings().size(), equalTo(2)); assertThat(getMappingsResponse.getMappings().get(backingIndex1).getSourceAsMap(), equalTo(expectedMapping)); assertThat(getMappingsResponse.getMappings().get(backingIndex2).getSourceAsMap(), equalTo(expectedMapping)); @@ -1191,7 +1195,7 @@ public void testUpdateMappingViaDataStream() throws Exception { .setSource("{\"properties\":{\"my_field\":{\"type\":\"keyword\"}}}", XContentType.JSON) .get(); // The mappings of all backing indices should be updated: - getMappingsResponse = indicesAdmin().prepareGetMappings("logs-foobar").get(); + getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "logs-foobar").get(); assertThat(getMappingsResponse.getMappings().size(), equalTo(2)); assertThat(getMappingsResponse.getMappings().get(backingIndex1).getSourceAsMap(), equalTo(expectedMapping)); assertThat(getMappingsResponse.getMappings().get(backingIndex2).getSourceAsMap(), equalTo(expectedMapping)); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index 286ad68896797..f6c703b96888c 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -31,11 +31,13 @@ import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.datastreams.DeleteDataStreamAction; import org.elasticsearch.action.datastreams.GetDataStreamAction; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamAlias; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.index.Index; @@ -60,7 +62,6 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -77,6 +78,7 @@ import static 
org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase { @@ -136,27 +138,17 @@ public void setup() throws Exception { assertTrue(response.isAcknowledged()); // Initialize the failure store. - RolloverRequest rolloverRequest = new RolloverRequest("with-fs", null); - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() - ); + RolloverRequest rolloverRequest = new RolloverRequest("with-fs::failures", null); response = client.execute(RolloverAction.INSTANCE, rolloverRequest).get(); assertTrue(response.isAcknowledged()); // Resolve backing index names after data streams have been created: // (these names have a date component, and running around midnight could lead to test failures otherwise) - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); - GetDataStreamAction.Response getDataStreamResponse = client.execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet(); - dsBackingIndexName = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(); - otherDsBackingIndexName = getDataStreamResponse.getDataStreams().get(1).getDataStream().getIndices().get(0).getName(); - fsBackingIndexName = getDataStreamResponse.getDataStreams().get(2).getDataStream().getIndices().get(0).getName(); - fsFailureIndexName = getDataStreamResponse.getDataStreams() - .get(2) - .getDataStream() - .getFailureIndices() - .getIndices() - .get(0) - .getName(); + List dataStreamInfos = getDataStreamInfo("*"); + dsBackingIndexName = dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName(); + otherDsBackingIndexName = dataStreamInfos.get(1).getDataStream().getIndices().get(0).getName(); + fsBackingIndexName = dataStreamInfos.get(2).getDataStream().getIndices().get(0).getName(); + fsFailureIndexName = dataStreamInfos.get(2).getDataStream().getFailureIndices().getIndices().get(0).getName(); // Will be used in some tests, to test renaming while restoring a snapshot: ds2BackingIndexName = dsBackingIndexName.replace("-ds-", "-ds2-"); @@ -198,9 +190,7 @@ public void testSnapshotAndRestore() throws Exception { assertEquals(Collections.singletonList(dsBackingIndexName), getSnapshot(REPO, SNAPSHOT).indices()); - assertAcked( - client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" })) - ); + assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "ds"))); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() @@ -218,15 +208,15 @@ public void testSnapshotAndRestore() throws Exception { assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); }); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" }) - ).get(); - assertEquals(1, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(dsBackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); + List ds = getDataStreamInfo("ds"); + assertEquals(1, ds.size()); + 
assertEquals(1, ds.get(0).getDataStream().getIndices().size()); + assertEquals(dsBackingIndexName, ds.get(0).getDataStream().getIndices().get(0).getName()); - GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("my-alias")).actionGet(); + GetAliasesResponse getAliasesResponse = client.admin() + .indices() + .getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT, "my-alias")) + .actionGet(); assertThat(getAliasesResponse.getDataStreamAliases().keySet(), containsInAnyOrder("ds", "other-ds")); assertThat(getAliasesResponse.getDataStreamAliases().get("ds").size(), equalTo(1)); assertThat(getAliasesResponse.getDataStreamAliases().get("ds").get(0).getName(), equalTo("my-alias")); @@ -278,19 +268,18 @@ public void testSnapshotAndRestoreAllDataStreamsInPlace() throws Exception { assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); }); - GetDataStreamAction.Request getDataSteamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); - GetDataStreamAction.Response ds = client.execute(GetDataStreamAction.INSTANCE, getDataSteamRequest).get(); + List dataStreamInfos = getDataStreamInfo("*"); assertThat( - ds.getDataStreams().stream().map(e -> e.getDataStream().getName()).collect(Collectors.toList()), + dataStreamInfos.stream().map(e -> e.getDataStream().getName()).collect(Collectors.toList()), contains(equalTo("ds"), equalTo("other-ds"), equalTo("with-fs")) ); - List backingIndices = ds.getDataStreams().get(0).getDataStream().getIndices(); + List backingIndices = dataStreamInfos.get(0).getDataStream().getIndices(); assertThat(backingIndices.stream().map(Index::getName).collect(Collectors.toList()), contains(dsBackingIndexName)); - backingIndices = ds.getDataStreams().get(1).getDataStream().getIndices(); + backingIndices = dataStreamInfos.get(1).getDataStream().getIndices(); assertThat(backingIndices.stream().map(Index::getName).collect(Collectors.toList()), contains(otherDsBackingIndexName)); - backingIndices = ds.getDataStreams().get(2).getDataStream().getIndices(); + backingIndices = dataStreamInfos.get(2).getDataStream().getIndices(); assertThat(backingIndices.stream().map(Index::getName).collect(Collectors.toList()), contains(fsBackingIndexName)); - List failureIndices = ds.getDataStreams().get(2).getDataStream().getFailureIndices().getIndices(); + List failureIndices = dataStreamInfos.get(2).getDataStream().getFailureIndices().getIndices(); assertThat(failureIndices.stream().map(Index::getName).collect(Collectors.toList()), contains(fsFailureIndexName)); } @@ -337,14 +326,10 @@ public void testSnapshotAndRestoreInPlace() { assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); }); - GetDataStreamAction.Request getDataSteamRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" }); - GetDataStreamAction.Response ds = client.execute(GetDataStreamAction.INSTANCE, getDataSteamRequest).actionGet(); - assertThat( - ds.getDataStreams().stream().map(e -> e.getDataStream().getName()).collect(Collectors.toList()), - contains(equalTo("ds")) - ); - List backingIndices = ds.getDataStreams().get(0).getDataStream().getIndices(); - assertThat(ds.getDataStreams().get(0).getDataStream().getIndices(), hasSize(1)); + List dsInfo = getDataStreamInfo("ds"); + assertThat(dsInfo.stream().map(e -> e.getDataStream().getName()).collect(Collectors.toList()), contains(equalTo("ds"))); + List backingIndices = dsInfo.get(0).getDataStream().getIndices(); + assertThat(dsInfo.get(0).getDataStream().getIndices(), 
hasSize(1)); assertThat(backingIndices.stream().map(Index::getName).collect(Collectors.toList()), contains(equalTo(dsBackingIndexName))); // The backing index created as part of rollover should still exist (but just not part of the data stream) @@ -357,39 +342,40 @@ public void testSnapshotAndRestoreInPlace() { } public void testFailureStoreSnapshotAndRestore() throws Exception { + String dataStreamName = "with-fs"; CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) - .setIndices("with-fs") + .setIndices(IndexNameExpressionResolver.combineSelector(dataStreamName, IndexComponentSelector.ALL_APPLICABLE)) .setIncludeGlobalState(false) .get(); RestStatus status = createSnapshotResponse.getSnapshotInfo().status(); assertEquals(RestStatus.OK, status); + assertThat(getSnapshot(REPO, SNAPSHOT).dataStreams(), containsInAnyOrder(dataStreamName)); assertThat(getSnapshot(REPO, SNAPSHOT).indices(), containsInAnyOrder(fsBackingIndexName, fsFailureIndexName)); - assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "with-fs"))); + assertAcked( + client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, dataStreamName)) + ); { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) - .setIndices("with-fs") + .setIndices(dataStreamName) .get(); assertEquals(2, restoreSnapshotResponse.getRestoreInfo().successfulShards()); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "with-fs" }) - ).get(); - assertEquals(1, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(fsBackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); - assertEquals(fsFailureIndexName, ds.getDataStreams().get(0).getDataStream().getFailureIndices().getIndices().get(0).getName()); + List dataStreamInfos = getDataStreamInfo(dataStreamName); + assertEquals(1, dataStreamInfos.size()); + assertEquals(1, dataStreamInfos.get(0).getDataStream().getIndices().size()); + assertEquals(fsBackingIndexName, dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName()); + assertEquals(fsFailureIndexName, dataStreamInfos.get(0).getDataStream().getFailureIndices().getIndices().get(0).getName()); } { // With rename pattern @@ -397,21 +383,18 @@ public void testFailureStoreSnapshotAndRestore() throws Exception { .cluster() .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) - .setIndices("with-fs") + .setIndices(dataStreamName) .setRenamePattern("-fs") .setRenameReplacement("-fs2") .get(); assertEquals(2, restoreSnapshotResponse.getRestoreInfo().successfulShards()); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "with-fs2" }) - ).get(); - assertEquals(1, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(fs2BackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); - assertEquals(fs2FailureIndexName, 
ds.getDataStreams().get(0).getDataStream().getFailureIndices().getIndices().get(0).getName()); + List dataStreamInfos = getDataStreamInfo("with-fs2"); + assertEquals(1, dataStreamInfos.size()); + assertEquals(1, dataStreamInfos.get(0).getDataStream().getIndices().size()); + assertEquals(fs2BackingIndexName, dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName()); + assertEquals(fs2FailureIndexName, dataStreamInfos.get(0).getDataStream().getFailureIndices().getIndices().get(0).getName()); } } @@ -477,15 +460,15 @@ public void testSnapshotAndRestoreAllIncludeSpecificDataStream() throws Exceptio assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); }); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { dataStreamToSnapshot }) - ).get(); - assertEquals(1, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(backingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); + List dataStreamInfos = getDataStreamInfo(dataStreamToSnapshot); + assertEquals(1, dataStreamInfos.size()); + assertEquals(1, dataStreamInfos.get(0).getDataStream().getIndices().size()); + assertEquals(backingIndexName, dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName()); - GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("my-alias")).actionGet(); + GetAliasesResponse getAliasesResponse = client.admin() + .indices() + .getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT, "my-alias")) + .actionGet(); assertThat(getAliasesResponse.getDataStreamAliases().keySet(), contains(dataStreamToSnapshot)); assertThat( getAliasesResponse.getDataStreamAliases().get(dataStreamToSnapshot), @@ -536,17 +519,17 @@ public void testSnapshotAndRestoreReplaceAll() throws Exception { assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); }); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }) - ).get(); - assertEquals(3, ds.getDataStreams().size()); + List dataStreamInfos = getDataStreamInfo("*"); + assertEquals(3, dataStreamInfos.size()); assertThat( - ds.getDataStreams().stream().map(i -> i.getDataStream().getName()).collect(Collectors.toList()), + dataStreamInfos.stream().map(i -> i.getDataStream().getName()).collect(Collectors.toList()), containsInAnyOrder("ds", "other-ds", "with-fs") ); - GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("my-alias")).actionGet(); + GetAliasesResponse getAliasesResponse = client.admin() + .indices() + .getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT, "my-alias")) + .actionGet(); assertThat(getAliasesResponse.getDataStreamAliases().keySet(), containsInAnyOrder("ds", "other-ds")); assertThat(getAliasesResponse.getDataStreamAliases().get("ds").size(), equalTo(1)); assertThat(getAliasesResponse.getDataStreamAliases().get("ds").get(0).getName(), equalTo("my-alias")); @@ -596,21 +579,21 @@ public void testSnapshotAndRestoreAll() throws Exception { assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); }); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }) - ).get(); - assertEquals(3, ds.getDataStreams().size()); - assertEquals(1, 
ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(dsBackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); - assertEquals(1, ds.getDataStreams().get(1).getDataStream().getIndices().size()); - assertEquals(otherDsBackingIndexName, ds.getDataStreams().get(1).getDataStream().getIndices().get(0).getName()); - assertEquals(1, ds.getDataStreams().get(2).getDataStream().getIndices().size()); - assertEquals(fsBackingIndexName, ds.getDataStreams().get(2).getDataStream().getIndices().get(0).getName()); - assertEquals(1, ds.getDataStreams().get(2).getDataStream().getFailureIndices().getIndices().size()); - assertEquals(fsFailureIndexName, ds.getDataStreams().get(2).getDataStream().getFailureIndices().getIndices().get(0).getName()); - - GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("my-alias")).actionGet(); + List dataStreamInfos = getDataStreamInfo("*"); + assertEquals(3, dataStreamInfos.size()); + assertEquals(1, dataStreamInfos.get(0).getDataStream().getIndices().size()); + assertEquals(dsBackingIndexName, dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName()); + assertEquals(1, dataStreamInfos.get(1).getDataStream().getIndices().size()); + assertEquals(otherDsBackingIndexName, dataStreamInfos.get(1).getDataStream().getIndices().get(0).getName()); + assertEquals(1, dataStreamInfos.get(2).getDataStream().getIndices().size()); + assertEquals(fsBackingIndexName, dataStreamInfos.get(2).getDataStream().getIndices().get(0).getName()); + assertEquals(1, dataStreamInfos.get(2).getDataStream().getFailureIndices().getIndices().size()); + assertEquals(fsFailureIndexName, dataStreamInfos.get(2).getDataStream().getFailureIndices().getIndices().get(0).getName()); + + GetAliasesResponse getAliasesResponse = client.admin() + .indices() + .getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT, "my-alias")) + .actionGet(); assertThat(getAliasesResponse.getDataStreamAliases().keySet(), containsInAnyOrder("ds", "other-ds")); assertThat(getAliasesResponse.getDataStreamAliases().get("ds").size(), equalTo(1)); @@ -667,21 +650,21 @@ public void testSnapshotAndRestoreIncludeAliasesFalse() throws Exception { assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); }); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }) - ).get(); - assertEquals(3, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(dsBackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); - assertEquals(1, ds.getDataStreams().get(1).getDataStream().getIndices().size()); - assertEquals(otherDsBackingIndexName, ds.getDataStreams().get(1).getDataStream().getIndices().get(0).getName()); - assertEquals(1, ds.getDataStreams().get(2).getDataStream().getIndices().size()); - assertEquals(fsBackingIndexName, ds.getDataStreams().get(2).getDataStream().getIndices().get(0).getName()); - assertEquals(1, ds.getDataStreams().get(2).getDataStream().getIndices().size()); - assertEquals(fsFailureIndexName, ds.getDataStreams().get(2).getDataStream().getFailureIndices().getIndices().get(0).getName()); - - GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("*")).actionGet(); + List dataStreamInfos = getDataStreamInfo("*"); + assertEquals(3, dataStreamInfos.size()); + assertEquals(1, 
dataStreamInfos.get(0).getDataStream().getIndices().size()); + assertEquals(dsBackingIndexName, dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName()); + assertEquals(1, dataStreamInfos.get(1).getDataStream().getIndices().size()); + assertEquals(otherDsBackingIndexName, dataStreamInfos.get(1).getDataStream().getIndices().get(0).getName()); + assertEquals(1, dataStreamInfos.get(2).getDataStream().getIndices().size()); + assertEquals(fsBackingIndexName, dataStreamInfos.get(2).getDataStream().getIndices().get(0).getName()); + assertEquals(1, dataStreamInfos.get(2).getDataStream().getIndices().size()); + assertEquals(fsFailureIndexName, dataStreamInfos.get(2).getDataStream().getFailureIndices().getIndices().get(0).getName()); + + GetAliasesResponse getAliasesResponse = client.admin() + .indices() + .getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT, "*")) + .actionGet(); assertThat(getAliasesResponse.getDataStreamAliases(), anEmptyMap()); assertAcked( client().execute( @@ -721,20 +704,20 @@ public void testRename() throws Exception { .setRenameReplacement("ds2") .get(); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds2" }) - ).get(); - assertEquals(1, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(ds2BackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); + List dataStreamInfos = getDataStreamInfo("ds2"); + assertEquals(1, dataStreamInfos.size()); + assertEquals(1, dataStreamInfos.get(0).getDataStream().getIndices().size()); + assertEquals(ds2BackingIndexName, dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName()); assertResponse( client.prepareSearch("ds2"), response -> assertEquals(DOCUMENT_SOURCE, response.getHits().getHits()[0].getSourceAsMap()) ); assertEquals(DOCUMENT_SOURCE, client.prepareGet(ds2BackingIndexName, id).get().getSourceAsMap()); - GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("my-alias")).actionGet(); + GetAliasesResponse getAliasesResponse = client.admin() + .indices() + .getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT, "my-alias")) + .actionGet(); assertThat(getAliasesResponse.getDataStreamAliases().keySet(), containsInAnyOrder("ds", "ds2", "other-ds")); assertThat(getAliasesResponse.getDataStreamAliases().get("ds2").size(), equalTo(1)); assertThat(getAliasesResponse.getDataStreamAliases().get("ds2").get(0).getName(), equalTo("my-alias")); @@ -779,15 +762,15 @@ public void testRenameWriteDataStream() throws Exception { .setRenameReplacement("other-ds2") .get(); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "other-ds2" }) - ).get(); - assertEquals(1, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(otherDs2BackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); + List dataStreamInfos = getDataStreamInfo("other-ds2"); + assertEquals(1, dataStreamInfos.size()); + assertEquals(1, dataStreamInfos.get(0).getDataStream().getIndices().size()); + assertEquals(otherDs2BackingIndexName, dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName()); - GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new 
GetAliasesRequest("my-alias")).actionGet(); + GetAliasesResponse getAliasesResponse = client.admin() + .indices() + .getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT, "my-alias")) + .actionGet(); assertThat(getAliasesResponse.getDataStreamAliases().keySet(), containsInAnyOrder("ds", "other-ds", "other-ds2")); assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds2").size(), equalTo(1)); assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds2").get(0).getName(), equalTo("my-alias")); @@ -849,9 +832,8 @@ public void testBackingIndexIsNotRenamedWhenRestoringDataStream() { assertThat(restoreSnapshotResponse.status(), is(RestStatus.OK)); - GetDataStreamAction.Request getDSRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" }); - GetDataStreamAction.Response response = client.execute(GetDataStreamAction.INSTANCE, getDSRequest).actionGet(); - assertThat(response.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(), is(dsBackingIndexName)); + List dataStreamInfos = getDataStreamInfo("ds"); + assertThat(dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName(), is(dsBackingIndexName)); } public void testDataStreamAndBackingIndicesAreRenamedUsingRegex() { @@ -888,17 +870,15 @@ public void testDataStreamAndBackingIndicesAreRenamedUsingRegex() { assertThat(restoreSnapshotResponse.status(), is(RestStatus.OK)); // assert "ds" was restored as "test-ds" and the backing index has a valid name - GetDataStreamAction.Request getRenamedDS = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "test-ds" }); - GetDataStreamAction.Response response = client.execute(GetDataStreamAction.INSTANCE, getRenamedDS).actionGet(); + List dataStreamInfos = getDataStreamInfo("test-ds"); assertThat( - response.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(), + dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName(), is(DataStream.getDefaultBackingIndexName("test-ds", 1L)) ); // data stream "ds" should still exist in the system - GetDataStreamAction.Request getDSRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds" }); - response = client.execute(GetDataStreamAction.INSTANCE, getDSRequest).actionGet(); - assertThat(response.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(), is(dsBackingIndexName)); + dataStreamInfos = getDataStreamInfo("ds"); + assertThat(dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName(), is(dsBackingIndexName)); } public void testWildcards() throws Exception { @@ -924,16 +904,13 @@ public void testWildcards() throws Exception { assertEquals(RestStatus.OK, restoreSnapshotResponse.status()); - GetDataStreamAction.Response ds = client.execute( - GetDataStreamAction.INSTANCE, - new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "ds2" }) - ).get(); - assertEquals(1, ds.getDataStreams().size()); - assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); - assertEquals(ds2BackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); + List dataStreamInfos = getDataStreamInfo("ds2"); + assertEquals(1, dataStreamInfos.size()); + assertEquals(1, dataStreamInfos.get(0).getDataStream().getIndices().size()); + assertEquals(ds2BackingIndexName, dataStreamInfos.get(0).getDataStream().getIndices().get(0).getName()); assertThat( "we renamed the restored data stream to one that doesn't match any existing composable template", - 
ds.getDataStreams().get(0).getIndexTemplate(), + dataStreamInfos.get(0).getIndexTemplate(), is(nullValue()) ); } @@ -955,7 +932,7 @@ public void testDataStreamNotStoredWhenIndexRequested() { ); } - public void testDataStreamNotRestoredWhenIndexRequested() throws Exception { + public void testDataStreamNotRestoredWhenIndexRequested() { CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, "snap2") @@ -984,7 +961,7 @@ public void testDataStreamNotRestoredWhenIndexRequested() throws Exception { expectThrows(ResourceNotFoundException.class, client.execute(GetDataStreamAction.INSTANCE, getRequest)); } - public void testDataStreamNotIncludedInLimitedSnapshot() throws ExecutionException, InterruptedException { + public void testDataStreamNotIncludedInLimitedSnapshot() { final String snapshotName = "test-snap"; CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() @@ -1042,12 +1019,7 @@ public void testDeleteDataStreamDuringSnapshot() throws Exception { assertDocCount(dataStream, 100L); // Resolve backing index name after the data stream has been created because it has a date component, // and running around midnight could lead to test failures otherwise - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request( - TEST_REQUEST_TIMEOUT, - new String[] { dataStream } - ); - GetDataStreamAction.Response getDataStreamResponse = client.execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet(); - String backingIndexName = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().get(0).getName(); + String backingIndexName = getDataStreamInfo(dataStream).get(0).getDataStream().getIndices().get(0).getName(); logger.info("--> snapshot"); ActionFuture future = client1.admin() @@ -1235,7 +1207,7 @@ public void testSnapshotDSDuringRolloverAndDeleteOldIndex() throws Exception { assertEquals(restoreSnapshotResponse.failedShards(), 0); } - public void testExcludeDSFromSnapshotWhenExcludingItsIndices() { + public void testExcludeDSFromSnapshotWhenExcludingAnyOfItsIndices() { final String snapshot = "test-snapshot"; final String indexWithoutDataStream = "test-idx-no-ds"; createIndexWithContent(indexWithoutDataStream); @@ -1251,10 +1223,47 @@ public void testExcludeDSFromSnapshotWhenExcludingItsIndices() { .getRestoreInfo(); assertThat(restoreInfo.failedShards(), is(0)); assertThat(restoreInfo.successfulShards(), is(1)); + + // Exclude only failure store indices + { + String dataStreamName = "with-fs"; + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) + .setWaitForCompletion(true) + .setIndices(dataStreamName + "*", "-.fs*") + .setIncludeGlobalState(false) + .get(); + + RestStatus status = createSnapshotResponse.getSnapshotInfo().status(); + assertEquals(RestStatus.OK, status); + + SnapshotInfo retrievedSnapshot = getSnapshot(REPO, SNAPSHOT); + assertThat(retrievedSnapshot.dataStreams(), contains(dataStreamName)); + assertThat(retrievedSnapshot.indices(), containsInAnyOrder(fsBackingIndexName)); + + assertAcked( + safeGet(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*"))) + ); + + RestoreInfo restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) + .setWaitForCompletion(true) + .setIndices(dataStreamName) + .get() + .getRestoreInfo(); + + assertThat(restoreSnapshotResponse, 
notNullValue()); + assertThat(restoreSnapshotResponse.successfulShards(), equalTo(restoreSnapshotResponse.totalShards())); + assertThat(restoreSnapshotResponse.failedShards(), is(0)); + + GetDataStreamAction.Response.DataStreamInfo dataStream = getDataStreamInfo(dataStreamName).getFirst(); + assertThat(dataStream.getDataStream().getBackingIndices().getIndices(), not(empty())); + assertThat(dataStream.getDataStream().getFailureIndices().getIndices(), empty()); + } } /** - * This test is a copy of the {@link #testExcludeDSFromSnapshotWhenExcludingItsIndices()} the only difference + * This test is a copy of the {@link #testExcludeDSFromSnapshotWhenExcludingAnyOfItsIndices()}; the only difference * is that one includes the global state and one doesn't. In general this shouldn't matter, which is why it used to be * a random parameter of the test, but because of #107515 it fails when we include the global state. Keep them * separate until this is fixed. @@ -1284,10 +1293,7 @@ public void testRestoreSnapshotFully() throws Exception { createIndexWithContent(indexName); createFullSnapshot(REPO, snapshotName); - assertAcked( - client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" })) - .get() - ); + assertAcked(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*")).get()); assertAcked(client.admin().indices().prepareDelete("*").setIndicesOptions(IndicesOptions.lenientExpandOpenHidden()).get()); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() @@ -1297,8 +1303,7 @@ .get(); assertEquals(RestStatus.OK, restoreSnapshotResponse.status()); - GetDataStreamAction.Request getRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] { "*" }); - assertThat(client.execute(GetDataStreamAction.INSTANCE, getRequest).get().getDataStreams(), hasSize(3)); + assertThat(getDataStreamInfo("*"), hasSize(3)); assertNotNull(client.admin().indices().prepareGetIndex().setIndices(indexName).get()); } @@ -1326,7 +1331,7 @@ public void testRestoreDataStreamAliasWithConflictingDataStream() throws Exception } } - public void testRestoreDataStreamAliasWithConflictingIndicesAlias() throws Exception { + public void testRestoreDataStreamAliasWithConflictingIndicesAlias() { var snapshotName = "test-snapshot"; createFullSnapshot(REPO, snapshotName); client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*")).actionGet(); @@ -1484,4 +1489,8 @@ public void testWarningHeaderOnRestoreTemplateFromSnapshot() throws Exception { } + protected List<GetDataStreamAction.Response.DataStreamInfo> getDataStreamInfo(String... 
dataStreamNames) { + GetDataStreamAction.Request getRequest = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, dataStreamNames); + return safeGet(client.execute(GetDataStreamAction.INSTANCE, getRequest)).getDataStreams(); + } } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java index e9eaf7b5faddb..bee3989d20ff0 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java @@ -20,11 +20,12 @@ import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.core.Strings; @@ -194,9 +195,9 @@ public void testRejectionFromFailureStore() throws IOException { createDataStream(); // Initialize failure store. - var rolloverRequest = new RolloverRequest(dataStream, null); - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() + var rolloverRequest = new RolloverRequest( + IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES), + null ); var rolloverResponse = client().execute(RolloverAction.INSTANCE, rolloverRequest).actionGet(); var failureStoreIndex = rolloverResponse.getNewIndex(); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java index de6b7a682324e..54e21d5155ed1 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java @@ -12,6 +12,9 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.RestStatus; import org.junit.Before; import java.io.IOException; @@ -57,7 +60,7 @@ public void setup() throws IOException { assertOK(client().performRequest(new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME))); // Initialize the failure store. 
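A recurring pattern in this patch: per-request `IndicesOptions.SelectorOptions` state is dropped in favour of component selectors carried in the index expression itself, either written inline as a `::failures` suffix or built with `IndexNameExpressionResolver.combineSelector(...)`. A minimal sketch of the two equivalent spellings, assuming a hypothetical data stream named "my-ds":

```java
import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
import org.elasticsearch.action.support.IndexComponentSelector;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;

class SelectorSyntaxSketch {
    // Inline form: the selector rides along in the index expression itself.
    static RolloverRequest inlineForm() {
        return new RolloverRequest("my-ds::failures", null);
    }

    // Helper form: combineSelector("my-ds", FAILURES) yields the same
    // "my-ds::failures" expression. IndexComponentSelector.ALL_APPLICABLE is
    // the variant the snapshot tests above use to capture backing indices and
    // failure indices together.
    static RolloverRequest helperForm() {
        return new RolloverRequest(
            IndexNameExpressionResolver.combineSelector("my-ds", IndexComponentSelector.FAILURES),
            null
        );
    }
}
```

The REST-level change immediately below makes the same move, replacing the `?target_failure_store` query parameter with the `::failures` suffix in the request path.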
- assertOK(client().performRequest(new Request("POST", DATA_STREAM_NAME + "/_rollover?target_failure_store"))); + assertOK(client().performRequest(new Request("POST", DATA_STREAM_NAME + "::failures/_rollover"))); ensureGreen(DATA_STREAM_NAME); final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + DATA_STREAM_NAME)); @@ -122,13 +125,25 @@ public void testExplicitlyResetDataStreamOptions() throws IOException { assertOK(client().performRequest(otherRequest)); } - public void testEnableDisableFailureStore() throws IOException { + public void testBehaviorWithEachFailureStoreOptionAndClusterSetting() throws IOException { { + // Default data stream options assertAcknowledged(client().performRequest(new Request("DELETE", "/_data_stream/" + DATA_STREAM_NAME + "/_options"))); - assertFailureStore(false, 1); + setDataStreamFailureStoreClusterSetting(DATA_STREAM_NAME); assertDataStreamOptions(null); + assertFailureStoreValuesInGetDataStreamResponse(true, 1); + assertRedirectsDocWithBadMappingToFailureStore(); + setDataStreamFailureStoreClusterSetting("does-not-match-failure-data-stream"); + assertDataStreamOptions(null); + assertFailureStoreValuesInGetDataStreamResponse(false, 1); + assertFailsDocWithBadMapping(); + setDataStreamFailureStoreClusterSetting(null); // should get same behaviour as when we set it to something non-matching + assertDataStreamOptions(null); + assertFailureStoreValuesInGetDataStreamResponse(false, 1); + assertFailsDocWithBadMapping(); } { + // Data stream options with failure store enabled Request enableRequest = new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME + "/_options"); enableRequest.setJsonEntity(""" { @@ -137,11 +152,21 @@ public void testEnableDisableFailureStore() throws IOException { } }"""); assertAcknowledged(client().performRequest(enableRequest)); - assertFailureStore(true, 1); + setDataStreamFailureStoreClusterSetting(DATA_STREAM_NAME); + assertDataStreamOptions(true); + assertFailureStoreValuesInGetDataStreamResponse(true, 1); + assertRedirectsDocWithBadMappingToFailureStore(); + setDataStreamFailureStoreClusterSetting("does-not-match-failure-data-stream"); // should have no effect as enabled in options assertDataStreamOptions(true); + assertFailureStoreValuesInGetDataStreamResponse(true, 1); + assertRedirectsDocWithBadMappingToFailureStore(); + setDataStreamFailureStoreClusterSetting(null); // same as previous + assertDataStreamOptions(true); + assertFailureStoreValuesInGetDataStreamResponse(true, 1); + assertRedirectsDocWithBadMappingToFailureStore(); } - { + // Data stream options with failure store disabled Request disableRequest = new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME + "/_options"); disableRequest.setJsonEntity(""" { @@ -150,13 +175,23 @@ public void testEnableDisableFailureStore() throws IOException { } }"""); assertAcknowledged(client().performRequest(disableRequest)); - assertFailureStore(false, 1); + setDataStreamFailureStoreClusterSetting(DATA_STREAM_NAME); // should have no effect as disabled in options assertDataStreamOptions(false); + assertFailureStoreValuesInGetDataStreamResponse(false, 1); + assertFailsDocWithBadMapping(); + setDataStreamFailureStoreClusterSetting("does-not-match-failure-data-stream"); + assertDataStreamOptions(false); + assertFailureStoreValuesInGetDataStreamResponse(false, 1); + assertFailsDocWithBadMapping(); + setDataStreamFailureStoreClusterSetting(null); + assertDataStreamOptions(false); + assertFailureStoreValuesInGetDataStreamResponse(false, 1); + 
assertFailsDocWithBadMapping(); } } @SuppressWarnings("unchecked") - private void assertFailureStore(boolean failureStoreEnabled, int failureStoreSize) throws IOException { + private void assertFailureStoreValuesInGetDataStreamResponse(boolean failureStoreEnabled, int failureStoreSize) throws IOException { final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + DATA_STREAM_NAME)); List<Object> dataStreams = (List<Object>) entityAsMap(dataStreamResponse).get("data_streams"); assertThat(dataStreams.size(), is(1)); @@ -198,4 +233,32 @@ private List<String> getIndices(Map<String, Object> response) { List<Map<String, String>> indices = (List<Map<String, String>>) response.get("indices"); return indices.stream().map(index -> index.get("index_name")).toList(); } + + private static void setDataStreamFailureStoreClusterSetting(String value) throws IOException { + updateClusterSettings( + Settings.builder().put(DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), value).build() + ); + } + + private Response putDocumentWithBadMapping() throws IOException { + Request request = new Request("POST", DATA_STREAM_NAME + "/_doc"); + request.setJsonEntity(""" + { + "@timestamp": "not a timestamp", + "foo": "bar" + } + """); + return client().performRequest(request); + } + + private void assertRedirectsDocWithBadMappingToFailureStore() throws IOException { + Response response = putDocumentWithBadMapping(); + String failureStoreResponse = (String) entityAsMap(response).get("failure_store"); + assertThat(failureStoreResponse, is("used")); + } + + private void assertFailsDocWithBadMapping() { + ResponseException e = assertThrows(ResponseException.class, this::putDocumentWithBadMapping); + assertThat(e.getResponse().getStatusLine().getStatusCode(), is(RestStatus.BAD_REQUEST.getStatus())); + } } diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java deleted file mode 100644 index 85b914be30b2c..0000000000000 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.datastreams; - -import org.elasticsearch.client.Request; - import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; -import org.junit.Before; - -import java.io.IOException; -import java.util.List; -import java.util.Map; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; - -/** - * This should be a yaml test, but in order to write one we would need to expose the new parameter in the rest-api-spec. - * We do not want to do that until the feature flag is removed. For this reason, we temporarily test the affected APIs here. - * Please convert this to a yaml test when the feature flag is removed. 
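On the option/setting interplay exercised by `testBehaviorWithEachFailureStoreOptionAndClusterSetting` above: explicit data stream options always win, and the cluster setting (`DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING`) only applies to streams whose options leave the failure store unset. A hypothetical condensation of that precedence, not the production implementation; `Regex.simpleMatch` stands in for the real pattern matching:

```java
import java.util.List;

import org.elasticsearch.common.regex.Regex;

final class FailureStorePrecedenceSketch {
    // explicitOption: the value stored in the stream's data stream options, or null if unset.
    // settingPatterns: the name patterns configured in the failure-store cluster setting.
    static boolean effectivelyEnabled(Boolean explicitOption, List<String> settingPatterns, String dataStreamName) {
        if (explicitOption != null) {
            return explicitOption; // set via PUT /_data_stream/<name>/_options
        }
        return Regex.simpleMatch(settingPatterns.toArray(String[]::new), dataStreamName);
    }
}
```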
- */ -public class FailureStoreQueryParamIT extends DisabledSecurityDataStreamTestCase { - - private static final String DATA_STREAM_NAME = "failure-data-stream"; - private String backingIndex; - private String failureStoreIndex; - - @SuppressWarnings("unchecked") - @Before - public void setup() throws IOException { - Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/ds-template"); - putComposableIndexTemplateRequest.setJsonEntity(""" - { - "index_patterns": ["failure-data-stream"], - "template": { - "settings": { - "number_of_replicas": 0 - }, - "data_stream_options": { - "failure_store": { - "enabled": true - } - } - }, - "data_stream": { - } - } - """); - assertOK(client().performRequest(putComposableIndexTemplateRequest)); - - assertOK(client().performRequest(new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME))); - // Initialize the failure store. - assertOK(client().performRequest(new Request("POST", DATA_STREAM_NAME + "/_rollover?target_failure_store"))); - ensureGreen(DATA_STREAM_NAME); - - final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + DATA_STREAM_NAME)); - List dataStreams = (List) entityAsMap(dataStreamResponse).get("data_streams"); - assertThat(dataStreams.size(), is(1)); - Map dataStream = (Map) dataStreams.get(0); - assertThat(dataStream.get("name"), equalTo(DATA_STREAM_NAME)); - List backingIndices = getIndices(dataStream); - assertThat(backingIndices.size(), is(1)); - List failureStore = getFailureStore(dataStream); - assertThat(failureStore.size(), is(1)); - backingIndex = backingIndices.get(0); - failureStoreIndex = failureStore.get(0); - } - - public void testGetIndexApi() throws IOException { - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME)); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(2)); - assertThat(indices.containsKey(backingIndex), is(true)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "?failure_store=exclude")); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(backingIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "?failure_store=only")); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - } - - @SuppressWarnings("unchecked") - public void testGetIndexStatsApi() throws IOException { - { - final Response statsResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_stats")); - Map indices = (Map) entityAsMap(statsResponse).get("indices"); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(backingIndex), is(true)); - } - { - final Response statsResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_stats?failure_store=include") - ); - Map indices = (Map) entityAsMap(statsResponse).get("indices"); - assertThat(indices.size(), is(2)); - assertThat(indices.containsKey(backingIndex), is(true)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - { - final Response statsResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_stats?failure_store=only") - ); - Map indices = (Map) 
entityAsMap(statsResponse).get("indices"); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - } - - public void testGetIndexSettingsApi() throws IOException { - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_settings")); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(backingIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_settings?failure_store=include") - ); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(2)); - assertThat(indices.containsKey(backingIndex), is(true)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_settings?failure_store=only") - ); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - } - - public void testGetIndexMappingApi() throws IOException { - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping")); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(backingIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=include") - ); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(2)); - assertThat(indices.containsKey(backingIndex), is(true)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=only") - ); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - } - - @SuppressWarnings("unchecked") - public void testPutIndexMappingApi() throws IOException { - { - final Request mappingRequest = new Request("PUT", "/" + DATA_STREAM_NAME + "/_mapping"); - mappingRequest.setJsonEntity(""" - { - "properties": { - "email": { - "type": "keyword" - } - } - } - """); - assertAcknowledged(client().performRequest(mappingRequest)); - } - { - final Request mappingRequest = new Request("PUT", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=include"); - mappingRequest.setJsonEntity(""" - { - "properties": { - "email": { - "type": "keyword" - } - } - } - """); - ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest(mappingRequest)); - Map response = entityAsMap(responseException.getResponse()); - assertThat(((Map) response.get("error")).get("reason"), is("failure index not supported")); - } - } - - @SuppressWarnings("unchecked") - private List getFailureStore(Map response) { - var failureStore = (Map) response.get("failure_store"); - return getIndices(failureStore); - - } - - @SuppressWarnings("unchecked") - private List getIndices(Map response) { - List> indices = (List>) response.get("indices"); - return indices.stream().map(index -> index.get("index_name")).toList(); - } -} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java 
b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java index f090186480b76..8026ec641d040 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java @@ -9,10 +9,6 @@ package org.elasticsearch.datastreams; -import org.elasticsearch.action.admin.indices.rollover.LazyRolloverAction; -import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; -import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthInfoPublisher; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; @@ -27,12 +23,7 @@ public class DataStreamFeatures implements FeatureSpecification { @Override public Set getFeatures() { - return Set.of( - DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE, // Added in 8.12 - LazyRolloverAction.DATA_STREAM_LAZY_ROLLOVER, // Added in 8.13 - DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE, - DataStreamGlobalRetention.GLOBAL_RETENTION // Added in 8.14 - ); + return Set.of(); } @Override diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index cb7445705537a..7d5f4bbee32be 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -197,8 +197,7 @@ public Collection createComponents(PluginServices services) { settings, services.client(), services.clusterService(), - errorStoreInitialisationService.get(), - services.featureService() + errorStoreInitialisationService.get() ) ); dataLifecycleInitialisationService.set( diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java index 1d3b1b676282a..cc5e00d8283ad 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.datastreams.DataStreamsStatsAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -102,10 +103,11 @@ protected ClusterBlockException checkRequestBlock( @Override protected String[] resolveConcreteIndexNames(ClusterState clusterState, DataStreamsStatsAction.Request request) { - return DataStreamsActionUtil.resolveConcreteIndexNames( + return DataStreamsActionUtil.resolveConcreteIndexNamesWithSelector( indexNameExpressionResolver, clusterState, request.indices(), + IndexComponentSelector.ALL_APPLICABLE, request.indicesOptions() ).toArray(String[]::new); } @@ -163,13 +165,17 @@ protected DataStreamsStatsAction.DataStreamShardStats readShardResult(StreamInpu 
request.indicesOptions(), request.indices() ); - for (String abstractionName : abstractionNames) { - IndexAbstraction indexAbstraction = indicesLookup.get(abstractionName); + for (String abstraction : abstractionNames) { + IndexAbstraction indexAbstraction = indicesLookup.get(abstraction); assert indexAbstraction != null; if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) { DataStream dataStream = (DataStream) indexAbstraction; AggregatedStats stats = aggregatedDataStreamsStats.computeIfAbsent(dataStream.getName(), s -> new AggregatedStats()); - dataStream.getIndices().stream().map(Index::getName).forEach(index -> { + dataStream.getBackingIndices().getIndices().stream().map(Index::getName).forEach(index -> { + stats.backingIndices.add(index); + allBackingIndices.add(index); + }); + dataStream.getFailureIndices().getIndices().stream().map(Index::getName).forEach(index -> { stats.backingIndices.add(index); allBackingIndices.add(index); }); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/TransportGetDataStreamsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/TransportGetDataStreamsAction.java index ffa2447f5f5aa..2d310fef0be7e 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/TransportGetDataStreamsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/TransportGetDataStreamsAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.health.ClusterStateHealth; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -64,6 +65,7 @@ public class TransportGetDataStreamsAction extends TransportMasterNodeReadAction private final SystemIndices systemIndices; private final ClusterSettings clusterSettings; private final DataStreamGlobalRetentionSettings globalRetentionSettings; + private final DataStreamFailureStoreSettings dataStreamFailureStoreSettings; private final Client client; @Inject @@ -75,6 +77,7 @@ public TransportGetDataStreamsAction( IndexNameExpressionResolver indexNameExpressionResolver, SystemIndices systemIndices, DataStreamGlobalRetentionSettings globalRetentionSettings, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings, Client client ) { super( @@ -91,6 +94,7 @@ public TransportGetDataStreamsAction( this.systemIndices = systemIndices; this.globalRetentionSettings = globalRetentionSettings; clusterSettings = clusterService.getClusterSettings(); + this.dataStreamFailureStoreSettings = dataStreamFailureStoreSettings; this.client = new OriginSettingClient(client, "stack"); } @@ -122,6 +126,7 @@ public void onResponse(DataStreamsStatsAction.Response response) { systemIndices, clusterSettings, globalRetentionSettings, + dataStreamFailureStoreSettings, maxTimestamps ) ); @@ -134,7 +139,16 @@ public void onFailure(Exception e) { }); } else { listener.onResponse( - innerOperation(state, request, indexNameExpressionResolver, systemIndices, clusterSettings, globalRetentionSettings, null) + innerOperation( + state, + request, + indexNameExpressionResolver, + systemIndices, + clusterSettings, + globalRetentionSettings, + dataStreamFailureStoreSettings, + null + ) ); } } @@ -146,11 +160,16 @@ 
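The `innerOperation` hunk continuing below threads `DataStreamFailureStoreSettings` through so each `DataStreamInfo` reports whether the failure store is effectively enabled, rather than only what is stored in metadata. A sketch of the per-stream flag, using the names from the hunk below:

```java
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings;

final class EffectiveEnablementSketch {
    // Enabled only while the feature flag is on AND the stream enables the
    // failure store explicitly or via the cluster setting.
    static boolean effectivelyEnabled(DataStream dataStream, DataStreamFailureStoreSettings settings) {
        return DataStream.isFailureStoreFeatureFlagEnabled() && dataStream.isFailureStoreEffectivelyEnabled(settings);
    }
}
```

Per the comment in that hunk, callers who need the explicitly stored value can still read it via the get data stream options API.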
static GetDataStreamAction.Response innerOperation( SystemIndices systemIndices, ClusterSettings clusterSettings, DataStreamGlobalRetentionSettings globalRetentionSettings, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings, @Nullable Map maxTimestamps ) { List dataStreams = getDataStreams(state, indexNameExpressionResolver, request); List dataStreamInfos = new ArrayList<>(dataStreams.size()); for (DataStream dataStream : dataStreams) { + // For this action, we are returning whether the failure store is effectively enabled, either in metadata or by cluster setting. + // Users can use the get data stream options API to find out whether it is explicitly enabled in metadata. + boolean failureStoreEffectivelyEnabled = DataStream.isFailureStoreFeatureFlagEnabled() + && dataStream.isFailureStoreEffectivelyEnabled(dataStreamFailureStoreSettings); final String indexTemplate; boolean indexTemplatePreferIlmValue = true; String ilmPolicyName = null; @@ -254,6 +273,7 @@ public int compareTo(IndexInfo o) { dataStreamInfos.add( new GetDataStreamAction.Response.DataStreamInfo( dataStream, + failureStoreEffectivelyEnabled, streamHealth.getStatus(), indexTemplate, ilmPolicyName, diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 7d2828e30d5ab..7de3f180753f8 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -33,7 +33,7 @@ import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; @@ -49,6 +49,9 @@ import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; @@ -944,11 +947,6 @@ private Set maybeExecuteForceMerge(ClusterState state, List indice if ((configuredFloorSegmentMerge == null || configuredFloorSegmentMerge.equals(targetMergePolicyFloorSegment) == false) || (configuredMergeFactor == null || configuredMergeFactor.equals(targetMergePolicyFactor) == false)) { UpdateSettingsRequest updateMergePolicySettingsRequest = new UpdateSettingsRequest(); - updateMergePolicySettingsRequest.indicesOptions( - IndicesOptions.builder(updateMergePolicySettingsRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) - .build() - ); updateMergePolicySettingsRequest.indices(indexName); updateMergePolicySettingsRequest.settings( 
Settings.builder() @@ -998,8 +996,11 @@ private Set maybeExecuteForceMerge(ClusterState state, List indice private void rolloverDataStream(String writeIndexName, RolloverRequest rolloverRequest, ActionListener listener) { // "saving" the rollover target name here so we don't capture the entire request - String rolloverTarget = rolloverRequest.getRolloverTarget(); - logger.trace("Data stream lifecycle issues rollover request for data stream [{}]", rolloverTarget); + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + logger.trace("Data stream lifecycle issues rollover request for data stream [{}]", rolloverRequest.getRolloverTarget()); client.admin().indices().rolloverIndex(rolloverRequest, new ActionListener<>() { @Override public void onResponse(RolloverResponse rolloverResponse) { @@ -1014,7 +1015,7 @@ public void onResponse(RolloverResponse rolloverResponse) { logger.info( "Data stream lifecycle successfully rolled over datastream [{}] due to the following met rollover " + "conditions {}. The new index is [{}]", - rolloverTarget, + rolloverRequest.getRolloverTarget(), metConditions, rolloverResponse.getNewIndex() ); @@ -1024,7 +1025,7 @@ public void onResponse(RolloverResponse rolloverResponse) { @Override public void onFailure(Exception e) { - DataStream dataStream = clusterService.state().metadata().dataStreams().get(rolloverTarget); + DataStream dataStream = clusterService.state().metadata().dataStreams().get(resolvedRolloverTarget.resource()); if (dataStream == null || dataStream.getWriteIndex().getName().equals(writeIndexName) == false) { // the data stream has another write index so no point in recording an error for the previous write index we were // attempting to roll over @@ -1407,9 +1408,7 @@ static RolloverRequest getDefaultRolloverRequest( ) { RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null).masterNodeTimeout(TimeValue.MAX_VALUE); if (rolloverFailureStore) { - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() - ); + rolloverRequest.setRolloverTarget(IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES)); } rolloverRequest.setConditions(rolloverConfiguration.resolveRolloverConditions(dataRetention)); return rolloverRequest; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java index 1595348649528..7992362d791b1 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java @@ -50,7 +50,7 @@ public static final class Request extends AcknowledgedRequest implement .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(false) + .allowSelectors(false) .build() ) .build(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisher.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisher.java index 552bf75adae23..71575ee88aa7d 
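Since rollover targets may now carry a selector suffix, `DataStreamLifecycleService.rolloverDataStream` (above) parses the target into a `ResolvedExpression` before using it as a cluster-metadata key in the failure handler. A minimal sketch, assuming the hypothetical target "my-ds::failures":

```java
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver;

final class RolloverTargetSketch {
    // "my-ds::failures" cannot be looked up directly in metadata.dataStreams();
    // resource() strips the selector back off, leaving "my-ds".
    static String metadataLookupKey(String rolloverTarget, IndicesOptions options) {
        ResolvedExpression resolved = SelectorResolver.parseExpression(rolloverTarget, options);
        return resolved.resource();
    }
}
```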
100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisher.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisher.java @@ -19,8 +19,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo; import org.elasticsearch.health.node.DslErrorInfo; import org.elasticsearch.health.node.UpdateHealthInfoCacheAction; @@ -45,12 +43,10 @@ public class DataStreamLifecycleHealthInfoPublisher { Setting.Property.Dynamic, Setting.Property.NodeScope ); - public static final NodeFeature DSL_HEALTH_INFO_FEATURE = new NodeFeature("health.dsl.info"); private final Client client; private final ClusterService clusterService; private final DataStreamLifecycleErrorStore errorStore; - private final FeatureService featureService; private volatile int signallingErrorRetryInterval; private volatile int maxNumberOfErrorsToPublish; @@ -58,13 +54,11 @@ public DataStreamLifecycleHealthInfoPublisher( Settings settings, Client client, ClusterService clusterService, - DataStreamLifecycleErrorStore errorStore, - FeatureService featureService + DataStreamLifecycleErrorStore errorStore ) { this.client = client; this.clusterService = clusterService; this.errorStore = errorStore; - this.featureService = featureService; this.signallingErrorRetryInterval = DATA_STREAM_SIGNALLING_ERROR_RETRY_INTERVAL_SETTING.get(settings); this.maxNumberOfErrorsToPublish = DATA_STREAM_LIFECYCLE_MAX_ERRORS_TO_PUBLISH_SETTING.get(settings); } @@ -89,9 +83,6 @@ private void updateNumberOfErrorsToPublish(int newValue) { * {@link org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService#DATA_STREAM_SIGNALLING_ERROR_RETRY_INTERVAL_SETTING} */ public void publishDslErrorEntries(ActionListener actionListener) { - if (featureService.clusterHasFeature(clusterService.state(), DSL_HEALTH_INFO_FEATURE) == false) { - return; - } // fetching the entries that persist in the error store for more than the signalling retry interval // note that we're reporting this view into the error store on every publishing iteration List errorEntriesToSignal = errorStore.getErrorsInfo( diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/DeleteDataStreamOptionsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/DeleteDataStreamOptionsAction.java index 98a29dd636ddf..860bcb5bf2fbe 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/DeleteDataStreamOptionsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/DeleteDataStreamOptionsAction.java @@ -39,7 +39,9 @@ public static final class Request extends AcknowledgedRequest implement .wildcardOptions( IndicesOptions.WildcardOptions.builder().matchOpen(true).matchClosed(true).allowEmptyExpressions(true).resolveAliases(false) ) - .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true)) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true).allowSelectors(false) + ) .build(); public 
Request(StreamInput in) throws IOException { diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/GetDataStreamOptionsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/GetDataStreamOptionsAction.java index c1354da1129ca..45bda1abd5c02 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/GetDataStreamOptionsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/GetDataStreamOptionsAction.java @@ -50,7 +50,9 @@ public static class Request extends MasterNodeReadRequest implements In .wildcardOptions( IndicesOptions.WildcardOptions.builder().matchOpen(true).matchClosed(true).allowEmptyExpressions(true).resolveAliases(false) ) - .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true)) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true).allowSelectors(false) + ) .build(); private boolean includeDefaults = false; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/PutDataStreamOptionsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/PutDataStreamOptionsAction.java index d055a6972312a..d66b45665d4e2 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/PutDataStreamOptionsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/PutDataStreamOptionsAction.java @@ -71,7 +71,9 @@ public static Request parseRequest(XContentParser parser, Factory factory) { .wildcardOptions( IndicesOptions.WildcardOptions.builder().matchOpen(true).matchClosed(true).allowEmptyExpressions(true).resolveAliases(false) ) - .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true)) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true).allowSelectors(false) + ) .build(); private final DataStreamOptions options; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java index b61e38297397d..be157608b1c3f 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.datastreams.GetDataStreamAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.set.Sets; @@ -42,8 +41,7 @@ public class RestGetDataStreamsAction extends BaseRestHandler { IndicesOptions.WildcardOptions.ALLOW_NO_INDICES, IndicesOptions.GatekeeperOptions.IGNORE_THROTTLED, "verbose" - ), - DataStream.isFailureStoreFeatureFlagEnabled() ? 
Set.of(IndicesOptions.FAILURE_STORE_QUERY_PARAM) : Set.of() + ) ) ); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java index d5c5193948213..e32636fe40d84 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.datastreams.DataStreamsStatsAction; import org.elasticsearch.action.datastreams.DeleteDataStreamAction; @@ -22,8 +23,12 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.DataStreamFailureStore; +import org.elasticsearch.cluster.metadata.DataStreamOptions; +import org.elasticsearch.cluster.metadata.ResettableValue; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xcontent.json.JsonXContent; @@ -40,12 +45,14 @@ import static java.lang.Math.max; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; public class DataStreamsStatsTests extends ESSingleNodeTestCase { @Override protected Collection> getPlugins() { - return List.of(DataStreamsPlugin.class); + return List.of(DataStreamsPlugin.class, MapperExtrasPlugin.class); } private final Set createdDataStreams = new HashSet<>(); @@ -107,8 +114,30 @@ public void testStatsExistingDataStream() throws Exception { assertEquals(stats.getTotalStoreSize().getBytes(), stats.getDataStreams()[0].getStoreSize().getBytes()); } + public void testStatsExistingDataStreamWithFailureStores() throws Exception { + String dataStreamName = createDataStream(false, true); + createFailedDocument(dataStreamName); + + DataStreamsStatsAction.Response stats = getDataStreamsStats(); + + assertEquals(2, stats.getSuccessfulShards()); + assertEquals(0, stats.getFailedShards()); + assertEquals(1, stats.getDataStreamCount()); + assertEquals(2, stats.getBackingIndices()); + assertNotEquals(0L, stats.getTotalStoreSize().getBytes()); + assertEquals(1, stats.getDataStreams().length); + assertEquals(dataStreamName, stats.getDataStreams()[0].getDataStream()); + assertEquals(2, stats.getDataStreams()[0].getBackingIndices()); + // The timestamp is going to not be something we can validate because + // it captures the time of failure which is uncontrolled in the test + // Just make sure it exists by ensuring it isn't zero + assertThat(stats.getDataStreams()[0].getMaximumTimestamp(), is(greaterThan(0L))); + assertNotEquals(0L, stats.getDataStreams()[0].getStoreSize().getBytes()); + 
assertEquals(stats.getTotalStoreSize().getBytes(), stats.getDataStreams()[0].getStoreSize().getBytes()); + } + public void testStatsExistingHiddenDataStream() throws Exception { - String dataStreamName = createDataStream(true); + String dataStreamName = createDataStream(true, false); long timestamp = createDocument(dataStreamName); DataStreamsStatsAction.Response stats = getDataStreamsStats(true); @@ -221,14 +250,19 @@ public void testStatsMultipleDataStreams() throws Exception { } private String createDataStream() throws Exception { - return createDataStream(false); + return createDataStream(false, false); } - private String createDataStream(boolean hidden) throws Exception { + private String createDataStream(boolean hidden, boolean failureStore) throws Exception { String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.getDefault()); + ResettableValue failureStoreOptions = failureStore == false + ? ResettableValue.undefined() + : ResettableValue.create( + new DataStreamOptions.Template(ResettableValue.create(new DataStreamFailureStore.Template(ResettableValue.create(true)))) + ); Template idxTemplate = new Template(null, new CompressedXContent(""" {"properties":{"@timestamp":{"type":"date"},"data":{"type":"keyword"}}} - """), null); + """), null, null, failureStoreOptions); ComposableIndexTemplate template = ComposableIndexTemplate.builder() .indexPatterns(List.of(dataStreamName + "*")) .template(idxTemplate) @@ -269,6 +303,27 @@ private long createDocument(String dataStreamName) throws Exception { return timestamp; } + private long createFailedDocument(String dataStreamName) throws Exception { + // Get some randomized but reasonable timestamps on the data since not all of it is guaranteed to arrive in order. + long timeSeed = System.currentTimeMillis(); + long timestamp = randomLongBetween(timeSeed - TimeUnit.HOURS.toMillis(5), timeSeed); + client().bulk( + new BulkRequest(dataStreamName).add( + new IndexRequest().opType(DocWriteRequest.OpType.CREATE) + .source( + JsonXContent.contentBuilder() + .startObject() + .field("@timestamp", timestamp) + .object("data", b -> b.field("garbage", randomAlphaOfLength(25))) + .endObject() + ) + ) + ).get(); + indicesAdmin().refresh(new RefreshRequest(".fs-" + dataStreamName + "*").indicesOptions(IndicesOptions.lenientExpandOpenHidden())) + .get(); + return timestamp; + } + private DataStreamsStatsAction.Response getDataStreamsStats() throws Exception { return getDataStreamsStats(false); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java index 710ea8c15b66e..9414943cbb439 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java @@ -102,6 +102,7 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti Response.DataStreamInfo dataStreamInfo = new Response.DataStreamInfo( logs, + true, ClusterHealthStatus.GREEN, "index-template", null, @@ -205,6 +206,7 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti Response.DataStreamInfo dataStreamInfo = new Response.DataStreamInfo( logs, + true, ClusterHealthStatus.GREEN, "index-template", null, @@ -282,6 +284,7 @@ public void testManagedByDisplayValuesDontAccidentalyChange() { private 
Response.DataStreamInfo mutateInstance(Response.DataStreamInfo instance) { var dataStream = instance.getDataStream(); + var failureStoreEffectivelyEnabled = instance.isFailureStoreEffectivelyEnabled(); var status = instance.getDataStreamStatus(); var indexTemplate = instance.getIndexTemplate(); var ilmPolicyName = instance.getIlmPolicy(); @@ -289,7 +292,7 @@ private Response.DataStreamInfo mutateInstance(Response.DataStreamInfo instance) var indexSettings = instance.getIndexSettingsValues(); var templatePreferIlm = instance.templatePreferIlmValue(); var maximumTimestamp = instance.getMaximumTimestamp(); - switch (randomIntBetween(0, 7)) { + switch (randomIntBetween(0, 8)) { case 0 -> dataStream = randomValueOtherThan(dataStream, DataStreamTestHelper::randomInstance); case 1 -> status = randomValueOtherThan(status, () -> randomFrom(ClusterHealthStatus.values())); case 2 -> indexTemplate = randomBoolean() && indexTemplate != null ? null : randomAlphaOfLengthBetween(2, 10); @@ -314,9 +317,11 @@ private Response.DataStreamInfo mutateInstance(Response.DataStreamInfo instance) case 7 -> maximumTimestamp = (maximumTimestamp == null) ? randomNonNegativeLong() : (usually() ? randomValueOtherThan(maximumTimestamp, ESTestCase::randomNonNegativeLong) : null); + case 8 -> failureStoreEffectivelyEnabled = failureStoreEffectivelyEnabled ? false : true; } return new Response.DataStreamInfo( dataStream, + failureStoreEffectivelyEnabled, status, indexTemplate, ilmPolicyName, @@ -355,6 +360,7 @@ private Response.DataStreamInfo generateRandomDataStreamInfo() { List> timeSeries = randomBoolean() ? generateRandomTimeSeries() : null; return new Response.DataStreamInfo( DataStreamTestHelper.randomInstance(), + randomBoolean(), ClusterHealthStatus.GREEN, randomAlphaOfLengthBetween(2, 10), randomAlphaOfLengthBetween(2, 10), diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/TransportGetDataStreamsActionTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/TransportGetDataStreamsActionTests.java index 2efe881266c1b..ba4627f8955a1 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/TransportGetDataStreamsActionTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/TransportGetDataStreamsActionTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; @@ -39,6 +40,8 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; public class TransportGetDataStreamsActionTests extends ESTestCase { @@ -48,6 +51,9 @@ public class TransportGetDataStreamsActionTests extends ESTestCase { private final DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = DataStreamGlobalRetentionSettings.create( ClusterSettings.createBuiltInClusterSettings() ); + private final DataStreamFailureStoreSettings emptyDataStreamFailureStoreSettings = DataStreamFailureStoreSettings.create( + 
ClusterSettings.createBuiltInClusterSettings() + ); public void testGetDataStream() { final String dataStreamName = "my-data-stream"; @@ -166,6 +172,7 @@ public void testGetTimeSeriesDataStream() { systemIndices, ClusterSettings.createBuiltInClusterSettings(), dataStreamGlobalRetentionSettings, + emptyDataStreamFailureStoreSettings, null ); assertThat( @@ -197,6 +204,7 @@ public void testGetTimeSeriesDataStream() { systemIndices, ClusterSettings.createBuiltInClusterSettings(), dataStreamGlobalRetentionSettings, + emptyDataStreamFailureStoreSettings, null ); assertThat( @@ -248,6 +256,7 @@ public void testGetTimeSeriesDataStreamWithOutOfOrderIndices() { systemIndices, ClusterSettings.createBuiltInClusterSettings(), dataStreamGlobalRetentionSettings, + emptyDataStreamFailureStoreSettings, null ); assertThat( @@ -292,6 +301,7 @@ public void testGetTimeSeriesMixedDataStream() { systemIndices, ClusterSettings.createBuiltInClusterSettings(), dataStreamGlobalRetentionSettings, + emptyDataStreamFailureStoreSettings, null ); @@ -338,6 +348,7 @@ public void testPassingGlobalRetention() { systemIndices, ClusterSettings.createBuiltInClusterSettings(), dataStreamGlobalRetentionSettings, + emptyDataStreamFailureStoreSettings, null ); assertThat(response.getGlobalRetention(), nullValue()); @@ -363,8 +374,102 @@ public void testPassingGlobalRetention() { systemIndices, ClusterSettings.createBuiltInClusterSettings(), withGlobalRetentionSettings, + emptyDataStreamFailureStoreSettings, null ); assertThat(response.getGlobalRetention(), equalTo(globalRetention)); } + + public void testDataStreamIsFailureStoreEffectivelyEnabled_disabled() { + var metadata = new Metadata.Builder(); + DataStreamTestHelper.getClusterStateWithDataStreams( + metadata, + List.of(Tuple.tuple("data-stream-1", 2)), + List.of(), + System.currentTimeMillis(), + Settings.EMPTY, + 0, + false, + false + ); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); + + var req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] {}); + var response = TransportGetDataStreamsAction.innerOperation( + state, + req, + resolver, + systemIndices, + ClusterSettings.createBuiltInClusterSettings(), + dataStreamGlobalRetentionSettings, + emptyDataStreamFailureStoreSettings, + null + ); + assertThat(response.getDataStreams(), hasSize(1)); + assertThat(response.getDataStreams().getFirst().isFailureStoreEffectivelyEnabled(), is(false)); + } + + public void testDataStreamIsFailureStoreEffectivelyEnabled_enabledExplicitly() { + var metadata = new Metadata.Builder(); + DataStreamTestHelper.getClusterStateWithDataStreams( + metadata, + List.of(Tuple.tuple("data-stream-1", 2)), + List.of(), + System.currentTimeMillis(), + Settings.EMPTY, + 0, + false, + true + ); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); + + var req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] {}); + var response = TransportGetDataStreamsAction.innerOperation( + state, + req, + resolver, + systemIndices, + ClusterSettings.createBuiltInClusterSettings(), + dataStreamGlobalRetentionSettings, + emptyDataStreamFailureStoreSettings, + null + ); + assertThat(response.getDataStreams(), hasSize(1)); + assertThat(response.getDataStreams().getFirst().isFailureStoreEffectivelyEnabled(), is(true)); + } + + public void testDataStreamIsFailureStoreEffectivelyEnabled_enabledByClusterSetting() { + var metadata = new Metadata.Builder(); + 
DataStreamTestHelper.getClusterStateWithDataStreams( + metadata, + List.of(Tuple.tuple("data-stream-1", 2)), + List.of(), + System.currentTimeMillis(), + Settings.EMPTY, + 0, + false, + false + ); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); + + var req = new GetDataStreamAction.Request(TEST_REQUEST_TIMEOUT, new String[] {}); + var response = TransportGetDataStreamsAction.innerOperation( + state, + req, + resolver, + systemIndices, + ClusterSettings.createBuiltInClusterSettings(), + dataStreamGlobalRetentionSettings, + DataStreamFailureStoreSettings.create( + ClusterSettings.createBuiltInClusterSettings( + Settings.builder() + .put(DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING.getKey(), "data-stream-*") + .build() + ) + ), + null + ); + assertThat(response.getDataStreams(), hasSize(1)); + assertThat(response.getDataStreams().getFirst().isFailureStoreEffectivelyEnabled(), is(true)); + } } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index 698ab427ab040..0bb990e544892 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; @@ -46,6 +46,7 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -66,9 +67,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.datastreams.DataStreamFeatures; import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthInfoPublisher; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; @@ -182,13 +181,7 @@ public void setupServices() { () -> now, errorStore, allocationService, - new DataStreamLifecycleHealthInfoPublisher( - Settings.EMPTY, - client, - clusterService, - errorStore, - new FeatureService(List.of(new DataStreamFeatures())) - ), + new DataStreamLifecycleHealthInfoPublisher(Settings.EMPTY, client, clusterService, errorStore), globalRetentionSettings ); clientDelegate = null; @@ -225,11 +218,12 @@ public void testOperationsExecutedOnce() { assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class)); RolloverRequest rolloverBackingIndexRequest = (RolloverRequest) 
clientSeenRequests.get(0); assertThat(rolloverBackingIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA)); assertThat(clientSeenRequests.get(1), instanceOf(RolloverRequest.class)); RolloverRequest rolloverFailureIndexRequest = (RolloverRequest) clientSeenRequests.get(1); - assertThat(rolloverFailureIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.FAILURES)); + assertThat( + rolloverFailureIndexRequest.getRolloverTarget(), + is(IndexNameExpressionResolver.combineSelector(dataStreamName, IndexComponentSelector.FAILURES)) + ); List deleteRequests = clientSeenRequests.subList(2, 5) .stream() .map(transportRequest -> (DeleteIndexRequest) transportRequest) @@ -1463,13 +1457,7 @@ public void testTrackingTimeStats() { () -> now.getAndAdd(delta), errorStore, mock(AllocationService.class), - new DataStreamLifecycleHealthInfoPublisher( - Settings.EMPTY, - getTransportRequestsRecordingClient(), - clusterService, - errorStore, - new FeatureService(List.of(new DataStreamFeatures())) - ), + new DataStreamLifecycleHealthInfoPublisher(Settings.EMPTY, getTransportRequestsRecordingClient(), clusterService, errorStore), globalRetentionSettings ); assertThat(service.getLastRunDuration(), is(nullValue())); @@ -1546,11 +1534,12 @@ public void testFailureStoreIsManagedEvenWhenDisabled() { assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class)); RolloverRequest rolloverBackingIndexRequest = (RolloverRequest) clientSeenRequests.get(0); assertThat(rolloverBackingIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA)); assertThat(clientSeenRequests.get(1), instanceOf(RolloverRequest.class)); RolloverRequest rolloverFailureIndexRequest = (RolloverRequest) clientSeenRequests.get(1); - assertThat(rolloverFailureIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.FAILURES)); + assertThat( + rolloverFailureIndexRequest.getRolloverTarget(), + is(IndexNameExpressionResolver.combineSelector(dataStreamName, IndexComponentSelector.FAILURES)) + ); assertThat( ((DeleteIndexRequest) clientSeenRequests.get(2)).indices()[0], is(dataStream.getFailureIndices().getIndices().get(0).getName()) diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisherTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisherTests.java index cff6127e0729e..f8a2ac3c61029 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisherTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisherTests.java @@ -24,10 +24,8 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.datastreams.DataStreamFeatures; import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore; import 
org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo; import org.elasticsearch.health.node.DslErrorInfo; import org.elasticsearch.health.node.UpdateHealthInfoCacheAction; @@ -40,7 +38,6 @@ import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; @@ -83,13 +80,7 @@ public void setupServices() { final Client client = getTransportRequestsRecordingClient(); errorStore = new DataStreamLifecycleErrorStore(() -> now); - dslHealthInfoPublisher = new DataStreamLifecycleHealthInfoPublisher( - Settings.EMPTY, - client, - clusterService, - errorStore, - new FeatureService(List.of(new DataStreamFeatures())) - ); + dslHealthInfoPublisher = new DataStreamLifecycleHealthInfoPublisher(Settings.EMPTY, client, clusterService, errorStore); } @After @@ -105,16 +96,6 @@ public void testPublishDslErrorEntries() { } errorStore.recordError("testIndex", new IllegalStateException("bad state")); ClusterState stateWithHealthNode = ClusterStateCreationUtils.state(node1, node1, node1, allNodes); - stateWithHealthNode = ClusterState.builder(stateWithHealthNode) - .nodeFeatures( - Map.of( - node1.getId(), - Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()), - node2.getId(), - Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()) - ) - ) - .build(); ClusterServiceUtils.setState(clusterService, stateWithHealthNode); dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() { @Override @@ -143,16 +124,6 @@ public void testPublishDslErrorEntriesNoHealthNode() { errorStore.recordError("testIndex", new IllegalStateException("bad state")); ClusterState stateNoHealthNode = ClusterStateCreationUtils.state(node1, node1, null, allNodes); - stateNoHealthNode = ClusterState.builder(stateNoHealthNode) - .nodeFeatures( - Map.of( - node1.getId(), - Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()), - node2.getId(), - Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()) - ) - ) - .build(); ClusterServiceUtils.setState(clusterService, stateNoHealthNode); dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() { @Override @@ -170,16 +141,6 @@ public void onFailure(Exception e) { public void testPublishDslErrorEntriesEmptyErrorStore() { // publishes the empty error store (this is the "back to healthy" state where all errors have been fixed) ClusterState state = ClusterStateCreationUtils.state(node1, node1, node1, allNodes); - state = ClusterState.builder(state) - .nodeFeatures( - Map.of( - node1.getId(), - Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()), - node2.getId(), - Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()) - ) - ) - .build(); ClusterServiceUtils.setState(clusterService, state); dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() { @Override diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml index 9ea3bfefabdf8..884adb5458102 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml @@ -300,9 +300,6 @@ index without timestamp with 
pipeline: --- dynamic templates: - - requires: - cluster_features: ["mapper.pass_through_priority"] - reason: support for priority in passthrough objects - do: allowed_warnings: - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" @@ -450,9 +447,6 @@ dynamic templates: --- dynamic templates - conflicting aliases: - - requires: - cluster_features: ["mapper.pass_through_priority"] - reason: support for priority in passthrough objects - do: allowed_warnings: - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" @@ -549,9 +543,6 @@ dynamic templates - conflicting aliases: --- dynamic templates - conflicting aliases with top-level field: - - requires: - cluster_features: ["mapper.pass_through_priority"] - reason: support for priority in passthrough objects - do: allowed_warnings: - "index template [my-dynamic-template] has index patterns [otel] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" @@ -632,9 +623,6 @@ dynamic templates - conflicting aliases with top-level field: --- dynamic templates with nesting: - - requires: - cluster_features: ["mapper.pass_through_priority"] - reason: support for priority in passthrough objects - do: allowed_warnings: - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" @@ -810,10 +798,6 @@ dynamic templates with nesting: --- dynamic templates with incremental indexing: - - requires: - cluster_features: ["mapper.pass_through_priority"] - reason: support for priority in passthrough objects - - do: allowed_warnings: - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" @@ -1038,9 +1022,6 @@ dynamic templates with incremental indexing: --- subobject in passthrough object auto flatten: - - requires: - cluster_features: ["mapper.pass_through_priority"] - reason: support for priority in passthrough objects - do: allowed_warnings: - "index template [my-passthrough-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-passthrough-template] will take precedence during new index creation" @@ -1108,9 +1089,6 @@ enable subobjects in passthrough object: --- passthrough objects with duplicate priority: - - requires: - cluster_features: ["mapper.pass_through_priority"] - reason: support for priority in passthrough objects - do: catch: /has a conflicting param/ indices.put_index_template: @@ -1135,9 +1113,6 @@ passthrough objects with duplicate priority: --- dimensions with ignore_malformed and ignore_above: - - requires: - cluster_features: ["mapper.keyword_dimension_ignore_above"] - reason: support for ignore_above on keyword dimensions - do: allowed_warnings: - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns 
from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" @@ -1229,9 +1204,6 @@ dimensions with ignore_malformed and ignore_above: --- non string dimension fields: - - requires: - cluster_features: ["mapper.pass_through_priority", "routing.boolean_routing_path", "mapper.boolean_dimension"] - reason: support for priority in passthrough objects - do: allowed_warnings: - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" @@ -1339,10 +1311,6 @@ non string dimension fields: --- multi value dimensions: - - requires: - cluster_features: ["routing.multi_value_routing_path"] - reason: support for multi-value dimensions - - do: allowed_warnings: - "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation" diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml index 13f79e95d99f4..f439cf59bf2d3 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml @@ -148,8 +148,7 @@ # rollover data stream to create new failure store index - do: indices.rollover: - alias: "data-stream-for-modification" - target_failure_store: true + alias: "data-stream-for-modification::failures" - is_true: acknowledged # save index names for later use diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_require_data_stream.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_require_data_stream.yml similarity index 100% rename from modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_require_data_stream.yml rename to modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_require_data_stream.yml diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/20_unsupported_apis.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/20_unsupported_apis.yml index 709eb41ccb61d..a1c952d4c0fad 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/20_unsupported_apis.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/20_unsupported_apis.yml @@ -45,8 +45,6 @@ - do: indices.close: index: logs-* - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - is_true: acknowledged - length: { indices: 0 } diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml similarity index 95% rename 
from modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml rename to modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml index cc3a11ffde5e8..51a1e96b1e937 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml @@ -9,7 +9,7 @@ setup: capabilities: [ 'failure_store_in_template' ] - method: POST path: /{index}/_rollover - capabilities: [ 'lazy-rollover-failure-store' ] + capabilities: [ 'lazy-rollover-failure-store', 'index-expression-selectors' ] - do: allowed_warnings: @@ -58,8 +58,7 @@ teardown: - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" - match: { acknowledged: true } - match: { old_index: "/\\.fs-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } @@ -92,8 +91,7 @@ teardown: - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" body: conditions: max_docs: 1 @@ -130,8 +128,7 @@ teardown: - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" body: conditions: max_primary_shard_docs: 2 @@ -165,8 +162,7 @@ teardown: # Mark the failure store for lazy rollover - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" lazy: true - match: { acknowledged: true } @@ -263,8 +259,7 @@ teardown: # Mark the failure store for lazy rollover - do: indices.rollover: - alias: data-stream-for-lazy-rollover - target_failure_store: true + alias: data-stream-for-lazy-rollover::failures lazy: true - match: { acknowledged: true } @@ -332,8 +327,7 @@ teardown: # Mark the failure store for lazy rollover - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" lazy: true - match: { acknowledged: true } @@ -377,16 +371,14 @@ teardown: - do: catch: /Rolling over\/initializing an empty failure store is only supported without conditions\./ indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" body: conditions: max_docs: 1 - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" - match: { acknowledged: true } - match: { old_index: "_none_" } @@ -424,8 +416,7 @@ teardown: # Initializing should work - do: indices.rollover: - alias: "other-data-stream-for-rollover" - target_failure_store: true + alias: "other-data-stream-for-rollover::failures" - match: { acknowledged: true } - match: { old_index: "_none_" } @@ -448,8 +439,7 @@ teardown: # And "regular" rollover should work - do: indices.rollover: - alias: "other-data-stream-for-rollover" - target_failure_store: true + alias: "other-data-stream-for-rollover::failures" - match: { acknowledged: true } - match: { old_index: "/\\.fs-other-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/220_failure_store_cluster_setting.yml 
b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/220_failure_store_cluster_setting.yml
new file mode 100644
index 0000000000000..90bd6fe406b57
--- /dev/null
+++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/220_failure_store_cluster_setting.yml
@@ -0,0 +1,222 @@
+setup:
+  - requires:
+      reason: "Data stream options was added in 8.18+"
+      test_runner_features: [ capabilities, allowed_warnings, contains ]
+      capabilities:
+        - method: POST
+          path: /{index}/_doc
+          capabilities: [ 'failure_store_status' ]
+        - method: POST
+          path: /_index_template/{template}
+          capabilities: [ 'failure_store_in_template' ]
+        - method: PUT
+          path: /_cluster/settings
+          capabilities: [ 'data_stream_failure_store_cluster_setting' ]
+
+  - do:
+      cluster.put_settings:
+        body:
+          persistent:
+            data_streams.failure_store.enabled: '*-matching'
+
+  - do:
+      ingest.put_pipeline:
+        id: "failing_pipeline"
+        body: >
+          {
+            "processors": [
+              {
+                "fail": {
+                  "message" : "pipeline go boom"
+                }
+              }
+            ]
+          }
+
+  - do:
+      indices.put_index_template:
+        name: index_template_default_fs
+        body:
+          index_patterns: default-fs-*
+          data_stream: {}
+          template:
+            settings:
+              number_of_shards: 1
+              number_of_replicas: 1
+
+  - do:
+      cluster.put_component_template:
+        name: component_template_disabled_fs
+        body:
+          template:
+            data_stream_options:
+              failure_store:
+                enabled: false
+
+  - do:
+      indices.put_index_template:
+        name: index_template_disabled_fs
+        body:
+          index_patterns: disabled-fs-*
+          data_stream: {}
+          composed_of:
+            - component_template_disabled_fs
+          template:
+            settings:
+              number_of_shards: 1
+              number_of_replicas: 1
+
+---
+teardown:
+  - do:
+      indices.delete_data_stream:
+        name: default-fs-matching
+        ignore: 404
+
+  - do:
+      indices.delete_data_stream:
+        name: disabled-fs-matching
+        ignore: 404
+
+  - do:
+      indices.delete_index_template:
+        name: index_template_disabled_fs
+        ignore: 404
+
+  - do:
+      cluster.delete_component_template:
+        name: component_template_disabled_fs
+        ignore: 404
+
+  - do:
+      indices.delete_index_template:
+        name: index_template_default_fs
+        ignore: 404
+
+  - do:
+      ingest.delete_pipeline:
+        id: "failing_pipeline"
+        ignore: 404
+
+  - do:
+      cluster.put_settings:
+        body:
+          persistent:
+            data_streams.failure_store.enabled: null
+
+---
+"Redirect ingest failure when auto-creating data stream to failure store when enabled by setting":
+  - do:
+      index:
+        index: default-fs-matching
+        refresh: true
+        pipeline: 'failing_pipeline'
+        body:
+          '@timestamp': '2020-12-12'
+          foo: bar
+  - match: { failure_store: used }
+  - match: { _index: '/\.fs-default-fs-matching-(\d{4}\.\d{2}\.\d{2}-)?\d{6}/' }
+
+---
+"Redirect ingest failure into pre-existing data stream to failure store when enabled by setting":
+  - do:
+      indices.create_data_stream:
+        name: default-fs-matching
+
+  - do:
+      index:
+        index: default-fs-matching
+        refresh: true
+        pipeline: 'failing_pipeline'
+        body:
+          '@timestamp': '2020-12-12'
+          foo: bar
+  - match: { failure_store: used }
+  - match: { _index: '/\.fs-default-fs-matching-(\d{4}\.\d{2}\.\d{2}-)?\d{6}/' }
+
+---
+"Do not redirect ingest failure when auto-creating data stream to failure store when enabled by setting but disabled in template":
+  - do:
+      index:
+        index: disabled-fs-matching
+        refresh: true
+        pipeline: 'failing_pipeline'
+        body:
+          '@timestamp': '2020-12-12'
+          foo: bar
+      catch: '/pipeline go boom/'
+
+---
+"Do not redirect ingest failure into pre-existing data stream to failure store when enabled by setting but disabled in template":
+  - do:
+      indices.create_data_stream:
+        name: disabled-fs-matching
+
+  - do:
+      index:
+        index: disabled-fs-matching
+        refresh: true
+        pipeline: 'failing_pipeline'
+        body:
+          '@timestamp': '2020-12-12'
+          foo: bar
+      catch: '/pipeline go boom/'
+
+---
+"Redirect mapping failure when auto-creating data stream to failure store when enabled by setting":
+  - do:
+      index:
+        index: default-fs-matching
+        refresh: true
+        body:
+          '@timestamp': 'not a timestamp'
+          foo: bar
+  - match: { failure_store: used }
+  - match: { _index: '/\.fs-default-fs-matching-(\d{4}\.\d{2}\.\d{2}-)?\d{6}/' }
+
+---
+"Redirect mapping failure into pre-existing data stream to failure store when enabled by setting":
+  - do:
+      indices.create_data_stream:
+        name: default-fs-matching
+
+  - do:
+      index:
+        index: default-fs-matching
+        refresh: true
+        body:
+          '@timestamp': 'not a timestamp'
+          foo: bar
+  - match: { failure_store: used }
+  - match: { _index: '/\.fs-default-fs-matching-(\d{4}\.\d{2}\.\d{2}-)?\d{6}/' }
+
+---
+"Do not redirect mapping failure when auto-creating data stream to failure store when enabled by setting but disabled in template":
+  - do:
+      index:
+        index: disabled-fs-matching
+        refresh: true
+        body:
+          '@timestamp': 'not a timestamp'
+          foo: bar
+      catch: '/failed to parse field/'
+
+---
+"Do not redirect mapping failure into pre-existing data stream to failure store when enabled by setting but disabled in template":
+  - do:
+      indices.create_data_stream:
+        name: disabled-fs-matching
+
+  - do:
+      index:
+        index: disabled-fs-matching
+        refresh: true
+        body:
+          '@timestamp': 'not a timestamp'
+          foo: bar
+      catch: '/failed to parse field/'
+
+# See also DataStreamOptionsIT for tests of the interaction between the failure store cluster setting and using
+# the /_data_stream/{name}/_options API to explicitly enable and disable the failure store. (At time of writing, these
+# can only be done in a Java REST test case, not a YAML one, because the failure store is behind a feature gate and so
+# the REST API spec has not been added.)
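The new YAML tests above, together with the `isFailureStoreEffectivelyEnabled` call added to `TransportGetDataStreamsAction` earlier in this diff, exercise a simple precedence rule: an explicit `data_stream_options.failure_store.enabled` value (from the template or metadata) always wins, and the `data_streams.failure_store.enabled` cluster setting only applies to data streams whose names match one of its patterns and that carry no explicit value. The sketch below restates that rule in plain Java; it is illustrative only — `isEffectivelyEnabled` and `simpleMatch` are hypothetical stand-ins, not the actual `DataStreamFailureStoreSettings` API.

```java
import java.util.List;

public class EffectiveFailureStoreSketch {

    // Explicit template/metadata value wins; otherwise fall back to the
    // cluster-setting patterns (data_streams.failure_store.enabled).
    static boolean isEffectivelyEnabled(String dataStreamName, Boolean explicitlyEnabled, List<String> clusterPatterns) {
        if (explicitlyEnabled != null) {
            return explicitlyEnabled;
        }
        return clusterPatterns.stream().anyMatch(pattern -> simpleMatch(pattern, dataStreamName));
    }

    // Minimal '*'-only glob matcher, standing in for Elasticsearch's Regex.simpleMatch.
    static boolean simpleMatch(String pattern, String value) {
        return value.matches(pattern.replace(".", "\\.").replace("*", ".*"));
    }

    public static void main(String[] args) {
        List<String> patterns = List.of("*-matching");
        // Name matches the cluster setting, no explicit value -> effectively enabled.
        System.out.println(isEffectivelyEnabled("default-fs-matching", null, patterns));   // true
        // Explicitly disabled in the template -> the cluster setting is ignored.
        System.out.println(isEffectivelyEnabled("disabled-fs-matching", false, patterns)); // false
        // No explicit value and no pattern match -> disabled.
        System.out.println(isEffectivelyEnabled("other-stream", null, patterns));          // false
    }
}
```

Under these assumptions, the second case mirrors why `disabled-fs-matching` still fails with `pipeline go boom` in the tests above even though its name matches `*-matching`: the component template's explicit `enabled: false` overrides the cluster setting.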
diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/40_supported_apis.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/40_supported_apis.yml index 72fce418d8899..8cbb707034546 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/40_supported_apis.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/40_supported_apis.yml @@ -230,8 +230,6 @@ teardown: - do: indices.close: index: ".ds-simple-data-stream1-*000001,.ds-simple-data-stream1-*000002" - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - is_true: acknowledged - do: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/80_resolve_index_data_streams.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/80_resolve_index_data_streams.yml index 810b0e43a0da6..eb07438661330 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/80_resolve_index_data_streams.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/80_resolve_index_data_streams.yml @@ -54,8 +54,6 @@ setup: - do: indices.close: index: test_index2 - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - do: indices.create: diff --git a/modules/dot-prefix-validation/build.gradle b/modules/dot-prefix-validation/build.gradle index 6e232570b4a22..bbbbbb5609f1e 100644 --- a/modules/dot-prefix-validation/build.gradle +++ b/modules/dot-prefix-validation/build.gradle @@ -10,8 +10,8 @@ apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'Validation for dot-prefixed indices for non-operator users' - classname 'org.elasticsearch.validation.DotPrefixValidationPlugin' + description = 'Validation for dot-prefixed indices for non-operator users' + classname = 'org.elasticsearch.validation.DotPrefixValidationPlugin' } restResources { diff --git a/modules/dot-prefix-validation/src/main/java/org/elasticsearch/validation/DotPrefixValidator.java b/modules/dot-prefix-validation/src/main/java/org/elasticsearch/validation/DotPrefixValidator.java index 7f65bbb6937d9..80fb8a3b2e364 100644 --- a/modules/dot-prefix-validation/src/main/java/org/elasticsearch/validation/DotPrefixValidator.java +++ b/modules/dot-prefix-validation/src/main/java/org/elasticsearch/validation/DotPrefixValidator.java @@ -67,7 +67,7 @@ public abstract class DotPrefixValidator implements MappedActionFil ".ml-state", ".ml-anomalies-unrelated" ); - public static Setting> IGNORED_INDEX_PATTERNS_SETTING = Setting.stringListSetting( + public static final Setting> IGNORED_INDEX_PATTERNS_SETTING = Setting.stringListSetting( "cluster.indices.validate_ignored_dot_patterns", List.of( "\\.ml-state-\\d+", diff --git a/modules/health-shards-availability/build.gradle b/modules/health-shards-availability/build.gradle index 4bf914fb5fdd6..62686a44a9336 100644 --- a/modules/health-shards-availability/build.gradle +++ 
b/modules/health-shards-availability/build.gradle @@ -11,8 +11,8 @@ apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'Health report API extension providing the shards_availability output' - classname 'org.elasticsearch.health.plugin.ShardsAvailabilityPlugin' + description = 'Health report API extension providing the shards_availability output' + classname = 'org.elasticsearch.health.plugin.ShardsAvailabilityPlugin' } restResources { diff --git a/modules/ingest-attachment/build.gradle b/modules/ingest-attachment/build.gradle index 8fe2b82fe21fb..a172112948fd3 100644 --- a/modules/ingest-attachment/build.gradle +++ b/modules/ingest-attachment/build.gradle @@ -10,8 +10,8 @@ apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { - description 'Ingest processor that uses Apache Tika to extract contents' - classname 'org.elasticsearch.ingest.attachment.IngestAttachmentPlugin' + description = 'Ingest processor that uses Apache Tika to extract contents' + classname = 'org.elasticsearch.ingest.attachment.IngestAttachmentPlugin' } // this overwrites the 'versions' map from Elasticsearch itself, but indeed we want that -- we're interested in managing our dependencies diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index 7cfdba4d33744..c49e0ea4b7180 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -13,8 +13,8 @@ apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'Module for ingest processors that do not require additional security permissions or have large dependencies and resources' - classname 'org.elasticsearch.ingest.common.IngestCommonPlugin' + description = 'Module for ingest processors that do not require additional security permissions or have large dependencies and resources' + classname ='org.elasticsearch.ingest.common.IngestCommonPlugin' extendedPlugins = ['lang-painless'] } diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index 4312221b33937..988ca317b4730 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -14,8 +14,8 @@ apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'Ingest processor that uses lookup geo data based on IP addresses using the MaxMind geo database' - classname 'org.elasticsearch.ingest.geoip.IngestGeoIpPlugin' + description = 'Ingest processor that uses lookup geo data based on IP addresses using the MaxMind geo database' + classname ='org.elasticsearch.ingest.geoip.IngestGeoIpPlugin' bundleSpec.from("${project.buildDir}/ingest-geoip") { into '/' diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java index a50fe7dee9008..58ff64c97b2e0 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java @@ -15,9 +15,10 @@ import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.collect.Iterators; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; @@ -90,8 +91,8 @@ public static IngestGeoIpMetadata fromXContent(XContentParser parser) throws IOE } @Override - public Iterator toXContentChunked(ToXContent.Params params) { - return ChunkedToXContent.builder(params).xContentObjectFields(DATABASES_FIELD.getPreferredName(), databases); + public Iterator toXContentChunked(ToXContent.Params ignored) { + return Iterators.concat(ChunkedToXContentHelper.xContentObjectFields(DATABASES_FIELD.getPreferredName(), databases)); } @Override diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java index aa48c73cf1d73..08efe87e6fde5 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java @@ -160,11 +160,6 @@ public void writeTo(StreamOutput out) throws IOException { if (provider instanceof Maxmind maxmind) { out.writeString(maxmind.accountId); } else { - /* - * The existence of a non-Maxmind providers is gated on the feature get_database_configuration_action.multi_node, and - * get_database_configuration_action.multi_node is only available on or after - * TransportVersions.INGEST_GEO_DATABASE_PROVIDERS. - */ assert false : "non-maxmind DatabaseConfiguration.Provider [" + provider.getWriteableName() + "]"; } } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java index 1970883e91b3e..68b3ce279a89d 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationAction.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata.DATABASE; @@ -91,6 +92,11 @@ protected Response(StreamInput in) throws IOException { this.databases = in.readCollectionAsList(DatabaseConfigurationMetadata::new); } + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeCollection(databases); + } + @Override protected List readNodesFrom(StreamInput in) throws IOException { return in.readCollectionAsList(NodeResponse::new); @@ -122,6 +128,63 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); return builder; } + + /* + * This implementation of equals exists solely for testing the serialization of this object. 
+ */ + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return Objects.equals(databases, response.databases) + && Objects.equals(getClusterName(), response.getClusterName()) + && Objects.equals(equalsHashCodeFailures(), response.equalsHashCodeFailures()) + && Objects.equals(getNodes(), response.getNodes()) + && Objects.equals(equalsHashCodeNodesMap(), response.equalsHashCodeNodesMap()); + } + + /* + * This implementation of hashCode exists solely for testing the serialization of this object. + */ + @Override + public int hashCode() { + return Objects.hash(databases, getClusterName(), equalsHashCodeFailures(), getNodes(), equalsHashCodeNodesMap()); + } + + /* + * FailedNodeException does not implement equals or hashCode, making it difficult to test the serialization of this class. This + * helper method wraps the failures() list with a class that does implement equals and hashCode. + */ + private List<EqualsHashCodeFailedNodeException> equalsHashCodeFailures() { + return failures().stream().map(EqualsHashCodeFailedNodeException::new).toList(); + } + + private record EqualsHashCodeFailedNodeException(FailedNodeException failedNodeException) { + @Override + public boolean equals(Object o) { + if (o == this) return true; + if (o == null || getClass() != o.getClass()) return false; + EqualsHashCodeFailedNodeException other = (EqualsHashCodeFailedNodeException) o; + return Objects.equals(failedNodeException.nodeId(), other.failedNodeException.nodeId()) + && Objects.equals(failedNodeException.getMessage(), other.failedNodeException.getMessage()); + } + + @Override + public int hashCode() { + return Objects.hash(failedNodeException.nodeId(), failedNodeException.getMessage()); + } + } + + /* + * The getNodesMap method lazily initializes the nodesMap field, causing failures when testing the concurrent serialization and + * deserialization of this class. Since this is a response object, we do not actually care about concurrency because it will not + * happen in practice. So this helper method synchronizes access to getNodesMap so that equals and hashCode can use it in + * tests.
+ */ + private synchronized Map equalsHashCodeNodesMap() { + return getNodesMap(); + } } public static class NodeRequest extends TransportRequest { @@ -186,6 +249,7 @@ public List getDatabases() { @Override public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); out.writeCollection(databases); } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java index c83c40e56b749..a1faaf1bb0196 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.ingest.geoip.DatabaseNodeService; import org.elasticsearch.ingest.geoip.GeoIpTaskState; import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata; @@ -41,8 +40,6 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.elasticsearch.ingest.IngestGeoIpFeatures.GET_DATABASE_CONFIGURATION_ACTION_MULTI_NODE; - public class TransportGetDatabaseConfigurationAction extends TransportNodesAction< GetDatabaseConfigurationAction.Request, GetDatabaseConfigurationAction.Response, @@ -50,7 +47,6 @@ public class TransportGetDatabaseConfigurationAction extends TransportNodesActio GetDatabaseConfigurationAction.NodeResponse, List> { - private final FeatureService featureService; private final DatabaseNodeService databaseNodeService; @Inject @@ -59,7 +55,6 @@ public TransportGetDatabaseConfigurationAction( ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - FeatureService featureService, DatabaseNodeService databaseNodeService ) { super( @@ -70,39 +65,9 @@ public TransportGetDatabaseConfigurationAction( GetDatabaseConfigurationAction.NodeRequest::new, threadPool.executor(ThreadPool.Names.MANAGEMENT) ); - this.featureService = featureService; this.databaseNodeService = databaseNodeService; } - @Override - protected void doExecute( - Task task, - GetDatabaseConfigurationAction.Request request, - ActionListener listener - ) { - if (featureService.clusterHasFeature(clusterService.state(), GET_DATABASE_CONFIGURATION_ACTION_MULTI_NODE) == false) { - /* - * TransportGetDatabaseConfigurationAction used to be a TransportMasterNodeAction, and not all nodes in the cluster have been - * updated. So we don't want to send node requests to the other nodes because they will blow up. Instead, we just return - * the information that we used to return from the master node (it doesn't make any difference that this might not be the master - * node, because we're only reading the cluster state). Because older nodes only know about the Maxmind provider type, we filter - * out all others here to avoid causing problems on those nodes. 
- */ - newResponseAsync( - task, - request, - createActionContext(task, request).stream() - .filter(database -> database.database().provider() instanceof DatabaseConfiguration.Maxmind) - .toList(), - List.of(), - List.of(), - listener - ); - } else { - super.doExecute(task, request, listener); - } - } - protected List createActionContext(Task task, GetDatabaseConfigurationAction.Request request) { final Set ids; if (request.getDatabaseIds().length == 0) { diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java index dfb8fa78089d2..e68bb9d82e91b 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java @@ -29,7 +29,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata; import org.elasticsearch.ingest.geoip.direct.PutDatabaseConfigurationAction.Request; import org.elasticsearch.injection.guice.Inject; @@ -42,8 +41,6 @@ import java.util.Map; import java.util.Optional; -import static org.elasticsearch.ingest.IngestGeoIpFeatures.PUT_DATABASE_CONFIGURATION_ACTION_IPINFO; - public class TransportPutDatabaseConfigurationAction extends TransportMasterNodeAction { private static final Logger logger = LogManager.getLogger(TransportPutDatabaseConfigurationAction.class); @@ -61,7 +58,6 @@ public void taskSucceeded(UpdateDatabaseConfigurationTask task, Void unused) { } }; - private final FeatureService featureService; private final MasterServiceTaskQueue updateDatabaseConfigurationTaskQueue; @Inject @@ -70,8 +66,7 @@ public TransportPutDatabaseConfigurationAction( ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - FeatureService featureService + IndexNameExpressionResolver indexNameExpressionResolver ) { super( PutDatabaseConfigurationAction.NAME, @@ -84,7 +79,6 @@ public TransportPutDatabaseConfigurationAction( AcknowledgedResponse::readFrom, EsExecutors.DIRECT_EXECUTOR_SERVICE ); - this.featureService = featureService; this.updateDatabaseConfigurationTaskQueue = clusterService.createTaskQueue( "update-geoip-database-configuration-state-update", Priority.NORMAL, @@ -96,18 +90,6 @@ public TransportPutDatabaseConfigurationAction( protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) { final String id = request.getDatabase().id(); - // if this is an ipinfo configuration, then make sure the whole cluster supports that feature - if (request.getDatabase().provider() instanceof DatabaseConfiguration.Ipinfo - && featureService.clusterHasFeature(clusterService.state(), PUT_DATABASE_CONFIGURATION_ACTION_IPINFO) == false) { - listener.onFailure( - new IllegalArgumentException( - "Unable to use ipinfo database configurations in mixed-clusters with nodes that do not support feature " - + PUT_DATABASE_CONFIGURATION_ACTION_IPINFO.id() - ) - ); - return; - } - updateDatabaseConfigurationTaskQueue.submitTask( Strings.format("update-geoip-database-configuration-[%s]", id), new UpdateDatabaseConfigurationTask(listener, 
request.getDatabase()), diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionNodeResponseTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionNodeResponseTests.java new file mode 100644 index 0000000000000..12fb08a5a1abf --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionNodeResponseTests.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.emptySet; + +public class GetDatabaseConfigurationActionNodeResponseTests extends AbstractWireSerializingTestCase< + GetDatabaseConfigurationAction.NodeResponse> { + @Override + protected Writeable.Reader instanceReader() { + return GetDatabaseConfigurationAction.NodeResponse::new; + } + + @Override + protected GetDatabaseConfigurationAction.NodeResponse createTestInstance() { + return getRandomDatabaseConfigurationActionNodeResponse(); + } + + static GetDatabaseConfigurationAction.NodeResponse getRandomDatabaseConfigurationActionNodeResponse() { + return new GetDatabaseConfigurationAction.NodeResponse(randomDiscoveryNode(), getRandomDatabaseConfigurationMetadata()); + } + + private static DiscoveryNode randomDiscoveryNode() { + return DiscoveryNodeUtils.builder(randomAlphaOfLength(6)).roles(emptySet()).build(); + } + + static List getRandomDatabaseConfigurationMetadata() { + return randomList( + 0, + 20, + () -> new DatabaseConfigurationMetadata( + new DatabaseConfiguration( + randomAlphaOfLength(20), + randomAlphaOfLength(20), + randomFrom( + List.of( + new DatabaseConfiguration.Local(randomAlphaOfLength(10)), + new DatabaseConfiguration.Web(), + new DatabaseConfiguration.Ipinfo(), + new DatabaseConfiguration.Maxmind(randomAlphaOfLength(10)) + ) + ) + ), + randomNonNegativeLong(), + randomNonNegativeLong() + ) + ); + } + + @Override + protected GetDatabaseConfigurationAction.NodeResponse mutateInstance(GetDatabaseConfigurationAction.NodeResponse instance) + throws IOException { + return null; + } + + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + List.of( + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Maxmind.NAME, + DatabaseConfiguration.Maxmind::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Ipinfo.NAME, + DatabaseConfiguration.Ipinfo::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Local.NAME, + DatabaseConfiguration.Local::new + ), + new 
NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Web.NAME, + DatabaseConfiguration.Web::new + ) + ) + ); + } +} diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionResponseTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionResponseTests.java new file mode 100644 index 0000000000000..1b48a409d7876 --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/GetDatabaseConfigurationActionResponseTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.ingest.geoip.direct; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.List; + +public class GetDatabaseConfigurationActionResponseTests extends AbstractWireSerializingTestCase { + @Override + protected Writeable.Reader instanceReader() { + return GetDatabaseConfigurationAction.Response::new; + } + + @Override + protected GetDatabaseConfigurationAction.Response createTestInstance() { + return new GetDatabaseConfigurationAction.Response( + GetDatabaseConfigurationActionNodeResponseTests.getRandomDatabaseConfigurationMetadata(), + getTestClusterName(), + getTestNodeResponses(), + getTestFailedNodeExceptions() + ); + } + + @Override + protected GetDatabaseConfigurationAction.Response mutateInstance(GetDatabaseConfigurationAction.Response instance) throws IOException { + return null; + } + + private ClusterName getTestClusterName() { + return new ClusterName(randomAlphaOfLength(30)); + } + + private List getTestNodeResponses() { + return randomList(0, 20, GetDatabaseConfigurationActionNodeResponseTests::getRandomDatabaseConfigurationActionNodeResponse); + } + + private List getTestFailedNodeExceptions() { + return randomList( + 0, + 5, + () -> new FailedNodeException( + randomAlphaOfLength(10), + randomAlphaOfLength(20), + new ElasticsearchException(randomAlphaOfLength(10)) + ) + ); + } + + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + List.of( + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Maxmind.NAME, + DatabaseConfiguration.Maxmind::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Ipinfo.NAME, + DatabaseConfiguration.Ipinfo::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Local.NAME, + DatabaseConfiguration.Local::new + ), + new NamedWriteableRegistry.Entry( + DatabaseConfiguration.Provider.class, + DatabaseConfiguration.Web.NAME, + DatabaseConfiguration.Web::new + ) + ) + ); + } +} diff --git 
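The two wire-serialization test classes above exercise the equals/hashCode methods added to GetDatabaseConfigurationAction.Response earlier in this patch. Because FailedNodeException defines no equality of its own, the patch compares failures through a wrapper that does. Below is a standalone sketch of that pattern; FailedCall and FailedCallKey are hypothetical stand-ins, not the Elasticsearch classes.

```java
import java.util.List;
import java.util.Objects;

// Sketch of the test-only equality pattern used above: exceptions (like
// FailedNodeException) do not implement equals/hashCode, so a round-trip
// test compares them via the few properties the wire format preserves.
class RoundTripEquality {

    static class FailedCall extends RuntimeException {
        final String nodeId;

        FailedCall(String nodeId, String message) {
            super(message);
            this.nodeId = nodeId;
        }
    }

    // A record generates equals/hashCode from its components for free.
    record FailedCallKey(String nodeId, String message) {
        static FailedCallKey of(FailedCall failure) {
            return new FailedCallKey(failure.nodeId, failure.getMessage());
        }
    }

    static boolean sameFailures(List<FailedCall> left, List<FailedCall> right) {
        return Objects.equals(
            left.stream().map(FailedCallKey::of).toList(),
            right.stream().map(FailedCallKey::of).toList()
        );
    }
}
```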
a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml index a1104505bc240..007c82db4c923 100644 --- a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml +++ b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml @@ -1,9 +1,3 @@ --- -setup: - - requires: - cluster_features: ["geoip.downloader.database.configuration", "get_database_configuration_action.multi_node"] - reason: "geoip downloader database configuration APIs added in 8.15, and updated in 8.16 to return more results" - --- teardown: - do: diff --git a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/50_ip_lookup_processor.yml b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/50_ip_lookup_processor.yml index fd73c715a5ac5..0947984769529 100644 --- a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/50_ip_lookup_processor.yml +++ b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/50_ip_lookup_processor.yml @@ -1,9 +1,3 @@ -setup: - - requires: - cluster_features: - - "put_database_configuration_action.ipinfo" - reason: "ipinfo support added in 8.16" - --- "Test ip_location processor with defaults": - do: diff --git a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/60_ip_location_databases.yml b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/60_ip_location_databases.yml index e2e9a1fdb5e28..47f09392df60e 100644 --- a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/60_ip_location_databases.yml +++ b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/60_ip_location_databases.yml @@ -1,10 +1,3 @@ --- -setup: - - requires: - cluster_features: - - "put_database_configuration_action.ipinfo" - reason: "ip location downloader database configuration APIs added in 8.16 to support more types" - --- teardown: - do: diff --git a/modules/ingest-user-agent/build.gradle b/modules/ingest-user-agent/build.gradle index a504b5a64807c..76f88544ea10f 100644 --- a/modules/ingest-user-agent/build.gradle +++ b/modules/ingest-user-agent/build.gradle @@ -10,8 +10,8 @@ apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { - description 'Ingest processor that extracts information from a user agent' - classname 'org.elasticsearch.ingest.useragent.IngestUserAgentPlugin' + description = 'Ingest processor that extracts information from a user agent' + classname = 'org.elasticsearch.ingest.useragent.IngestUserAgentPlugin' } restResources { diff --git a/modules/kibana/build.gradle b/modules/kibana/build.gradle index 6ead4cc68506c..deaf2664236ba 100644 --- a/modules/kibana/build.gradle +++ b/modules/kibana/build.gradle @@ -10,8 +10,8 @@ apply plugin: 'elasticsearch.internal-cluster-test' apply plugin: 'elasticsearch.internal-java-rest-test' esplugin { - description 'Plugin exposing APIs for Kibana system indices' - classname 'org.elasticsearch.kibana.KibanaPlugin' + description = 'Plugin exposing APIs for Kibana system indices' + classname = 'org.elasticsearch.kibana.KibanaPlugin' } dependencies { diff --git a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java
b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java index 553e4696af316..a9ab0c02612f6 100644 --- a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java +++ b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java @@ -16,6 +16,8 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; @@ -23,7 +25,6 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPoolStats; @@ -49,10 +50,6 @@ * threads that wait on a phaser. This lets us verify that operations on system indices * are being directed to other thread pools.

*/ -@TestLogging( - reason = "investigate", - value = "org.elasticsearch.kibana.KibanaThreadPoolIT:DEBUG,org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor:TRACE" -) public class KibanaThreadPoolIT extends ESIntegTestCase { private static final Logger logger = LogManager.getLogger(KibanaThreadPoolIT.class); @@ -68,6 +65,8 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { .put("thread_pool.write.queue_size", 1) .put("thread_pool.get.size", 1) .put("thread_pool.get.queue_size", 1) + // a rejected GET may retry on an INITIALIZING shard (the target of a relocation) and unexpectedly succeed, so block rebalancing + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE) .build(); } @@ -112,7 +111,12 @@ public void testKibanaThreadPoolByPassesBlockedThreadPools() throws Exception { } public void testBlockedThreadPoolsRejectUserRequests() throws Exception { - assertAcked(client().admin().indices().prepareCreate(USER_INDEX)); + assertAcked( + client().admin() + .indices() + .prepareCreate(USER_INDEX) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) // avoid retrying rejected actions + ); runWithBlockedThreadPools(this::assertThreadPoolsBlocked); diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle index 1342450755152..a7f30c4fbb5cd 100644 --- a/modules/lang-expression/build.gradle +++ b/modules/lang-expression/build.gradle @@ -11,8 +11,8 @@ apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'Lucene expressions integration for Elasticsearch' - classname 'org.elasticsearch.script.expression.ExpressionPlugin' + description = 'Lucene expressions integration for Elasticsearch' + classname = 'org.elasticsearch.script.expression.ExpressionPlugin' } dependencies { diff --git a/modules/lang-expression/src/main/plugin-metadata/entitlement-policy.yaml b/modules/lang-expression/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 0000000000000..b05e6e3a7bf7c --- /dev/null +++ b/modules/lang-expression/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,2 @@ +org.elasticsearch.script.expression: + - create_class_loader diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index 45037857db093..e8e539d2d6b55 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -12,8 +12,8 @@ apply plugin: 'elasticsearch.legacy-java-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'Mustache scripting integration for Elasticsearch' - classname 'org.elasticsearch.script.mustache.MustachePlugin' + description = 'Mustache scripting integration for Elasticsearch' + classname = 'org.elasticsearch.script.mustache.MustachePlugin' } dependencies { diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MustacheSettingsIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MustacheSettingsIT.java new file mode 100644 index 0000000000000..7ab3f1bd3b809 --- /dev/null +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MustacheSettingsIT.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.script.mustache; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.test.ESSingleNodeTestCase; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; + +public class MustacheSettingsIT extends ESSingleNodeTestCase { + @Override + protected Collection> getPlugins() { + return List.of(MustachePlugin.class); + } + + @Override + protected Settings nodeSettings() { + return Settings.builder().put(MustacheScriptEngine.MUSTACHE_RESULT_SIZE_LIMIT.getKey(), "10b").build(); + } + + public void testResultSizeLimit() throws Exception { + createIndex("test"); + prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "value1").endObject()).get(); + indicesAdmin().prepareRefresh().get(); + + String query = """ + { "query": {"match_all": {}}, "size" : "{{my_size}}" }"""; + SearchRequest searchRequest = new SearchRequest(); + searchRequest.indices("test"); + var e = expectThrows( + ElasticsearchParseException.class, + () -> new SearchTemplateRequestBuilder(client()).setRequest(searchRequest) + .setScript(query) + .setScriptType(ScriptType.INLINE) + .setScriptParams(Collections.singletonMap("my_size", 1)) + .get() + ); + assertThat(e.getMessage(), equalTo("Mustache script result size limit exceeded")); + } +} diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustachePlugin.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustachePlugin.java index b24d60cb8d887..cfdd57db4dd54 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustachePlugin.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustachePlugin.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.features.NodeFeature; @@ -73,4 +74,9 @@ public List getRestHandlers( new RestRenderSearchTemplateAction() ); } + + @Override + public List> getSettings() { + return List.of(MustacheScriptEngine.MUSTACHE_RESULT_SIZE_LIMIT); + } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java index 9bb80d5688b5f..f8a56dcfc990e 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java @@ -27,7 +27,7 @@ import java.io.InputStream; public 
class SearchTemplateResponse extends ActionResponse implements ToXContentObject { - public static ParseField TEMPLATE_OUTPUT_FIELD = new ParseField("template_output"); + public static final ParseField TEMPLATE_OUTPUT_FIELD = new ParseField("template_output"); /** Contains the source of the rendered template **/ private BytesReference source; diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java index 990a92fb0dbf8..19069c876e7e7 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.StreamsUtils; @@ -102,7 +101,7 @@ public void testMultiSearchTemplateToJson() throws Exception { String[] indices = { "test" }; SearchRequest searchRequest = new SearchRequest(indices); // scroll is not supported in the current msearch or msearchtemplate api, so unset it: - searchRequest.scroll((Scroll) null); + searchRequest.scroll(null); // batched reduce size is currently not set-able on a per-request basis as it is a query string parameter only searchRequest.setBatchedReduceSize(SearchRequest.DEFAULT_BATCHED_REDUCE_SIZE); SearchTemplateRequest searchTemplateRequest = new SearchTemplateRequest(searchRequest); diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index b01c9201dcff1..b030d3c2d5e16 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -15,8 +15,8 @@ apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'An easy, safe and fast scripting language for Elasticsearch' - classname 'org.elasticsearch.painless.PainlessPlugin' + description = 'An easy, safe and fast scripting language for Elasticsearch' + classname = 'org.elasticsearch.painless.PainlessPlugin' } testClusters.configureEach { diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java index 2e7f0de027de7..37bff97a07ae2 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java @@ -9,8 +9,10 @@ package org.elasticsearch.painless.spi; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.painless.spi.annotation.WhitelistAnnotationParser; +import java.io.InputStream; import java.io.InputStreamReader; import java.io.LineNumberReader; import java.lang.reflect.Constructor; @@ -140,7 +142,7 @@ public static Whitelist loadFromResourceFiles(Class<?> resource, String... filep * } * } */ - public static Whitelist loadFromResourceFiles(Class<?> resource, Map<String, WhitelistAnnotationParser> parsers, String... filepaths) { + public static Whitelist loadFromResourceFiles(Class<?> owner, Map<String, WhitelistAnnotationParser> parsers, String...
filepaths) { List<WhitelistClass> whitelistClasses = new ArrayList<>(); List<WhitelistMethod> whitelistStatics = new ArrayList<>(); List<WhitelistClassBinding> whitelistClassBindings = new ArrayList<>(); @@ -153,7 +155,7 @@ public static Whitelist loadFromResourceFiles(Class<?> resource, Map<String, WhitelistAnnotationParser> - ClassLoader loader = AccessController.doPrivileged((PrivilegedAction<ClassLoader>) resource::getClassLoader); + ClassLoader loader = AccessController.doPrivileged((PrivilegedAction<ClassLoader>) owner::getClassLoader); return new Whitelist(loader, whitelistClasses, whitelistStatics, whitelistClassBindings, Collections.emptyList()); } + private static InputStream getResourceAsStream(Class<?> owner, String name) { + InputStream stream = owner.getResourceAsStream(name); + if (stream == null) { + String msg = "Whitelist file [" + + owner.getPackageName().replace(".", "/") + + "/" + + name + + "] not found from owning class [" + + owner.getName() + + "]."; + if (owner.getModule().isNamed()) { + msg += " Check that the file exists and the package [" + + owner.getPackageName() + + "] is opened " + + "to module " + + WhitelistLoader.class.getModule().getName(); + } + throw new ResourceNotFoundException(msg); + } + return stream; + } + private static List<Object> parseWhitelistAnnotations(Map<String, WhitelistAnnotationParser> parsers, String line) { List<Object> annotations; diff --git a/modules/lang-painless/spi/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java b/modules/lang-painless/spi/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java index e62d0b438b098..b46bc118e0913 100644 --- a/modules/lang-painless/spi/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java +++ b/modules/lang-painless/spi/src/test/java/org/elasticsearch/painless/WhitelistLoaderTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.painless; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.painless.spi.WhitelistClass; import org.elasticsearch.painless.spi.WhitelistLoader; @@ -17,10 +18,18 @@ import org.elasticsearch.painless.spi.annotation.NoImportAnnotation; import org.elasticsearch.painless.spi.annotation.WhitelistAnnotationParser; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.compiler.InMemoryJavaCompiler; +import org.elasticsearch.test.jar.JarUtils; +import java.lang.ModuleLayer.Controller; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; import java.util.HashMap; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + public class WhitelistLoaderTests extends ESTestCase { public void testUnknownAnnotations() { @@ -96,4 +105,52 @@ public void testAnnotations() { assertEquals(3, count); } + + public void testMissingWhitelistResource() { + var e = expectThrows(ResourceNotFoundException.class, () -> WhitelistLoader.loadFromResourceFiles(Whitelist.class, "missing.txt")); + assertThat( + e.getMessage(), + equalTo( + "Whitelist file [org/elasticsearch/painless/spi/missing.txt] not found" + + " from owning class [org.elasticsearch.painless.spi.Whitelist]."
+ ) + ); + } + + public void testMissingWhitelistResourceInModule() throws Exception { + Map<String, CharSequence> sources = new HashMap<>(); + sources.put("module-info", "module m {}"); + sources.put("p.TestOwner", "package p; public class TestOwner { }"); + var classToBytes = InMemoryJavaCompiler.compile(sources); + + Path dir = createTempDir(getTestName()); + Path jar = dir.resolve("m.jar"); + Map<String, byte[]> jarEntries = new HashMap<>(); + jarEntries.put("module-info.class", classToBytes.get("module-info")); + jarEntries.put("p/TestOwner.class", classToBytes.get("p.TestOwner")); + jarEntries.put("p/resource.txt", "# test resource".getBytes(StandardCharsets.UTF_8)); + JarUtils.createJarWithEntries(jar, jarEntries); + + try (var loader = JarUtils.loadJar(jar)) { + Controller controller = JarUtils.loadModule(jar, loader.classloader(), "m"); + Module module = controller.layer().findModule("m").orElseThrow(); + + Class<?> ownerClass = module.getClassLoader().loadClass("p.TestOwner"); + + // first check we get a nice error message when accessing the resource + var e = expectThrows(ResourceNotFoundException.class, () -> WhitelistLoader.loadFromResourceFiles(ownerClass, "resource.txt")); + assertThat( + e.getMessage(), + equalTo( + "Whitelist file [p/resource.txt] not found from owning class [p.TestOwner]." + + " Check that the file exists and the package [p] is opened to module null" + ) + ); + + // now check we can actually read it once the package is opened to us + controller.addOpens(module, "p", WhitelistLoader.class.getModule()); + var whitelist = WhitelistLoader.loadFromResourceFiles(ownerClass, "resource.txt"); + assertThat(whitelist, notNullValue()); + } + } } diff --git a/modules/lang-painless/src/main/plugin-metadata/entitlement-policy.yaml b/modules/lang-painless/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 0000000000000..d7e4ad872fc32 --- /dev/null +++ b/modules/lang-painless/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,2 @@ +org.elasticsearch.painless: + - create_class_loader diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt index a5118db4876cb..e76db7cfb1d26 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.score.txt @@ -50,7 +50,5 @@ static_import { double cosineSimilarity(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$CosineSimilarity double dotProduct(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$DotProduct double hamming(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$Hamming - double maxSimDotProduct(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.RankVectorsScoreScriptUtils$MaxSimDotProduct - double maxSimInvHamming(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.RankVectorsScoreScriptUtils$MaxSimInvHamming } diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml index 25088f51e2b59..1434450b65a6a
100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml @@ -221,9 +221,6 @@ setup: - close_to: {hits.hits.2._score: {value: 186.34454, error: 0.01}} --- "Test hamming distance fails on float": - - requires: - cluster_features: ["script.hamming"] - reason: "support for hamming distance added in 8.15" - do: headers: Content-Type: application/json diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml index cdd65ca0eb296..05a10ffdbccdb 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml @@ -1,7 +1,5 @@ setup: - requires: - cluster_features: ["mapper.vectors.bit_vectors"] - reason: "support for bit vectors added in 8.15" test_runner_features: headers - do: diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/151_dense_vector_byte_hamming.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/151_dense_vector_byte_hamming.yml index 373f048e7be78..a6c111be681f9 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/151_dense_vector_byte_hamming.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/151_dense_vector_byte_hamming.yml @@ -1,7 +1,5 @@ setup: - requires: - cluster_features: ["script.hamming"] - reason: "support for hamming distance added in 8.15" test_runner_features: headers - do: diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml index f82b844f01588..3a869640993f4 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml @@ -1,8 +1,4 @@ setup: - - requires: - cluster_features: ["script.term_stats"] - reason: "support for term stats has been added in 8.16" - - do: indices.create: index: test-index diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml index de4d6530f4a92..3a9c71e3c2bab 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml @@ -1,8 +1,4 @@ setup: - - requires: - cluster_features: ["script.term_stats"] - reason: "support for term stats has been added in 8.16" - - do: indices.create: index: test-index diff --git a/modules/legacy-geo/build.gradle b/modules/legacy-geo/build.gradle index 55171221396a3..4ed8d84ab96bc 100644 --- a/modules/legacy-geo/build.gradle +++ b/modules/legacy-geo/build.gradle @@ -10,8 +10,8 @@ apply plugin: 'elasticsearch.internal-cluster-test' 
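The WhitelistLoader change above replaces a bare Class#getResourceAsStream call with a helper that fails fast, because a null stream can mean either a missing file or, in a named module, a package that has not been opened to the reading module. Here is a simplified sketch of that check; ModuleResources is a hypothetical helper, and IllegalStateException stands in for the ResourceNotFoundException used in the actual change.

```java
import java.io.InputStream;

// Simplified sketch of the fail-fast resource lookup above.
final class ModuleResources {

    static InputStream open(Class<?> owner, String name) {
        InputStream stream = owner.getResourceAsStream(name);
        if (stream == null) {
            String msg = "Resource [" + owner.getPackageName().replace(".", "/") + "/" + name
                + "] not found from owning class [" + owner.getName() + "].";
            if (owner.getModule().isNamed()) {
                // Resources inside a named module's packages are encapsulated:
                // other modules can only read them if the package is opened.
                msg += " Check that the file exists and the package [" + owner.getPackageName()
                    + "] is opened to the reading module.";
            }
            throw new IllegalStateException(msg);
        }
        return stream;
    }

    private ModuleResources() {}
}
```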
esplugin { - description 'Placeholder plugin for geospatial features in ES' - classname 'org.elasticsearch.legacygeo.LegacyGeoPlugin' + description = 'Placeholder plugin for geospatial features in ES' + classname = 'org.elasticsearch.legacygeo.LegacyGeoPlugin' } dependencies { diff --git a/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/GeoBoundingBoxQueryLegacyGeoShapeIT.java b/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/GeoBoundingBoxQueryLegacyGeoShapeIT.java index 37c31c8af47b0..d2dd5b7442dd2 100644 --- a/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/GeoBoundingBoxQueryLegacyGeoShapeIT.java +++ b/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/GeoBoundingBoxQueryLegacyGeoShapeIT.java @@ -45,6 +45,6 @@ public XContentBuilder getMapping() throws IOException { @Override public IndexVersion randomSupportedVersion() { - return IndexVersionUtils.randomCompatibleVersion(random()); + return IndexVersionUtils.randomCompatibleWriteVersion(random()); } } diff --git a/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java b/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java index 918c343b79b7b..73b7c07c45fe5 100644 --- a/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java +++ b/modules/legacy-geo/src/internalClusterTest/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeIT.java @@ -41,7 +41,7 @@ protected void getGeoShapeMapping(XContentBuilder b) throws IOException { @Override protected IndexVersion randomSupportedVersion() { - return IndexVersionUtils.randomCompatibleVersion(random()); + return IndexVersionUtils.randomCompatibleWriteVersion(random()); } @Override diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java index 1616d2727bf8a..6127b4beb71ff 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java @@ -46,6 +46,7 @@ import org.elasticsearch.legacygeo.builders.ShapeBuilder; import org.elasticsearch.legacygeo.parsers.ShapeParser; import org.elasticsearch.legacygeo.query.LegacyGeoShapeQueryProcessor; +import org.elasticsearch.lucene.spatial.CoordinateEncoder; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.locationtech.spatial4j.shape.Point; @@ -358,7 +359,7 @@ public LegacyGeoShapeFieldMapper build(MapperBuilderContext context) { } @Deprecated - public static Mapper.TypeParser PARSER = (name, node, parserContext) -> { + public static final Mapper.TypeParser PARSER = (name, node, parserContext) -> { boolean ignoreMalformedByDefault = IGNORE_MALFORMED_SETTING.get(parserContext.getSettings()); boolean coerceByDefault = COERCE_SETTING.get(parserContext.getSettings()); FieldMapper.Builder builder = new LegacyGeoShapeFieldMapper.Builder( @@ -530,6 +531,17 @@ public PrefixTreeStrategy resolvePrefixTreeStrategy(String strategyName) { protected Function>, List> getFormatter(String format) { return GeometryFormatterFactory.getFormatter(format, ShapeBuilder::buildGeometry); } + + @Override + protected boolean 
isBoundsExtractionSupported() { + // Extracting bounds for geo shapes is not implemented yet. + return false; + } + + @Override + protected CoordinateEncoder coordinateEncoder() { + return CoordinateEncoder.GEO; + } } private final IndexVersion indexCreatedVersion; diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java index 9b83cd9ffdb2b..bd5b289abc588 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.geo.GeometryNormalizer; import org.elasticsearch.common.geo.GeometryParser; import org.elasticsearch.common.geo.Orientation; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.GeometryCollection; import org.elasticsearch.geometry.Line; @@ -344,8 +343,6 @@ public void testParsePolygon() throws IOException, ParseException { assertGeometryEquals(p, polygonGeoJson, false); } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) - @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParse3DPolygon() throws IOException, ParseException { XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() .startObject() diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java index 5d0df9215ef25..f944a368b2a6c 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeometryNormalizer; import org.elasticsearch.common.geo.Orientation; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Line; import org.elasticsearch.geometry.MultiLine; @@ -303,8 +302,6 @@ public void testParseMixedDimensionPolyWithHole() throws IOException, ParseExcep assertThat(e, hasToString(containsString("coordinate dimensions do not match"))); } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) - @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParseMixedDimensionPolyWithHoleStoredZ() throws IOException { List shellCoordinates = new ArrayList<>(); shellCoordinates.add(new Coordinate(100, 0)); @@ -338,8 +335,6 @@ public void testParseMixedDimensionPolyWithHoleStoredZ() throws IOException { assertThat(e, hasToString(containsString("unable to add coordinate to CoordinateBuilder: coordinate dimensions do not match"))); } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) - @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParsePolyWithStoredZ() throws IOException { List shellCoordinates = new ArrayList<>(); shellCoordinates.add(new Coordinate(100, 0, 0)); @@ -363,8 +358,6 @@ public void testParsePolyWithStoredZ() throws IOException { assertEquals(shapeBuilder.numDimensions(), 3); } - @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) - @AwaitsFix(bugUrl = "this test is using 
pre 8.0.0 index versions so needs to be removed or updated") public void testParseOpenPolygon() throws IOException { String openPolygon = "POLYGON ((100 5, 100 10, 90 10, 90 5))"; diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java index 7352b4d88a42b..c97b0a28d22de 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java @@ -13,7 +13,6 @@ import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; -import org.apache.lucene.tests.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoUtils; @@ -21,7+20,6 @@ import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.SpatialStrategy; import org.elasticsearch.core.CheckedConsumer; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Point; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -56,8 +54,6 @@ import static org.mockito.Mockito.when; @SuppressWarnings("deprecation") -@UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) -@AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") public class LegacyGeoShapeFieldMapperTests extends MapperTestCase { @Override diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java index bf616c8190324..f5e09f19c1a71 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java @@ -8,9 +8,7 @@ */ package org.elasticsearch.legacygeo.mapper; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.common.geo.SpatialStrategy; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.FieldTypeTestCase; @@ -23,8 +21,6 @@ import java.util.List; import java.util.Map; -@UpdateForV9(owner = UpdateForV9.Owner.SEARCH_ANALYTICS) -@LuceneTestCase.AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") public class LegacyGeoShapeFieldTypeTests extends FieldTypeTestCase { /** diff --git a/modules/mapper-extras/build.gradle b/modules/mapper-extras/build.gradle index 992f39a22b28c..4ce00ab873034 100644 --- a/modules/mapper-extras/build.gradle +++ b/modules/mapper-extras/build.gradle @@ -12,8 +12,8 @@ apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'Adds advanced field mappers' - classname 'org.elasticsearch.index.mapper.extras.MapperExtrasPlugin' + description = 'Adds advanced field mappers' + classname = 'org.elasticsearch.index.mapper.extras.MapperExtrasPlugin' } restResources { diff --git
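Several hunks in this patch promote mutable public static fields to public static final: TEMPLATE_OUTPUT_FIELD in SearchTemplateResponse, the LegacyGeoShapeFieldMapper PARSER, and the TokenCountFieldMapper PARSER just below. A minimal illustration of the hazard being removed, with hypothetical names rather than the Elasticsearch types:

```java
// Why the PARSER and ParseField constants in this patch are marked final.
final class GlobalParserExample {

    interface TypeParser {
        String parse(String input);
    }

    // Mutable global state: any code in the JVM may reassign this, and an
    // unsynchronized write is not guaranteed to be visible to other threads.
    public static TypeParser UNSAFE_PARSER = input -> "v1:" + input;

    // Assigned exactly once and safely published under final-field semantics.
    public static final TypeParser SAFE_PARSER = input -> "v1:" + input;

    public static void main(String[] args) {
        UNSAFE_PARSER = input -> "v2:" + input; // compiles; silently changes behavior everywhere
        // SAFE_PARSER = input -> "v2:" + input; // would not compile
        System.out.println(UNSAFE_PARSER.parse("x") + " / " + SAFE_PARSER.parse("x"));
    }
}
```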
a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java index fd0098851c5f8..db2762a028e6a 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java @@ -127,7 +127,7 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format) } } - public static TypeParser PARSER = new TypeParser((n, c) -> new Builder(n)); + public static final TypeParser PARSER = new TypeParser((n, c) -> new Builder(n)); private final boolean index; private final boolean hasDocValues; diff --git a/modules/parent-join/build.gradle b/modules/parent-join/build.gradle index 0d34b5f6e3b40..efe52225121cd 100644 --- a/modules/parent-join/build.gradle +++ b/modules/parent-join/build.gradle @@ -11,8 +11,8 @@ apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'This module adds the support parent-child queries and aggregations' - classname 'org.elasticsearch.join.ParentJoinPlugin' + description = 'This module adds support for parent-child queries and aggregations' + classname = 'org.elasticsearch.join.ParentJoinPlugin' } restResources { diff --git a/modules/percolator/build.gradle b/modules/percolator/build.gradle index 2d2f6767f5e62..3206834521797 100644 --- a/modules/percolator/build.gradle +++ b/modules/percolator/build.gradle @@ -11,8 +11,8 @@ apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'Percolator module adds capability to index queries and query these queries by specifying documents' - classname 'org.elasticsearch.percolator.PercolatorPlugin' + description = 'Percolator module adds capability to index queries and query these queries by specifying documents' + classname = 'org.elasticsearch.percolator.PercolatorPlugin' } dependencies { diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 85af5b120f6fd..c150f01153d35 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -43,7 +43,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexVersion; @@ -80,7 +79,6 @@ import java.util.Collections; import java.util.List; import java.util.Objects; -import java.util.function.BiConsumer; import java.util.function.Supplier; import static org.elasticsearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; @@ -88,20 +86,12 @@ import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBuilder> { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ParseField.class); - static final String
DOCUMENT_TYPE_DEPRECATION_MESSAGE = "[types removal] Types are deprecated in [percolate] queries. " - + "The [document_type] should no longer be specified."; - static final String TYPE_DEPRECATION_MESSAGE = "[types removal] Types are deprecated in [percolate] queries. " - + "The [type] of the indexed document should no longer be specified."; - public static final String NAME = "percolate"; static final ParseField DOCUMENT_FIELD = new ParseField("document"); static final ParseField DOCUMENTS_FIELD = new ParseField("documents"); private static final ParseField NAME_FIELD = new ParseField("name"); private static final ParseField QUERY_FIELD = new ParseField("field"); - private static final ParseField DOCUMENT_TYPE_FIELD = new ParseField("document_type"); - private static final ParseField INDEXED_DOCUMENT_FIELD_TYPE = new ParseField("type"); private static final ParseField INDEXED_DOCUMENT_FIELD_INDEX = new ParseField("index"); private static final ParseField INDEXED_DOCUMENT_FIELD_ID = new ParseField("id"); private static final ParseField INDEXED_DOCUMENT_FIELD_ROUTING = new ParseField("routing"); @@ -368,10 +358,6 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep ); } - private static BiConsumer deprecateAndIgnoreType(String key, String message) { - return (target, type) -> deprecationLogger.compatibleCritical(key, message); - } - private static BytesReference parseDocument(XContentParser parser) throws IOException { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { builder.copyCurrentStructure(parser); diff --git a/modules/rank-eval/build.gradle b/modules/rank-eval/build.gradle index c9016798c18b9..fe7453750647b 100644 --- a/modules/rank-eval/build.gradle +++ b/modules/rank-eval/build.gradle @@ -11,8 +11,8 @@ apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'The Rank Eval module adds APIs to evaluate ranking quality.' - classname 'org.elasticsearch.index.rankeval.RankEvalPlugin' + description = 'The Rank Eval module adds APIs to evaluate ranking quality.' + classname = 'org.elasticsearch.index.rankeval.RankEvalPlugin' } restResources { diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 7281c161e2c4a..47a3f51115b1d 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -21,8 +21,8 @@ apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'The Reindex module adds APIs to reindex from one index to another or update documents in place.' - classname 'org.elasticsearch.reindex.ReindexPlugin' + description = 'The Reindex module adds APIs to reindex from one index to another or update documents in place.' 
+ classname = 'org.elasticsearch.reindex.ReindexPlugin' } testClusters.configureEach { @@ -135,7 +135,8 @@ if (OS.current() == OS.WINDOWS) { TaskProvider fixture = tasks.register("oldEs${version}Fixture", AntFixture) { dependsOn project.configurations.oldesFixture, jdks.legacy, oldEsDependency executable = "${buildParams.runtimeJavaHome.get()}/bin/java" - env 'CLASSPATH', "${-> project.configurations.oldesFixture.asPath}" + def oldesFixtureConfiguration = project.configurations.oldesFixture + env 'CLASSPATH', "${-> oldesFixtureConfiguration.asPath}" // old versions of Elasticsearch need JAVA_HOME env 'JAVA_HOME', jdks.legacy.javaHomePath // If we are running on certain arm systems we need to explicitly set the stack size to overcome JDK page size bug diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java index a4b030e3c793f..4f56b6d88b634 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java @@ -59,10 +59,10 @@ public void testReindexWithShutdown() throws Exception { final String dataNodeName = internalCluster().startDataOnlyNode(); /* Maximum time to wait for reindexing tasks to complete before shutdown */ - final Settings COORD_SETTINGS = Settings.builder() + final Settings coordSettings = Settings.builder() .put(MAXIMUM_REINDEXING_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(60)) .build(); - final String coordNodeName = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + final String coordNodeName = internalCluster().startCoordinatingOnlyNode(coordSettings); ensureStableCluster(3); diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java index fe591387e9b35..de90ff97e6a95 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java @@ -43,7 +43,6 @@ import org.elasticsearch.script.Metadata; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -212,7 +211,7 @@ static > SearchRequest prep if (mainRequest.getMaxDocs() != MAX_DOCS_ALL_MATCHES && mainRequest.getMaxDocs() <= preparedSearchRequest.source().size() && mainRequest.isAbortOnVersionConflict()) { - preparedSearchRequest.scroll((Scroll) null); + preparedSearchRequest.scroll(null); } return preparedSearchRequest; diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteRequestBuilders.java index ddf73e313e830..bb2073849edc3 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteRequestBuilders.java @@ -53,7 +53,7 @@ static Request initialSearch(SearchRequest searchRequest, BytesReference query, Request request = new Request("POST", 
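The `ReindexNodeShutdownIT` hunk above is a genuine bug fix, not just a rename: the settings were built and then dropped, so the coordinating node started with `Settings.EMPTY` and the sixty-second reindex shutdown timeout never took effect. Condensed from the hunk:

```java
// Condensed from the hunk above (test-scope names as in the PR).
final Settings coordSettings = Settings.builder()
    .put(MAXIMUM_REINDEXING_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(60))
    .build();

// Before: the carefully built settings object was never used.
// final String coordNodeName = internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);

// After: the coordinating node actually starts with the timeout applied.
final String coordNodeName = internalCluster().startCoordinatingOnlyNode(coordSettings);
```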
path.toString()); if (searchRequest.scroll() != null) { - TimeValue keepAlive = searchRequest.scroll().keepAlive(); + TimeValue keepAlive = searchRequest.scroll(); // V_5_0_0 if (remoteVersion.before(Version.fromId(5000099))) { /* Versions of Elasticsearch before 5.0 couldn't parse nanos or micros diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSource.java index 5c3db5aaa6cda..cc08357aa4081 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSource.java @@ -100,7 +100,7 @@ private void onStartResponse(RejectAwareActionListener searchListener, @Override protected void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, RejectAwareActionListener searchListener) { - TimeValue keepAlive = timeValueNanos(searchRequest.scroll().keepAlive().nanos() + extraKeepAlive.nanos()); + TimeValue keepAlive = timeValueNanos(searchRequest.scroll().nanos() + extraKeepAlive.nanos()); execute(RemoteRequestBuilders.scroll(scrollId, keepAlive, remoteVersion), RESPONSE_PARSER, searchListener); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java index 83e4695829373..28f2eafc20a6e 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java @@ -199,7 +199,6 @@ public void testStartRetriesOnRejectionButFailsOnTooManyRejections() throws Exce DummyAsyncBulkByScrollAction action = new DummyActionWithoutBackoff(); action.start(); assertBusy(() -> assertEquals(testRequest.getMaxRetries() + 1, client.searchAttempts.get())); - assertBusy(() -> assertTrue(listener.isDone())); ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get()); assertThat(ExceptionsHelper.stackTrace(e), containsString(EsRejectedExecutionException.class.getSimpleName())); assertNull("There shouldn't be a search attempt pending that we didn't reject", client.lastSearch.get()); @@ -598,7 +597,7 @@ protected RequestWrapper buildRequest(Hit doc) { capturedCommand.get().run(); // So the next request is going to have to wait an extra 100 seconds or so (base was 10 seconds, so 110ish) - assertThat(client.lastScroll.get().request.scroll().keepAlive().seconds(), either(equalTo(110L)).or(equalTo(109L))); + assertThat(client.lastScroll.get().request.scroll().seconds(), either(equalTo(110L)).or(equalTo(109L))); // Now we can simulate a response and check the delay that we used for the task if (randomBoolean()) { diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java index 1c104cbd08197..5f4e2b3a55156 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java @@ -155,10 +155,7 @@ public void testScrollKeepAlive() { ); hitSource.startNextScroll(timeValueSeconds(100)); - client.validateRequest( - TransportSearchScrollAction.TYPE, - (SearchScrollRequest r) -> 
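The reindex hunks above all track one API change: `SearchRequest#scroll` now returns the keep-alive `TimeValue` directly rather than a `Scroll` wrapper, so call sites drop the intermediate `.keepAlive()` and the old `(Scroll) null` cast becomes a plain `null`. The shape of the migration, using names from the hunks:

```java
// Before: TimeValue keepAlive = searchRequest.scroll().keepAlive();
// After: scroll() yields the keep-alive TimeValue itself.
TimeValue keepAlive = searchRequest.scroll();

// Extending a keep-alive, as RemoteScrollableHitSource does above:
TimeValue extended = timeValueNanos(searchRequest.scroll().nanos() + extraKeepAlive.nanos());
```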
assertEquals(r.scroll().keepAlive().seconds(), 110) - ); + client.validateRequest(TransportSearchScrollAction.TYPE, (SearchScrollRequest r) -> assertEquals(r.scroll().seconds(), 110)); } private SearchResponse createSearchResponse() { diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexRestClientSslTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexRestClientSslTests.java index 766c3ff695f84..bcc6177f8363c 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexRestClientSslTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexRestClientSslTests.java @@ -113,14 +113,19 @@ private static SSLContext buildServerSslContext() throws Exception { } public void testClientFailsWithUntrustedCertificate() throws IOException { - assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm()); final List threads = new ArrayList<>(); final Settings.Builder builder = Settings.builder().put("path.home", createTempDir()); final Settings settings = builder.build(); final Environment environment = TestEnvironment.newEnvironment(settings); final ReindexSslConfig ssl = new ReindexSslConfig(settings, environment, mock(ResourceWatcherService.class)); try (RestClient client = Reindexer.buildRestClient(getRemoteInfo(), ssl, 1L, threads)) { - expectThrows(SSLHandshakeException.class, () -> client.performRequest(new Request("GET", "/"))); + if (inFipsJvm()) { + // Bouncy Castle throws a different exception + IOException exception = expectThrows(IOException.class, () -> client.performRequest(new Request("GET", "/"))); + assertThat(exception.getCause(), Matchers.instanceOf(javax.net.ssl.SSLException.class)); + } else { + expectThrows(SSLHandshakeException.class, () -> client.performRequest(new Request("GET", "/"))); + } } } diff --git a/modules/repository-azure/build.gradle b/modules/repository-azure/build.gradle index 8c1ca3891bc1e..ded138efb4af1 100644 --- a/modules/repository-azure/build.gradle +++ b/modules/repository-azure/build.gradle @@ -15,8 +15,8 @@ apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'The Azure Repository plugin adds support for Azure storage repositories.' - classname 'org.elasticsearch.repositories.azure.AzureRepositoryPlugin' + description = 'The Azure Repository plugin adds support for Azure storage repositories.' + classname = 'org.elasticsearch.repositories.azure.AzureRepositoryPlugin' } versions << [ @@ -56,7 +56,7 @@ dependencies { api "io.projectreactor.netty:reactor-netty-core:${versions.azureReactorNetty}" api "io.projectreactor.netty:reactor-netty-http:${versions.azureReactorNetty}" api "io.projectreactor:reactor-core:3.4.38" - api "org.reactivestreams:reactive-streams:1.0.4" + api "org.reactivestreams:reactive-streams:${versions.reactive_streams}" // Others api "com.fasterxml.woodstox:woodstox-core:6.7.0" diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 7373ed9485784..4c7d42e6080cd 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -37,7 +37,7 @@ public class AzureStorageService { * The maximum size of a BlockBlob block.
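The `ReindexRestClientSslTests` change above replaces an `assumeFalse` skip with a FIPS-aware assertion: under a FIPS JVM the Bouncy Castle TLS stack surfaces the failed handshake as a generic `SSLException` wrapped in an `IOException` rather than as an `SSLHandshakeException`, so the test now verifies each runtime's exception shape instead of opting out. Condensed:

```java
// Condensed from the test above: branch the assertion on the runtime
// instead of skipping the whole test on FIPS JVMs.
if (inFipsJvm()) {
    IOException e = expectThrows(IOException.class, () -> client.performRequest(new Request("GET", "/")));
    assertThat(e.getCause(), Matchers.instanceOf(javax.net.ssl.SSLException.class));
} else {
    expectThrows(SSLHandshakeException.class, () -> client.performRequest(new Request("GET", "/")));
}
```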
* See https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs */ - public static ByteSizeValue MAX_BLOCK_SIZE = new ByteSizeValue(100, ByteSizeUnit.MB); + public static final ByteSizeValue MAX_BLOCK_SIZE = new ByteSizeValue(100, ByteSizeUnit.MB); /** * The maximum number of blocks. diff --git a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml index 968e93cf9fc55..175abe183106b 100644 --- a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml +++ b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml @@ -251,11 +251,6 @@ setup: --- "Usage stats": - - requires: - cluster_features: - - repositories.supports_usage_stats - reason: requires this feature - - do: cluster.stats: {} diff --git a/modules/repository-gcs/build.gradle b/modules/repository-gcs/build.gradle index 811645d154c7a..d23a0f4a7e44d 100644 --- a/modules/repository-gcs/build.gradle +++ b/modules/repository-gcs/build.gradle @@ -17,8 +17,8 @@ apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'The GCS repository plugin adds Google Cloud Storage support for repositories.' - classname 'org.elasticsearch.repositories.gcs.GoogleCloudStoragePlugin' + description = 'The GCS repository plugin adds Google Cloud Storage support for repositories.' + classname = 'org.elasticsearch.repositories.gcs.GoogleCloudStoragePlugin' } dependencies { diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java index edcf03580da09..3ed492881afa9 100644 --- a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java +++ b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java @@ -145,13 +145,11 @@ public void compareAndExchangeRegister( BytesReference updated, ActionListener listener ) { - if (skipCas(listener)) return; ActionListener.completeWith(listener, () -> blobStore.compareAndExchangeRegister(buildKey(key), path, key, expected, updated)); } @Override public void getRegister(OperationPurpose purpose, String key, ActionListener listener) { - if (skipCas(listener)) return; ActionListener.completeWith(listener, () -> blobStore.getRegister(buildKey(key), path, key)); } } diff --git a/modules/repository-gcs/src/main/plugin-metadata/entitlement-policy.yaml b/modules/repository-gcs/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 0000000000000..a1ff54f02d969 --- /dev/null +++ b/modules/repository-gcs/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,2 @@ +ALL-UNNAMED: + - set_https_connection_properties # required by google-http-client diff --git a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java index a53ec71f66376..5700fa6de63fa 100644 ---
a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java +++ b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java @@ -59,9 +59,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import static fixture.gcs.GoogleCloudStorageHttpHandler.getContentRangeEnd; -import static fixture.gcs.GoogleCloudStorageHttpHandler.getContentRangeLimit; -import static fixture.gcs.GoogleCloudStorageHttpHandler.getContentRangeStart; import static fixture.gcs.GoogleCloudStorageHttpHandler.parseMultipartRequestBody; import static fixture.gcs.TestUtils.createServiceAccount; import static java.nio.charset.StandardCharsets.UTF_8; @@ -369,14 +366,14 @@ public void testWriteLargeBlob() throws IOException { assertThat(Math.toIntExact(requestBody.length()), anyOf(equalTo(defaultChunkSize), equalTo(lastChunkSize))); - final int rangeStart = getContentRangeStart(range); - final int rangeEnd = getContentRangeEnd(range); + final HttpHeaderParser.ContentRange contentRange = HttpHeaderParser.parseContentRangeHeader(range); + final int rangeStart = Math.toIntExact(contentRange.start()); + final int rangeEnd = Math.toIntExact(contentRange.end()); assertThat(rangeEnd + 1 - rangeStart, equalTo(Math.toIntExact(requestBody.length()))); assertThat(new BytesArray(data, rangeStart, rangeEnd - rangeStart + 1), is(requestBody)); bytesReceived.updateAndGet(existing -> Math.max(existing, rangeEnd)); - final Integer limit = getContentRangeLimit(range); - if (limit != null) { + if (contentRange.size() != null) { exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1); return; } else { diff --git a/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml b/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml index e8c34a4b6a20b..d2370919297a3 100644 --- a/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml +++ b/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml @@ -234,11 +234,6 @@ setup: --- "Usage stats": - - requires: - cluster_features: - - repositories.supports_usage_stats - reason: requires this feature - - do: cluster.stats: {} diff --git a/modules/repository-s3/build.gradle b/modules/repository-s3/build.gradle index f0dc1ca714958..1db83b9e9bc42 100644 --- a/modules/repository-s3/build.gradle +++ b/modules/repository-s3/build.gradle @@ -14,8 +14,8 @@ apply plugin: 'elasticsearch.internal-cluster-test' apply plugin: 'elasticsearch.internal-java-rest-test' esplugin { - description 'The S3 repository plugin adds S3 repositories' - classname 'org.elasticsearch.repositories.s3.S3RepositoryPlugin' + description = 'The S3 repository plugin adds S3 repositories' + classname = 'org.elasticsearch.repositories.s3.S3RepositoryPlugin' } dependencies { @@ -44,12 +44,14 @@ dependencies { internalClusterTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" yamlRestTestImplementation project(':modules:repository-s3') + yamlRestTestImplementation project(':test:fixtures:aws-fixture-utils') yamlRestTestImplementation project(':test:fixtures:s3-fixture') yamlRestTestImplementation project(':test:fixtures:testcontainer-utils') yamlRestTestImplementation project(':test:framework') yamlRestTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}"
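In the GCS retries test above, three ad-hoc `getContentRange*` helpers give way to `HttpHeaderParser.parseContentRangeHeader`, which, as used in the hunk, returns a record exposing `start()`, `end()`, and a nullable `size()`; a null `size()` corresponds to the `bytes 0-1023/*` form a client sends for a non-final chunk of a resumable upload. A hypothetical re-implementation of that contract, to make the semantics concrete (the real parser lives in the test framework and may differ in detail):

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical sketch of the Content-Range contract used above.
record ContentRange(long start, long end, Long size) {
    private static final Pattern FORMAT = Pattern.compile("bytes (\\d+)-(\\d+)/(\\d+|\\*)");

    static ContentRange parse(String header) {
        Matcher m = FORMAT.matcher(header);
        if (m.matches() == false) {
            throw new IllegalArgumentException("unparseable Content-Range: " + header);
        }
        Long size = "*".equals(m.group(3)) ? null : Long.valueOf(m.group(3));
        return new ContentRange(Long.parseLong(m.group(1)), Long.parseLong(m.group(2)), size);
    }
}
// ContentRange.parse("bytes 0-1023/*").size()       -> null: more chunks coming
// ContentRange.parse("bytes 1024-2047/2048").size() -> 2048: final chunk
```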
javaRestTestImplementation project(':modules:repository-s3') + javaRestTestImplementation project(':test:fixtures:aws-fixture-utils') javaRestTestImplementation project(':test:fixtures:aws-sts-fixture') javaRestTestImplementation project(':test:fixtures:ec2-imds-fixture') javaRestTestImplementation project(':test:fixtures:minio-fixture') diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3BasicCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3BasicCredentialsRestIT.java index 45844703683bb..10901424512a3 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3BasicCredentialsRestIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3BasicCredentialsRestIT.java @@ -20,6 +20,8 @@ import org.junit.rules.RuleChain; import org.junit.rules.TestRule; +import static fixture.aws.AwsCredentialsUtils.fixedAccessKey; + @ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) @ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 public class RepositoryS3BasicCredentialsRestIT extends AbstractRepositoryS3RestTestCase { @@ -31,7 +33,7 @@ public class RepositoryS3BasicCredentialsRestIT extends AbstractRepositoryS3Rest private static final String SECRET_KEY = PREFIX + "secret-key"; private static final String CLIENT = "basic_credentials_client"; - private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, S3HttpFixture.fixedAccessKey(ACCESS_KEY)); + private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, fixedAccessKey(ACCESS_KEY)); public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .module("repository-s3") diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java index 4f0bf83000642..c525c2ea42f58 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsCredentialsRestIT.java @@ -9,10 +9,10 @@ package org.elasticsearch.repositories.s3; +import fixture.aws.DynamicAwsCredentials; import fixture.aws.imds.Ec2ImdsHttpFixture; import fixture.aws.imds.Ec2ImdsServiceBuilder; import fixture.aws.imds.Ec2ImdsVersion; -import fixture.s3.DynamicS3Credentials; import fixture.s3.S3HttpFixture; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; @@ -35,14 +35,14 @@ public class RepositoryS3EcsCredentialsRestIT extends AbstractRepositoryS3RestTe private static final String BASE_PATH = PREFIX + "base_path"; private static final String CLIENT = "ecs_credentials_client"; - private static final DynamicS3Credentials dynamicS3Credentials = new DynamicS3Credentials(); + private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials(); private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture( - new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V1).newCredentialsConsumer(dynamicS3Credentials::addValidCredentials) + new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V1).newCredentialsConsumer(dynamicCredentials::addValidCredentials) 
.alternativeCredentialsEndpoints(Set.of("/ecs_credentials_endpoint")) ); - private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicS3Credentials::isAuthorized); + private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicCredentials::isAuthorized); public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .module("repository-s3") diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java index bc41b9fd62ca9..bc689ea52ca32 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV1CredentialsRestIT.java @@ -9,10 +9,10 @@ package org.elasticsearch.repositories.s3; +import fixture.aws.DynamicAwsCredentials; import fixture.aws.imds.Ec2ImdsHttpFixture; import fixture.aws.imds.Ec2ImdsServiceBuilder; import fixture.aws.imds.Ec2ImdsVersion; -import fixture.s3.DynamicS3Credentials; import fixture.s3.S3HttpFixture; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; @@ -33,13 +33,13 @@ public class RepositoryS3ImdsV1CredentialsRestIT extends AbstractRepositoryS3Res private static final String BASE_PATH = PREFIX + "base_path"; private static final String CLIENT = "imdsv1_credentials_client"; - private static final DynamicS3Credentials dynamicS3Credentials = new DynamicS3Credentials(); + private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials(); private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture( - new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V1).newCredentialsConsumer(dynamicS3Credentials::addValidCredentials) + new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V1).newCredentialsConsumer(dynamicCredentials::addValidCredentials) ); - private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicS3Credentials::isAuthorized); + private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicCredentials::isAuthorized); public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .module("repository-s3") diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java index 34500ff5227f1..dedf205d3def2 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ImdsV2CredentialsRestIT.java @@ -9,10 +9,10 @@ package org.elasticsearch.repositories.s3; +import fixture.aws.DynamicAwsCredentials; import fixture.aws.imds.Ec2ImdsHttpFixture; import fixture.aws.imds.Ec2ImdsServiceBuilder; import fixture.aws.imds.Ec2ImdsVersion; -import fixture.s3.DynamicS3Credentials; import fixture.s3.S3HttpFixture; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; @@ -33,13 +33,13 @@ public class RepositoryS3ImdsV2CredentialsRestIT extends AbstractRepositoryS3Res private static final String BASE_PATH = PREFIX + "base_path"; private static final 
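The same refactor repeats across these S3 REST tests: the S3-specific `DynamicS3Credentials` registry is replaced by the reusable `DynamicAwsCredentials`, wired identically each time; whichever fixture issues credentials (IMDS v1/v2, the ECS endpoint, or STS) feeds `addValidCredentials`, and the S3 fixture authorizes incoming requests via `isAuthorized`. The recurring wiring, condensed from the hunks:

```java
// Condensed from the tests above: one shared registry links the
// credentials-issuing fixture to the S3 fixture's authorization check.
private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials();

private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture(
    new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V2).newCredentialsConsumer(dynamicCredentials::addValidCredentials)
);

private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicCredentials::isAuthorized);
```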
String CLIENT = "imdsv2_credentials_client"; - private static final DynamicS3Credentials dynamicS3Credentials = new DynamicS3Credentials(); + private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials(); private static final Ec2ImdsHttpFixture ec2ImdsHttpFixture = new Ec2ImdsHttpFixture( - new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V2).newCredentialsConsumer(dynamicS3Credentials::addValidCredentials) + new Ec2ImdsServiceBuilder(Ec2ImdsVersion.V2).newCredentialsConsumer(dynamicCredentials::addValidCredentials) ); - private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicS3Credentials::isAuthorized); + private static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicCredentials::isAuthorized); public static ElasticsearchCluster cluster = ElasticsearchCluster.local() .module("repository-s3") diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioBasicCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioBasicCredentialsRestIT.java index 93915e8491d5b..3d7c8dd150610 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioBasicCredentialsRestIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioBasicCredentialsRestIT.java @@ -19,13 +19,15 @@ import org.junit.rules.RuleChain; import org.junit.rules.TestRule; +import java.util.Locale; + @ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) @ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 public class RepositoryS3MinioBasicCredentialsRestIT extends AbstractRepositoryS3RestTestCase { - private static final String PREFIX = getIdentifierPrefix("RepositoryS3MinioBasicCredentialsRestIT"); + private static final String PREFIX = getIdentifierPrefix("RepositoryS3MinioBasicCredentialsRestIT").toLowerCase(Locale.ROOT); private static final String BUCKET = PREFIX + "bucket"; - private static final String BASE_PATH = PREFIX + "base_path"; + private static final String BASE_PATH = PREFIX + "base-path"; private static final String ACCESS_KEY = PREFIX + "access-key"; private static final String SECRET_KEY = PREFIX + "secret-key"; private static final String CLIENT = "minio_client"; diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestReloadCredentialsIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestReloadCredentialsIT.java index 1f09fa6b081b9..065d1c6c9ea27 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestReloadCredentialsIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RestReloadCredentialsIT.java @@ -26,6 +26,7 @@ import java.io.IOException; +import static fixture.aws.AwsCredentialsUtils.mutableAccessKey; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; @@ -38,12 +39,7 @@ public class RepositoryS3RestReloadCredentialsIT extends ESRestTestCase { private static volatile String repositoryAccessKey; - public static final S3HttpFixture s3Fixture = new S3HttpFixture( - true, - BUCKET, - BASE_PATH, - S3HttpFixture.mutableAccessKey(() -> repositoryAccessKey) - ); + 
public static final S3HttpFixture s3Fixture = new S3HttpFixture(true, BUCKET, BASE_PATH, mutableAccessKey(() -> repositoryAccessKey)); private static final MutableSettingsProvider keystoreSettings = new MutableSettingsProvider(); diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3SessionCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3SessionCredentialsRestIT.java index 84a327ee131ae..a8009d594926f 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3SessionCredentialsRestIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3SessionCredentialsRestIT.java @@ -20,6 +20,8 @@ import org.junit.rules.RuleChain; import org.junit.rules.TestRule; +import static fixture.aws.AwsCredentialsUtils.fixedAccessKeyAndToken; + @ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) @ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 public class RepositoryS3SessionCredentialsRestIT extends AbstractRepositoryS3RestTestCase { @@ -36,7 +38,7 @@ public class RepositoryS3SessionCredentialsRestIT extends AbstractRepositoryS3Re true, BUCKET, BASE_PATH, - S3HttpFixture.fixedAccessKeyAndToken(ACCESS_KEY, SESSION_TOKEN) + fixedAccessKeyAndToken(ACCESS_KEY, SESSION_TOKEN) ); public static ElasticsearchCluster cluster = ElasticsearchCluster.local() diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsCredentialsRestIT.java index de80e4179ef5e..61f0acdb16154 100644 --- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsCredentialsRestIT.java +++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsCredentialsRestIT.java @@ -9,8 +9,8 @@ package org.elasticsearch.repositories.s3; +import fixture.aws.DynamicAwsCredentials; import fixture.aws.sts.AwsStsHttpFixture; -import fixture.s3.DynamicS3Credentials; import fixture.s3.S3HttpFixture; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; @@ -32,9 +32,9 @@ public class RepositoryS3StsCredentialsRestIT extends AbstractRepositoryS3RestTe private static final String BASE_PATH = PREFIX + "base_path"; private static final String CLIENT = "sts_credentials_client"; - private static final DynamicS3Credentials dynamicS3Credentials = new DynamicS3Credentials(); + private static final DynamicAwsCredentials dynamicCredentials = new DynamicAwsCredentials(); - private static final S3HttpFixture s3HttpFixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicS3Credentials::isAuthorized); + private static final S3HttpFixture s3HttpFixture = new S3HttpFixture(true, BUCKET, BASE_PATH, dynamicCredentials::isAuthorized); private static final String WEB_IDENTITY_TOKEN_FILE_CONTENTS = """ Atza|IQEBLjAsAhRFiXuWpUXuRvQ9PZL3GMFcYevydwIUFAHZwXZXXXXXXXXJnrulxKDHwy87oGKPznh0D6bEQZTSCzyoCtL_8S07pLpr0zMbn6w1lfVZKNTBdDans\ @@ -42,7 +42,7 @@ public class RepositoryS3StsCredentialsRestIT extends AbstractRepositoryS3RestTe zTQxod27L9CqnOLio7N3gZAGpsp6n1-AJBOCJckcyXe2c6uD0srOJeZlKUm2eTDVMf8IehDVI0r1QOnTV6KzzAI3OY87Vd_cVMQ"""; private static final AwsStsHttpFixture stsHttpFixture = new AwsStsHttpFixture( - dynamicS3Credentials::addValidCredentials, + 
dynamicCredentials::addValidCredentials, WEB_IDENTITY_TOKEN_FILE_CONTENTS ); diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoriesMetrics.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoriesMetrics.java index 03106c26c9a29..6f28eda81a9b9 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoriesMetrics.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoriesMetrics.java @@ -21,7 +21,7 @@ public record S3RepositoriesMetrics( LongHistogram retryDeletesHistogram ) { - public static S3RepositoriesMetrics NOOP = new S3RepositoriesMetrics(RepositoriesMetrics.NOOP); + public static final S3RepositoriesMetrics NOOP = new S3RepositoriesMetrics(RepositoriesMetrics.NOOP); public static final String METRIC_RETRY_EVENT_TOTAL = "es.repositories.s3.input_stream.retry.event.total"; public static final String METRIC_RETRY_SUCCESS_TOTAL = "es.repositories.s3.input_stream.retry.success.total"; diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 1a66f5782fc03..a8a6986ccbb7a 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -434,7 +434,7 @@ public void onFileCreated(Path file) { public void onFileChanged(Path file) { if (file.equals(webIdentityTokenFileSymlink)) { LOGGER.debug("WS web identity token file [{}] changed, updating credentials", file); - credentialsProvider.refresh(); + SocketAccess.doPrivilegedVoid(credentialsProvider::refresh); } } }); diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java index 3d34934e54945..89919f0f3ddf1 100644 --- a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java @@ -23,6 +23,8 @@ import org.junit.rules.RuleChain; import org.junit.rules.TestRule; +import static fixture.aws.AwsCredentialsUtils.fixedAccessKey; + @ThreadLeakFilters(filters = { TestContainersThreadFilter.class }) @ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482 public class RepositoryS3ClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { @@ -34,7 +36,7 @@ public class RepositoryS3ClientYamlTestSuiteIT extends AbstractRepositoryS3Clien true, "bucket", "base_path_integration_tests", - S3HttpFixture.fixedAccessKey(ACCESS_KEY) + fixedAccessKey(ACCESS_KEY) ); public static ElasticsearchCluster cluster = ElasticsearchCluster.local() diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index fd1c5089cdf1c..857a9b00f8985 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -14,8 +14,8 @@ apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'Module for URL repository' - classname 'org.elasticsearch.plugin.repository.url.URLRepositoryPlugin' + description = 'Module for URL repository' + 
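In the `S3Service` hunk above, the credentials refresh moves inside `SocketAccess.doPrivilegedVoid` because the file-watcher callback runs with caller frames that lack the plugin's socket permissions; wrapping the network-touching refresh makes the SecurityManager check evaluate against the plugin's own grants. A sketch of the general idiom, under that assumption (the real `SocketAccess` helper in repository-s3 serves the same purpose):

```java
import java.security.AccessController;
import java.security.PrivilegedAction;

// Sketch of the doPrivilegedVoid idiom, not the actual SocketAccess class.
final class PrivilegedSketch {
    private PrivilegedSketch() {}

    static void doPrivilegedVoid(Runnable action) {
        // Truncates the permission check at this class's protection domain,
        // so an unprivileged caller (e.g. a file-watcher thread) can still
        // trigger an action the plugin itself has been granted.
        AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
            action.run();
            return null;
        });
    }
}
```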
classname = 'org.elasticsearch.plugin.repository.url.URLRepositoryPlugin' } restResources { diff --git a/modules/rest-root/build.gradle b/modules/rest-root/build.gradle index adb8aeb02863f..a387cc4c5dd5e 100644 --- a/modules/rest-root/build.gradle +++ b/modules/rest-root/build.gradle @@ -11,8 +11,8 @@ apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'Adds HEAD and GET / endpoint to Elasticsearch' - classname 'org.elasticsearch.rest.root.MainRestPlugin' + description = 'Adds HEAD and GET / endpoint to Elasticsearch' + classname = 'org.elasticsearch.rest.root.MainRestPlugin' } restResources { diff --git a/modules/runtime-fields-common/build.gradle b/modules/runtime-fields-common/build.gradle index e8e06f0a9c4c7..0cbb6a4d43c97 100644 --- a/modules/runtime-fields-common/build.gradle +++ b/modules/runtime-fields-common/build.gradle @@ -12,8 +12,8 @@ apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { - description 'Module for runtime fields features and extensions that have large dependencies' - classname 'org.elasticsearch.runtimefields.RuntimeFieldsCommonPlugin' + description = 'Module for runtime fields features and extensions that have large dependencies' + classname = 'org.elasticsearch.runtimefields.RuntimeFieldsCommonPlugin' extendedPlugins = ['lang-painless'] } diff --git a/modules/systemd/build.gradle b/modules/systemd/build.gradle index 8eb48e1d5f638..9155554797587 100644 --- a/modules/systemd/build.gradle +++ b/modules/systemd/build.gradle @@ -8,8 +8,8 @@ */ esplugin { - description 'Integrates Elasticsearch with systemd' - classname 'org.elasticsearch.systemd.SystemdPlugin' + description = 'Integrates Elasticsearch with systemd' + classname = 'org.elasticsearch.systemd.SystemdPlugin' } dependencies { diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 13dfdf2b3c7bc..4b64b9c56917d 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -23,8 +23,8 @@ apply plugin: 'elasticsearch.publish' * fix the hack in the build framework that copies transport-netty4 into the integ test cluster */ esplugin { - description 'Netty 4 based transport implementation' - classname 'org.elasticsearch.transport.netty4.Netty4Plugin' + description = 'Netty 4 based transport implementation' + classname = 'org.elasticsearch.transport.netty4.Netty4Plugin' } // exclude transitively pulled in version via the esplugin plugin to always build from fresh sources and make jar-hell checks pass diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java index 4bb27af4bd0f5..ab2fb41d5a22b 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java @@ -594,7 +594,7 @@ record Ctx(String testName, String nodeName, Bootstrap clientBootstrap, Channel @Override public void close() throws Exception { safeGet(clientChannel.close()); - safeGet(clientBootstrap.config().group().shutdownGracefully()); + safeGet(clientBootstrap.config().group().shutdownGracefully(0, 0, TimeUnit.SECONDS));
clientRespQueue.forEach(o -> { if (o instanceof FullHttpResponse resp) resp.release(); }); for (var opaqueId : ControlServerRequestPlugin.handlers.keySet()) { if (opaqueId.startsWith(testName)) { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java index 1a391a05add58..4809f1a1a275b 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java @@ -139,7 +139,8 @@ public void channelRead(final ChannelHandlerContext ctx, final Object msg) { } else { var contentStream = new Netty4HttpRequestBodyStream( ctx.channel(), - serverTransport.getThreadPool().getThreadContext() + serverTransport.getThreadPool().getThreadContext(), + activityTracker ); currentRequestStream = contentStream; netty4HttpRequest = new Netty4HttpRequest(readSequence++, request, contentStream); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java index ac3e3aecf97b9..0902e707b706e 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStream.java @@ -16,6 +16,7 @@ import io.netty.handler.codec.http.HttpContent; import io.netty.handler.codec.http.LastHttpContent; +import org.elasticsearch.common.network.ThreadWatchdog; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Releasables; import org.elasticsearch.http.HttpBody; @@ -36,6 +37,7 @@ public class Netty4HttpRequestBodyStream implements HttpBody.Stream { private final ChannelFutureListener closeListener = future -> doClose(); private final List tracingHandlers = new ArrayList<>(4); private final ThreadContext threadContext; + private final ThreadWatchdog.ActivityTracker activityTracker; private ByteBuf buf; private boolean requested = false; private boolean closing = false; @@ -46,10 +48,11 @@ public class Netty4HttpRequestBodyStream implements HttpBody.Stream { private volatile int bufSize = 0; private volatile boolean hasLast = false; - public Netty4HttpRequestBodyStream(Channel channel, ThreadContext threadContext) { + public Netty4HttpRequestBodyStream(Channel channel, ThreadContext threadContext, ThreadWatchdog.ActivityTracker activityTracker) { this.channel = channel; this.threadContext = threadContext; this.requestContext = threadContext.newStoredContext(); + this.activityTracker = activityTracker; Netty4Utils.addListener(channel.closeFuture(), closeListener); channel.config().setAutoRead(false); } @@ -76,15 +79,18 @@ public void next() { assert handler != null : "handler must be set before requesting next chunk"; requestContext = threadContext.newStoredContext(); channel.eventLoop().submit(() -> { + activityTracker.startActivity(); requested = true; - if (buf == null) { - channel.read(); - } else { - try { + try { + if (buf == null) { + channel.read(); + } else { send(); - } catch (Exception e) { - channel.pipeline().fireExceptionCaught(e); } + } catch (Throwable e) { + channel.pipeline().fireExceptionCaught(e); + } finally { + activityTracker.stopActivity(); } }); } diff --git 
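The `Netty4HttpRequestBodyStream` rewrite above does two things at once: it brackets each `next()` task with the `ThreadWatchdog` activity tracker (exactly one `startActivity`/`stopActivity` pair per task, which is why the new test further down asserts `N * 2` tracker increments for `N` calls), and it widens `catch (Exception)` to `catch (Throwable)` so that even an `OutOfMemoryError` is routed into the channel pipeline rather than leaving a dangling stream and a hanging channel. The resulting task body:

```java
// Condensed from the hunk above.
channel.eventLoop().submit(() -> {
    activityTracker.startActivity();
    requested = true;
    try {
        if (buf == null) {
            channel.read();   // nothing buffered yet: ask the channel for more
        } else {
            send();           // flush the chunk we already hold
        }
    } catch (Throwable e) {
        // Route every failure, including Errors, to the pipeline handlers.
        channel.pipeline().fireExceptionCaught(e);
    } finally {
        activityTracker.stopActivity();
    }
});
```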
a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java index 449fd72669dad..3feaa2874ebdc 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java @@ -91,7 +91,7 @@ public class Netty4Plugin extends Plugin implements NetworkPlugin { */ private static final ByteSizeValue MTU = ByteSizeValue.ofBytes(Long.parseLong(System.getProperty("es.net.mtu", "1500"))); private static final String SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = "http.netty.max_composite_buffer_components"; - public static Setting SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = new Setting<>( + public static final Setting SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = new Setting<>( SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS, (s) -> { ByteSizeValue maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(s); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStreamTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStreamTests.java index d456bbecfbd20..7492737d4f877 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStreamTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpRequestBodyStreamTests.java @@ -11,6 +11,8 @@ import io.netty.buffer.Unpooled; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.DefaultEventLoop; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.http.DefaultHttpContent; @@ -19,6 +21,7 @@ import io.netty.handler.flow.FlowControlHandler; import org.elasticsearch.common.bytes.ReleasableBytesReference; +import org.elasticsearch.common.network.ThreadWatchdog; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.HttpBody; @@ -27,6 +30,8 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -35,17 +40,18 @@ public class Netty4HttpRequestBodyStreamTests extends ESTestCase { + static HttpBody.ChunkHandler discardHandler = (chunk, isLast) -> chunk.close(); private final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); private EmbeddedChannel channel; private Netty4HttpRequestBodyStream stream; - static HttpBody.ChunkHandler discardHandler = (chunk, isLast) -> chunk.close(); + private ThreadWatchdog.ActivityTracker activityTracker; @Override public void setUp() throws Exception { super.setUp(); channel = new EmbeddedChannel(); - threadContext.putHeader("header1", "value1"); - stream = new Netty4HttpRequestBodyStream(channel, threadContext); + activityTracker = new ThreadWatchdog.ActivityTracker(); + stream = new Netty4HttpRequestBodyStream(channel, threadContext, activityTracker); stream.setHandler(discardHandler); // set default handler, each test might override one 
channel.pipeline().addLast(new SimpleChannelInboundHandler(false) { @Override @@ -128,57 +134,112 @@ public void testReadFromChannel() { } public void testReadFromHasCorrectThreadContext() throws InterruptedException { - var gotLast = new AtomicBoolean(false); AtomicReference> headers = new AtomicReference<>(); - stream.setHandler(new HttpBody.ChunkHandler() { - @Override - public void onNext(ReleasableBytesReference chunk, boolean isLast) { - headers.set(threadContext.getHeaders()); - gotLast.set(isLast); - chunk.close(); - } - - @Override - public void close() { - headers.set(threadContext.getHeaders()); - } - }); - channel.pipeline().addFirst(new FlowControlHandler()); // block all incoming messages, need explicit channel.read() + var eventLoop = new DefaultEventLoop(); + var gotLast = new AtomicBoolean(false); var chunkSize = 1024; + threadContext.putHeader("header1", "value1"); + try { + // activity tracker requires stream execution in the same thread, setting up stream inside event-loop + eventLoop.submit(() -> { + channel = new EmbeddedChannel(); + stream = new Netty4HttpRequestBodyStream(channel, threadContext, new ThreadWatchdog.ActivityTracker()); + channel.pipeline().addLast(new SimpleChannelInboundHandler(false) { + @Override + protected void channelRead0(ChannelHandlerContext ctx, HttpContent msg) { + stream.handleNettyContent(msg); + } + }); + stream.setHandler(new HttpBody.ChunkHandler() { + @Override + public void onNext(ReleasableBytesReference chunk, boolean isLast) { + headers.set(threadContext.getHeaders()); + gotLast.set(isLast); + chunk.close(); + } + + @Override + public void close() { + headers.set(threadContext.getHeaders()); + } + }); + channel.pipeline().addFirst(new FlowControlHandler()); // block all incoming messages, need explicit channel.read() + }).await(); - channel.writeInbound(randomContent(chunkSize)); - channel.writeInbound(randomLastContent(chunkSize)); + channel.writeInbound(randomContent(chunkSize)); + channel.writeInbound(randomLastContent(chunkSize)); - threadContext.putHeader("header2", "value2"); - stream.next(); + threadContext.putHeader("header2", "value2"); + stream.next(); - Thread thread = new Thread(() -> channel.runPendingTasks()); - thread.start(); - thread.join(); + eventLoop.submit(() -> channel.runPendingTasks()).await(); + assertThat(headers.get(), hasEntry("header1", "value1")); + assertThat(headers.get(), hasEntry("header2", "value2")); - assertThat(headers.get(), hasEntry("header1", "value1")); - assertThat(headers.get(), hasEntry("header2", "value2")); + threadContext.putHeader("header3", "value3"); + stream.next(); - threadContext.putHeader("header3", "value3"); - stream.next(); + eventLoop.submit(() -> channel.runPendingTasks()).await(); + assertThat(headers.get(), hasEntry("header1", "value1")); + assertThat(headers.get(), hasEntry("header2", "value2")); + assertThat(headers.get(), hasEntry("header3", "value3")); - thread = new Thread(() -> channel.runPendingTasks()); - thread.start(); - thread.join(); + assertTrue("should receive last content", gotLast.get()); - assertThat(headers.get(), hasEntry("header1", "value1")); - assertThat(headers.get(), hasEntry("header2", "value2")); - assertThat(headers.get(), hasEntry("header3", "value3")); + headers.set(new HashMap<>()); - assertTrue("should receive last content", gotLast.get()); + stream.close(); + + assertThat(headers.get(), hasEntry("header1", "value1")); + assertThat(headers.get(), hasEntry("header2", "value2")); + assertThat(headers.get(), hasEntry("header3", "value3")); + 
} finally { + eventLoop.shutdownGracefully(0, 0, TimeUnit.SECONDS); + } + } - headers.set(new HashMap<>()); + public void testStreamNextActivityTracker() { + var t0 = activityTracker.get(); + var N = between(1, 10); + for (int i = 0; i < N; i++) { + channel.writeInbound(randomContent(1024)); + stream.next(); + channel.runPendingTasks(); + } + var t1 = activityTracker.get(); + assertEquals("stream#next() must trigger activity tracker: N*step=" + N + "*2=" + N * 2L + " times", t1, t0 + N * 2L); + } - stream.close(); + // ensure that we catch all exceptions and throw them into channel pipeline + public void testCatchExceptions() { + var gotExceptions = new CountDownLatch(3); // number of tests below + + channel.pipeline().addLast(new ChannelInboundHandlerAdapter() { + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + gotExceptions.countDown(); + } + }); + + // catch exception for not buffered chunk, will be thrown on channel.fireChannelRead() + stream.setHandler((a, b) -> { throw new RuntimeException(); }); + stream.next(); + channel.runPendingTasks(); + channel.writeInbound(randomContent(1)); + + // catch exception for buffered chunk, will be thrown from eventLoop.submit() + channel.writeInbound(randomContent(1)); + stream.next(); + channel.runPendingTasks(); + + // should catch OOM exceptions too, see DieWithDignity + // swallowing exceptions can result in dangling streams, hanging channels, and delayed shutdowns + stream.setHandler((a, b) -> { throw new OutOfMemoryError(); }); + channel.writeInbound(randomContent(1)); + stream.next(); + channel.runPendingTasks(); - assertThat(headers.get(), hasEntry("header1", "value1")); - assertThat(headers.get(), hasEntry("header2", "value2")); - assertThat(headers.get(), hasEntry("header3", "value3")); + safeAwait(gotExceptions); } HttpContent randomContent(int size, boolean isLast) { diff --git a/muted-tests.yml b/muted-tests.yml index 2689f02cc92cd..9766d3ed35f18 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -40,9 +40,6 @@ tests: - class: org.elasticsearch.packaging.test.WindowsServiceTests method: test33JavaChanged issue: https://github.com/elastic/elasticsearch/issues/113177 -- class: org.elasticsearch.smoketest.MlWithSecurityIT - method: test {yaml=ml/sparse_vector_search/Test sparse_vector search with query vector and pruning config} - issue: https://github.com/elastic/elasticsearch/issues/108997 - class: org.elasticsearch.packaging.test.WindowsServiceTests method: test80JavaOptsInEnvVar issue: https://github.com/elastic/elasticsearch/issues/113219 @@ -55,9 +52,6 @@ tests: - class: org.elasticsearch.xpack.transform.integration.TransformIT method: testStopWaitForCheckpoint issue: https://github.com/elastic/elasticsearch/issues/106113 -- class: org.elasticsearch.kibana.KibanaThreadPoolIT - method: testBlockedThreadPoolsRejectUserRequests - issue: https://github.com/elastic/elasticsearch/issues/113939 - class: org.elasticsearch.xpack.inference.TextEmbeddingCrudIT method: testPutE5Small_withPlatformAgnosticVariant issue: https://github.com/elastic/elasticsearch/issues/113983 @@ -67,9 +61,6 @@ tests: - class: org.elasticsearch.xpack.inference.TextEmbeddingCrudIT method: testPutE5Small_withPlatformSpecificVariant issue: https://github.com/elastic/elasticsearch/issues/113950 -- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT - method: test {yaml=reference/rest-api/usage/line_38} - issue: https://github.com/elastic/elasticsearch/issues/113694 - class: 
org.elasticsearch.xpack.remotecluster.RemoteClusterSecurityWithApmTracingRestIT method: testTracingCrossCluster issue: https://github.com/elastic/elasticsearch/issues/112731 @@ -90,12 +81,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/115816 - class: org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests issue: https://github.com/elastic/elasticsearch/issues/116087 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=cat.shards/10_basic/Help} - issue: https://github.com/elastic/elasticsearch/issues/116110 -- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT - method: testLookbackWithIndicesOptions - issue: https://github.com/elastic/elasticsearch/issues/116127 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/transforms_start_stop/Test start already started transform} issue: https://github.com/elastic/elasticsearch/issues/98802 @@ -111,9 +96,6 @@ tests: - class: org.elasticsearch.xpack.shutdown.NodeShutdownIT method: testAllocationPreventedForRemoval issue: https://github.com/elastic/elasticsearch/issues/116363 -- class: org.elasticsearch.xpack.downsample.ILMDownsampleDisruptionIT - method: testILMDownsampleRollingRestart - issue: https://github.com/elastic/elasticsearch/issues/114233 - class: org.elasticsearch.reservedstate.service.RepositoriesFileSettingsIT method: testSettingsApplied issue: https://github.com/elastic/elasticsearch/issues/116694 @@ -122,8 +104,6 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=snapshot/20_operator_privileges_disabled/Operator only settings can be set and restored by non-operator user when operator privileges is disabled} issue: https://github.com/elastic/elasticsearch/issues/116775 -- class: org.elasticsearch.xpack.searchablesnapshots.hdfs.SecureHdfsSearchableSnapshotsIT - issue: https://github.com/elastic/elasticsearch/issues/116851 - class: org.elasticsearch.search.basic.SearchWithRandomIOExceptionsIT method: testRandomDirectoryIOExceptions issue: https://github.com/elastic/elasticsearch/issues/114824 @@ -154,122 +134,42 @@ tests: - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/30_semantic_text_inference/Calculates embeddings using the default ELSER 2 endpoint} issue: https://github.com/elastic/elasticsearch/issues/117349 +- class: org.elasticsearch.xpack.inference.InferenceRestIT + method: test {p0=inference/30_semantic_text_inference_bwc/Calculates embeddings using the default ELSER 2 endpoint} + issue: https://github.com/elastic/elasticsearch/issues/117349 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/transforms_reset/Test reset running transform} issue: https://github.com/elastic/elasticsearch/issues/117473 -- class: org.elasticsearch.repositories.s3.RepositoryS3EcsClientYamlTestSuiteIT - issue: https://github.com/elastic/elasticsearch/issues/117525 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set} - issue: https://github.com/elastic/elasticsearch/issues/116777 -- class: "org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT" - method: "test {scoring.*}" - issue: https://github.com/elastic/elasticsearch/issues/117641 -- class: "org.elasticsearch.xpack.esql.qa.single_node.EsqlSpecIT" - method: "test {scoring.*}" - issue: https://github.com/elastic/elasticsearch/issues/117641 -- class: 
"org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT" - method: "test {scoring.*}" - issue: https://github.com/elastic/elasticsearch/issues/117641 -- class: "org.elasticsearch.xpack.esql.qa.mixed.MultiClusterEsqlSpecIT" - method: "test {scoring.*}" - issue: https://github.com/elastic/elasticsearch/issues/118460 -- class: "org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT" - method: "test {scoring.*}" - issue: https://github.com/elastic/elasticsearch/issues/118460 -- class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT - method: test {scoring.QstrWithFieldAndScoringSortedEval} - issue: https://github.com/elastic/elasticsearch/issues/117751 - class: org.elasticsearch.search.ccs.CrossClusterIT method: testCancel issue: https://github.com/elastic/elasticsearch/issues/108061 - class: org.elasticsearch.test.rest.yaml.CcsCommonYamlTestSuiteIT method: test {p0=search.highlight/50_synthetic_source/text multi unified from vectors} issue: https://github.com/elastic/elasticsearch/issues/117815 -- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT - issue: https://github.com/elastic/elasticsearch/issues/111319 -- class: org.elasticsearch.packaging.test.ArchiveGenerateInitialCredentialsTests - method: test20NoAutoGenerationWhenAutoConfigurationDisabled - issue: https://github.com/elastic/elasticsearch/issues/117891 -- class: org.elasticsearch.packaging.test.BootstrapCheckTests - method: test20RunWithBootstrapChecks - issue: https://github.com/elastic/elasticsearch/issues/117890 - class: org.elasticsearch.xpack.esql.plugin.ClusterRequestTests method: testFallbackIndicesOptions issue: https://github.com/elastic/elasticsearch/issues/117937 - class: org.elasticsearch.xpack.ml.integration.RegressionIT method: testTwoJobsWithSameRandomizeSeedUseSameTrainingSet issue: https://github.com/elastic/elasticsearch/issues/117805 -- class: org.elasticsearch.packaging.test.ArchiveGenerateInitialCredentialsTests - method: test30NoAutogenerationWhenDaemonized - issue: https://github.com/elastic/elasticsearch/issues/117956 -- class: org.elasticsearch.packaging.test.CertGenCliTests - method: test40RunWithCert - issue: https://github.com/elastic/elasticsearch/issues/117955 -- class: org.elasticsearch.upgrades.QueryBuilderBWCIT - method: testQueryBuilderBWC {cluster=UPGRADED} - issue: https://github.com/elastic/elasticsearch/issues/116990 -- class: org.elasticsearch.xpack.restart.QueryBuilderBWCIT - method: testQueryBuilderBWC {p0=UPGRADED} - issue: https://github.com/elastic/elasticsearch/issues/116989 -- class: org.elasticsearch.index.reindex.ReindexNodeShutdownIT - method: testReindexWithShutdown - issue: https://github.com/elastic/elasticsearch/issues/118040 -- class: org.elasticsearch.packaging.test.ConfigurationTests - method: test20HostnameSubstitution - issue: https://github.com/elastic/elasticsearch/issues/118028 -- class: org.elasticsearch.packaging.test.ArchiveTests - method: test40AutoconfigurationNotTriggeredWhenNodeIsMeantToJoinExistingCluster - issue: https://github.com/elastic/elasticsearch/issues/118029 -- class: org.elasticsearch.packaging.test.ConfigurationTests - method: test30SymlinkedDataPath - issue: https://github.com/elastic/elasticsearch/issues/118111 -- class: org.elasticsearch.packaging.test.KeystoreManagementTests - method: test30KeystorePasswordFromFile - issue: https://github.com/elastic/elasticsearch/issues/118123 -- class: org.elasticsearch.packaging.test.KeystoreManagementTests - method: test31WrongKeystorePasswordFromFile - issue: 
https://github.com/elastic/elasticsearch/issues/118123 -- class: org.elasticsearch.packaging.test.ArchiveTests - method: test41AutoconfigurationNotTriggeredWhenNodeCannotContainData - issue: https://github.com/elastic/elasticsearch/issues/118110 - class: org.elasticsearch.xpack.remotecluster.CrossClusterEsqlRCS2UnavailableRemotesIT method: testEsqlRcs2UnavailableRemoteScenarios issue: https://github.com/elastic/elasticsearch/issues/117419 -- class: org.elasticsearch.packaging.test.DebPreservationTests - method: test40RestartOnUpgrade - issue: https://github.com/elastic/elasticsearch/issues/118170 - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultRerank issue: https://github.com/elastic/elasticsearch/issues/118184 - class: org.elasticsearch.xpack.esql.action.EsqlActionTaskIT method: testCancelRequestWhenFailingFetchingPages issue: https://github.com/elastic/elasticsearch/issues/118193 -- class: org.elasticsearch.packaging.test.MemoryLockingTests - method: test20MemoryLockingEnabled - issue: https://github.com/elastic/elasticsearch/issues/118195 -- class: org.elasticsearch.packaging.test.ArchiveTests - method: test42AutoconfigurationNotTriggeredWhenNodeCannotBecomeMaster - issue: https://github.com/elastic/elasticsearch/issues/118196 -- class: org.elasticsearch.packaging.test.ArchiveTests - method: test43AutoconfigurationNotTriggeredWhenTlsAlreadyConfigured - issue: https://github.com/elastic/elasticsearch/issues/118202 - class: org.elasticsearch.packaging.test.ArchiveTests method: test44AutoConfigurationNotTriggeredOnNotWriteableConfDir issue: https://github.com/elastic/elasticsearch/issues/118208 - class: org.elasticsearch.packaging.test.ArchiveTests method: test51AutoConfigurationWithPasswordProtectedKeystore issue: https://github.com/elastic/elasticsearch/issues/118212 -- class: org.elasticsearch.ingest.common.IngestCommonClientYamlTestSuiteIT - issue: https://github.com/elastic/elasticsearch/issues/118215 - class: org.elasticsearch.datastreams.DataStreamsClientYamlTestSuiteIT method: test {p0=data_stream/120_data_streams_stats/Multiple data stream} issue: https://github.com/elastic/elasticsearch/issues/118217 -- class: org.elasticsearch.validation.DotPrefixClientYamlTestSuiteIT - issue: https://github.com/elastic/elasticsearch/issues/118224 -- class: org.elasticsearch.packaging.test.ArchiveTests - method: test60StartAndStop - issue: https://github.com/elastic/elasticsearch/issues/118216 - class: org.elasticsearch.action.search.SearchQueryThenFetchAsyncActionTests method: testBottomFieldSort issue: https://github.com/elastic/elasticsearch/issues/118214 @@ -279,29 +179,90 @@ tests: - class: org.elasticsearch.xpack.searchablesnapshots.RetrySearchIntegTests method: testSearcherId issue: https://github.com/elastic/elasticsearch/issues/118374 -- class: org.elasticsearch.docker.test.DockerYmlTestSuiteIT - method: test {p0=/10_info/Info} - issue: https://github.com/elastic/elasticsearch/issues/118394 -- class: org.elasticsearch.docker.test.DockerYmlTestSuiteIT - method: test {p0=/11_nodes/Additional disk information} - issue: https://github.com/elastic/elasticsearch/issues/118395 -- class: org.elasticsearch.docker.test.DockerYmlTestSuiteIT - method: test {p0=/11_nodes/Test cat nodes output with full_id set} - issue: https://github.com/elastic/elasticsearch/issues/118396 -- class: org.elasticsearch.docker.test.DockerYmlTestSuiteIT - method: test {p0=/11_nodes/Test cat nodes output} - issue: https://github.com/elastic/elasticsearch/issues/118397 -- 
class: org.elasticsearch.xpack.security.operator.OperatorPrivilegesIT - method: testEveryActionIsEitherOperatorOnlyOrNonOperator - issue: https://github.com/elastic/elasticsearch/issues/118220 - class: org.elasticsearch.xpack.esql.action.EsqlActionBreakerIT issue: https://github.com/elastic/elasticsearch/issues/118238 -- class: org.elasticsearch.packaging.test.DockerTests - method: test011SecurityEnabledStatus - issue: https://github.com/elastic/elasticsearch/issues/118517 - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests method: testInvalidJSON issue: https://github.com/elastic/elasticsearch/issues/116521 +- class: org.elasticsearch.xpack.ccr.rest.ShardChangesRestIT + method: testShardChangesNoOperation + issue: https://github.com/elastic/elasticsearch/issues/118800 +- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT + method: test {yaml=reference/indices/shard-stores/line_150} + issue: https://github.com/elastic/elasticsearch/issues/118896 +- class: org.elasticsearch.cluster.service.MasterServiceTests + method: testThreadContext + issue: https://github.com/elastic/elasticsearch/issues/118914 +- class: org.elasticsearch.xpack.security.authc.AuthenticationServiceTests + method: testInvalidToken + issue: https://github.com/elastic/elasticsearch/issues/119019 +- class: org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryRunAsIT + issue: https://github.com/elastic/elasticsearch/issues/115727 +- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT + method: test {yaml=reference/search/search-your-data/retrievers-examples/line_98} + issue: https://github.com/elastic/elasticsearch/issues/119155 +- class: org.elasticsearch.xpack.esql.action.EsqlNodeFailureIT + method: testFailureLoadingFields + issue: https://github.com/elastic/elasticsearch/issues/118000 +- class: org.elasticsearch.index.mapper.AbstractShapeGeometryFieldMapperTests + method: testCartesianBoundsBlockLoader + issue: https://github.com/elastic/elasticsearch/issues/119201 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=transform/transforms_start_stop/Test start/stop/start transform} + issue: https://github.com/elastic/elasticsearch/issues/119508 +- class: org.elasticsearch.smoketest.MlWithSecurityIT + method: test {yaml=ml/sparse_vector_search/Test sparse_vector search with query vector and pruning config} + issue: https://github.com/elastic/elasticsearch/issues/119548 +- class: org.elasticsearch.index.engine.LuceneSyntheticSourceChangesSnapshotTests + method: testSkipNonRootOfNestedDocuments + issue: https://github.com/elastic/elasticsearch/issues/119553 +- class: org.elasticsearch.xpack.ml.integration.ForecastIT + method: testOverflowToDisk + issue: https://github.com/elastic/elasticsearch/issues/117740 +- class: org.elasticsearch.xpack.security.authc.ldap.MultiGroupMappingIT + issue: https://github.com/elastic/elasticsearch/issues/119599 +- class: org.elasticsearch.search.profile.dfs.DfsProfilerIT + method: testProfileDfs + issue: https://github.com/elastic/elasticsearch/issues/119711 +- class: org.elasticsearch.xpack.inference.InferenceCrudIT + method: testGetServicesWithCompletionTaskType + issue: https://github.com/elastic/elasticsearch/issues/119959 +- class: org.elasticsearch.multi_cluster.MultiClusterYamlTestSuiteIT + issue: https://github.com/elastic/elasticsearch/issues/119983 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=transform/transforms_unattended/Test unattended put and start} + issue: 
https://github.com/elastic/elasticsearch/issues/120019 +- class: org.elasticsearch.index.mapper.IntervalThrottlerTests + method: testThrottling + issue: https://github.com/elastic/elasticsearch/issues/120023 +- class: org.elasticsearch.entitlement.qa.EntitlementsDeniedIT + method: testCheckThrows {pathPrefix=denied actionName=sslSessionImpl_getSessionContext} + issue: https://github.com/elastic/elasticsearch/issues/120053 +- class: org.elasticsearch.entitlement.qa.EntitlementsDeniedIT + method: testCheckThrows {pathPrefix=denied_nonmodular actionName=sslSessionImpl_getSessionContext} + issue: https://github.com/elastic/elasticsearch/issues/120054 +- class: org.elasticsearch.xpack.ilm.actions.SearchableSnapshotActionIT + method: testUpdatePolicyToAddPhasesYieldsInvalidActionsToBeSkipped + issue: https://github.com/elastic/elasticsearch/issues/118406 +- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsIT + issue: https://github.com/elastic/elasticsearch/issues/120088 +- class: org.elasticsearch.xpack.searchablesnapshots.minio.MinioSearchableSnapshotsIT + issue: https://github.com/elastic/elasticsearch/issues/120101 +- class: org.elasticsearch.repositories.s3.S3RepositoryThirdPartyTests + issue: https://github.com/elastic/elasticsearch/issues/120115 +- class: org.elasticsearch.repositories.s3.RepositoryS3MinioBasicCredentialsRestIT + issue: https://github.com/elastic/elasticsearch/issues/120117 +- class: org.elasticsearch.repositories.blobstore.testkit.analyze.MinioRepositoryAnalysisRestIT + issue: https://github.com/elastic/elasticsearch/issues/118548 +- class: org.elasticsearch.xpack.security.QueryableReservedRolesIT + method: testConfiguredReservedRolesAfterClosingAndOpeningIndex + issue: https://github.com/elastic/elasticsearch/issues/120127 +- class: org.elasticsearch.oldrepos.OldRepositoryAccessIT + method: testOldRepoAccess + issue: https://github.com/elastic/elasticsearch/issues/120148 +- class: org.elasticsearch.oldrepos.OldRepositoryAccessIT + method: testOldSourceOnlyRepoAccess + issue: https://github.com/elastic/elasticsearch/issues/120080 # Examples: # diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index 05cd2cb44124c..0d576d316f855 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -11,8 +11,8 @@ apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'The ICU Analysis plugin integrates the Lucene ICU module into Elasticsearch, adding ICU-related analysis components.' - classname 'org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin' + description = 'The ICU Analysis plugin integrates the Lucene ICU module into Elasticsearch, adding ICU-related analysis components.' + classname ='org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin' } tasks.named("forbiddenApisMain").configure { diff --git a/plugins/analysis-kuromoji/build.gradle b/plugins/analysis-kuromoji/build.gradle index bb59fed0db2ed..82fa59e5773c3 100644 --- a/plugins/analysis-kuromoji/build.gradle +++ b/plugins/analysis-kuromoji/build.gradle @@ -10,8 +10,8 @@ apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { - description 'The Japanese (kuromoji) Analysis plugin integrates Lucene kuromoji analysis module into elasticsearch.' 
- classname 'org.elasticsearch.plugin.analysis.kuromoji.AnalysisKuromojiPlugin' + description = 'The Japanese (kuromoji) Analysis plugin integrates Lucene kuromoji analysis module into elasticsearch.' + classname ='org.elasticsearch.plugin.analysis.kuromoji.AnalysisKuromojiPlugin' } dependencies { diff --git a/plugins/analysis-nori/build.gradle b/plugins/analysis-nori/build.gradle index 0e602886d29ce..6254a56f0657f 100644 --- a/plugins/analysis-nori/build.gradle +++ b/plugins/analysis-nori/build.gradle @@ -10,8 +10,8 @@ apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { - description 'The Korean (nori) Analysis plugin integrates Lucene nori analysis module into elasticsearch.' - classname 'org.elasticsearch.plugin.analysis.nori.AnalysisNoriPlugin' + description = 'The Korean (nori) Analysis plugin integrates Lucene nori analysis module into elasticsearch.' + classname ='org.elasticsearch.plugin.analysis.nori.AnalysisNoriPlugin' } dependencies { diff --git a/plugins/analysis-phonetic/build.gradle b/plugins/analysis-phonetic/build.gradle index e34a179a384ff..018e2c0e52b8d 100644 --- a/plugins/analysis-phonetic/build.gradle +++ b/plugins/analysis-phonetic/build.gradle @@ -10,8 +10,8 @@ apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { - description 'The Phonetic Analysis plugin integrates phonetic token filter analysis with elasticsearch.' - classname 'org.elasticsearch.plugin.analysis.phonetic.AnalysisPhoneticPlugin' + description = 'The Phonetic Analysis plugin integrates phonetic token filter analysis with elasticsearch.' + classname ='org.elasticsearch.plugin.analysis.phonetic.AnalysisPhoneticPlugin' } dependencies { diff --git a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java index 483c8ccef1202..e51d1f24a88ad 100644 --- a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java +++ b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java @@ -44,7 +44,7 @@ public void testDisallowedWithSynonyms() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_READONLY_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .build(); diff --git a/plugins/analysis-smartcn/build.gradle b/plugins/analysis-smartcn/build.gradle index cbc45f32bf596..b4ac03935aab5 100644 --- a/plugins/analysis-smartcn/build.gradle +++ b/plugins/analysis-smartcn/build.gradle @@ -10,8 +10,8 @@ apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { - description 'Smart Chinese Analysis plugin integrates Lucene Smart Chinese analysis module into elasticsearch.' - classname 'org.elasticsearch.plugin.analysis.smartcn.AnalysisSmartChinesePlugin' + description = 'Smart Chinese Analysis plugin integrates Lucene Smart Chinese analysis module into elasticsearch.' 
+ classname ='org.elasticsearch.plugin.analysis.smartcn.AnalysisSmartChinesePlugin' } dependencies { diff --git a/plugins/analysis-stempel/build.gradle b/plugins/analysis-stempel/build.gradle index 2d2c187970d81..0fb15ec7d36d1 100644 --- a/plugins/analysis-stempel/build.gradle +++ b/plugins/analysis-stempel/build.gradle @@ -10,8 +10,8 @@ apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { - description 'The Stempel (Polish) Analysis plugin integrates Lucene stempel (polish) analysis module into elasticsearch.' - classname 'org.elasticsearch.plugin.analysis.stempel.AnalysisStempelPlugin' + description = 'The Stempel (Polish) Analysis plugin integrates Lucene stempel (polish) analysis module into elasticsearch.' + classname ='org.elasticsearch.plugin.analysis.stempel.AnalysisStempelPlugin' } dependencies { diff --git a/plugins/analysis-ukrainian/build.gradle b/plugins/analysis-ukrainian/build.gradle index 2794e5bcfe338..2be48240a8875 100644 --- a/plugins/analysis-ukrainian/build.gradle +++ b/plugins/analysis-ukrainian/build.gradle @@ -10,8 +10,8 @@ apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { - description 'The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into elasticsearch.' - classname 'org.elasticsearch.plugin.analysis.ukrainian.AnalysisUkrainianPlugin' + description = 'The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into elasticsearch.' + classname ='org.elasticsearch.plugin.analysis.ukrainian.AnalysisUkrainianPlugin' } dependencies { diff --git a/plugins/build.gradle b/plugins/build.gradle index 60c65885639ed..32fd646ef0be8 100644 --- a/plugins/build.gradle +++ b/plugins/build.gradle @@ -22,10 +22,10 @@ configure(subprojects.findAll { it.parent.path == project.path }) { esplugin { // for local ES plugins, the name of the plugin is the same as the directory - name project.name + name = project.name - licenseFile rootProject.file('licenses/AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + licenseFile = rootProject.file('licenses/AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 9549236775bfe..4455e798e3a22 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -13,8 +13,8 @@ apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism' - classname 'org.elasticsearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin' + description = 'The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism' + classname ='org.elasticsearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin' } def localVersions = versions + [ diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 95715217fa59a..a8255c1b54517 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -10,8 +10,8 @@ apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'The EC2 discovery plugin allows to use AWS API for the unicast 
discovery mechanism.' - classname 'org.elasticsearch.discovery.ec2.Ec2DiscoveryPlugin' + description = 'The EC2 discovery plugin allows to use AWS API for the unicast discovery mechanism.' + classname ='org.elasticsearch.discovery.ec2.Ec2DiscoveryPlugin' } dependencies { diff --git a/plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2AvailabilityZoneAttributeNoImdsIT.java b/plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2AvailabilityZoneAttributeNoImdsIT.java index 73213090b6f93..af3b3d67951dd 100644 --- a/plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2AvailabilityZoneAttributeNoImdsIT.java +++ b/plugins/discovery-ec2/src/javaRestTest/java/org/elasticsearch/discovery/ec2/DiscoveryEc2AvailabilityZoneAttributeNoImdsIT.java @@ -9,30 +9,24 @@ package org.elasticsearch.discovery.ec2; -import com.amazonaws.util.EC2MetadataUtils; - import org.elasticsearch.client.Request; import org.elasticsearch.test.cluster.ElasticsearchCluster; -import org.elasticsearch.test.rest.ESRestTestCase; import org.junit.ClassRule; import java.io.IOException; -public class DiscoveryEc2AvailabilityZoneAttributeNoImdsIT extends ESRestTestCase { +public class DiscoveryEc2AvailabilityZoneAttributeNoImdsIT extends DiscoveryEc2AvailabilityZoneAttributeTestCase { @ClassRule - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .plugin("discovery-ec2") - .setting(AwsEc2Service.AUTO_ATTRIBUTE_SETTING.getKey(), "true") - .build(); + // use an address which definitely isn't running an IMDS, just in case we're running these tests in EC2 + public static ElasticsearchCluster cluster = DiscoveryEc2AvailabilityZoneAttributeTestCase.buildCluster(() -> "http://127.0.0.1:1"); @Override protected String getTestRestCluster() { return cluster.getHttpAddresses(); } + @Override // the base class asserts that the attribute is set, but we don't want that here public void testAvailabilityZoneAttribute() throws IOException { - assumeTrue("test only in non-AWS environment", EC2MetadataUtils.getInstanceId() == null); - final var nodesInfoResponse = assertOKAndCreateObjectPath(client().performRequest(new Request("GET", "/_nodes/_all/_none"))); for (final var nodeId : nodesInfoResponse.evaluateMapKeys("nodes")) { assertNull(nodesInfoResponse.evaluateExact("nodes", nodeId, "attributes", "aws_availability_zone")); diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 530070f9e0073..c6beaf3f332ca 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -2,8 +2,8 @@ apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism.' - classname 'org.elasticsearch.plugin.discovery.gce.GceDiscoveryPlugin' + description = 'The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism.' 
+ classname ='org.elasticsearch.plugin.discovery.gce.GceDiscoveryPlugin' } versions << [ diff --git a/plugins/discovery-gce/qa/gce/build.gradle b/plugins/discovery-gce/qa/gce/build.gradle index 72cb429b49072..c9d87cd6beb1e 100644 --- a/plugins/discovery-gce/qa/gce/build.gradle +++ b/plugins/discovery-gce/qa/gce/build.gradle @@ -28,8 +28,9 @@ restResources { /** A task to start the GCEFixture which emulates a GCE service **/ def gceFixtureProvider = tasks.register("gceFixture", AntFixture) { - dependsOn project.sourceSets.yamlRestTest.runtimeClasspath - env 'CLASSPATH', "${-> project.sourceSets.yamlRestTest.runtimeClasspath.asPath}" + def runtimeClasspath = project.sourceSets.yamlRestTest.runtimeClasspath + dependsOn runtimeClasspath + env 'CLASSPATH', "${-> runtimeClasspath.asPath}" executable = "${buildParams.runtimeJavaHome.get()}/bin/java" args 'org.elasticsearch.cloud.gce.GCEFixture', baseDir, "${buildDir}/testclusters/yamlRestTest-1/config/unicast_hosts.txt" } diff --git a/plugins/discovery-gce/src/main/plugin-metadata/entitlement-policy.yaml b/plugins/discovery-gce/src/main/plugin-metadata/entitlement-policy.yaml new file mode 100644 index 0000000000000..a1ff54f02d969 --- /dev/null +++ b/plugins/discovery-gce/src/main/plugin-metadata/entitlement-policy.yaml @@ -0,0 +1,2 @@ +ALL-UNNAMED: + - set_https_connection_properties # required by google-http-client diff --git a/plugins/examples/custom-processor/build.gradle b/plugins/examples/custom-processor/build.gradle index 1aa15e2ed5142..6c0281d899a4e 100644 --- a/plugins/examples/custom-processor/build.gradle +++ b/plugins/examples/custom-processor/build.gradle @@ -10,11 +10,11 @@ apply plugin: 'elasticsearch.esplugin' apply plugin: 'elasticsearch.yaml-rest-test' esplugin { - name 'custom-processor' - description 'An example plugin showing how to register a custom ingest processor' - classname 'org.elasticsearch.example.customprocessor.ExampleProcessorPlugin' - licenseFile rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'custom-processor' + description = 'An example plugin showing how to register a custom ingest processor' + classname ='org.elasticsearch.example.customprocessor.ExampleProcessorPlugin' + licenseFile = rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } dependencies { diff --git a/plugins/examples/custom-settings/build.gradle b/plugins/examples/custom-settings/build.gradle index 4fc1f25e0d636..2774bf6e75c78 100644 --- a/plugins/examples/custom-settings/build.gradle +++ b/plugins/examples/custom-settings/build.gradle @@ -10,11 +10,11 @@ apply plugin: 'elasticsearch.esplugin' apply plugin: 'elasticsearch.yaml-rest-test' esplugin { - name 'custom-settings' - description 'An example plugin showing how to register custom settings' - classname 'org.elasticsearch.example.customsettings.ExampleCustomSettingsPlugin' - licenseFile rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'custom-settings' + description = 'An example plugin showing how to register custom settings' + classname ='org.elasticsearch.example.customsettings.ExampleCustomSettingsPlugin' + licenseFile = rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } testClusters.configureEach { diff --git a/plugins/examples/custom-significance-heuristic/build.gradle b/plugins/examples/custom-significance-heuristic/build.gradle index 
6491e9384d55e..f2f0cefa6d6f5 100644 --- a/plugins/examples/custom-significance-heuristic/build.gradle +++ b/plugins/examples/custom-significance-heuristic/build.gradle @@ -10,11 +10,11 @@ apply plugin: 'elasticsearch.esplugin' apply plugin: 'elasticsearch.yaml-rest-test' esplugin { - name 'custom-significance-heuristic' - description 'An example plugin showing how to write and register a custom significance heuristic' - classname 'org.elasticsearch.example.customsigheuristic.CustomSignificanceHeuristicPlugin' - licenseFile rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'custom-significance-heuristic' + description = 'An example plugin showing how to write and register a custom significance heuristic' + classname ='org.elasticsearch.example.customsigheuristic.CustomSignificanceHeuristicPlugin' + licenseFile = rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } dependencies { diff --git a/plugins/examples/custom-suggester/build.gradle b/plugins/examples/custom-suggester/build.gradle index 6c9210f74327f..a1cf345f5e819 100644 --- a/plugins/examples/custom-suggester/build.gradle +++ b/plugins/examples/custom-suggester/build.gradle @@ -10,11 +10,11 @@ apply plugin: 'elasticsearch.esplugin' apply plugin: 'elasticsearch.yaml-rest-test' esplugin { - name 'custom-suggester' - description 'An example plugin showing how to write and register a custom suggester' - classname 'org.elasticsearch.example.customsuggester.CustomSuggesterPlugin' - licenseFile rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'custom-suggester' + description = 'An example plugin showing how to write and register a custom suggester' + classname ='org.elasticsearch.example.customsuggester.CustomSuggesterPlugin' + licenseFile = rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } testClusters.configureEach { diff --git a/plugins/examples/gradle/wrapper/gradle-wrapper.properties b/plugins/examples/gradle/wrapper/gradle-wrapper.properties index 22286c90de3d1..e712035eabc7b 100644 --- a/plugins/examples/gradle/wrapper/gradle-wrapper.properties +++ b/plugins/examples/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=89d4e70e4e84e2d2dfbb63e4daa53e21b25017cc70c37e4eea31ee51fb15098a -distributionUrl=https\://services.gradle.org/distributions/gradle-8.11.1-all.zip +distributionSha256Sum=7ebdac923867a3cec0098302416d1e3c6c0c729fc4e2e05c10637a8af33a76c5 +distributionUrl=https\://services.gradle.org/distributions/gradle-8.12-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/plugins/examples/painless-whitelist/build.gradle b/plugins/examples/painless-whitelist/build.gradle index f6bbe0fc2b439..f501bd466ebe5 100644 --- a/plugins/examples/painless-whitelist/build.gradle +++ b/plugins/examples/painless-whitelist/build.gradle @@ -10,12 +10,12 @@ apply plugin: 'elasticsearch.esplugin' apply plugin: 'elasticsearch.yaml-rest-test' esplugin { - name 'painless-whitelist' - description 'An example whitelisting additional classes and methods in painless' - classname 'org.elasticsearch.example.painlesswhitelist.MyWhitelistPlugin' + name = 'painless-whitelist' + description = 'An example whitelisting additional classes and methods in painless' + classname 
='org.elasticsearch.example.painlesswhitelist.MyWhitelistPlugin' extendedPlugins = ['lang-painless'] - licenseFile rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + licenseFile = rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } dependencies { diff --git a/plugins/examples/rescore/build.gradle b/plugins/examples/rescore/build.gradle index 93139b9b54d98..023033349dd8c 100644 --- a/plugins/examples/rescore/build.gradle +++ b/plugins/examples/rescore/build.gradle @@ -10,11 +10,11 @@ apply plugin: 'elasticsearch.esplugin' apply plugin: 'elasticsearch.yaml-rest-test' esplugin { - name 'example-rescore' - description 'An example plugin implementing rescore and verifying that plugins *can* implement rescore' - classname 'org.elasticsearch.example.rescore.ExampleRescorePlugin' - licenseFile rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'example-rescore' + description = 'An example plugin implementing rescore and verifying that plugins *can* implement rescore' + classname ='org.elasticsearch.example.rescore.ExampleRescorePlugin' + licenseFile = rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } dependencies { diff --git a/plugins/examples/rest-handler/build.gradle b/plugins/examples/rest-handler/build.gradle index b53ef6be39d53..43590b166a545 100644 --- a/plugins/examples/rest-handler/build.gradle +++ b/plugins/examples/rest-handler/build.gradle @@ -10,11 +10,11 @@ apply plugin: 'elasticsearch.esplugin' apply plugin: 'elasticsearch.yaml-rest-test' esplugin { - name 'rest-handler' - description 'An example plugin showing how to register a REST handler' - classname 'org.elasticsearch.example.resthandler.ExampleRestHandlerPlugin' - licenseFile rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'rest-handler' + description = 'An example plugin showing how to register a REST handler' + classname ='org.elasticsearch.example.resthandler.ExampleRestHandlerPlugin' + licenseFile = rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } dependencies { diff --git a/plugins/examples/script-expert-scoring/build.gradle b/plugins/examples/script-expert-scoring/build.gradle index 8947938e227a2..0fb1baaea2f03 100644 --- a/plugins/examples/script-expert-scoring/build.gradle +++ b/plugins/examples/script-expert-scoring/build.gradle @@ -10,11 +10,11 @@ apply plugin: 'elasticsearch.esplugin' apply plugin: 'elasticsearch.yaml-rest-test' esplugin { - name 'script-expert-scoring' - description 'An example script engine to use low level Lucene internals for expert scoring' - classname 'org.elasticsearch.example.expertscript.ExpertScriptPlugin' - licenseFile rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'script-expert-scoring' + description = 'An example script engine to use low level Lucene internals for expert scoring' + classname ='org.elasticsearch.example.expertscript.ExpertScriptPlugin' + licenseFile = rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } dependencies { diff --git a/plugins/examples/security-authorization-engine/build.gradle b/plugins/examples/security-authorization-engine/build.gradle index 3b49a7eb4db30..faf32774a20ac 
100644 --- a/plugins/examples/security-authorization-engine/build.gradle +++ b/plugins/examples/security-authorization-engine/build.gradle @@ -2,12 +2,12 @@ apply plugin: 'elasticsearch.esplugin' apply plugin: 'elasticsearch.java-rest-test' esplugin { - name 'security-authorization-engine' - description 'An example spi extension plugin for security that implements an Authorization Engine' - classname 'org.elasticsearch.example.AuthorizationEnginePlugin' + name = 'security-authorization-engine' + description = 'An example spi extension plugin for security that implements an Authorization Engine' + classname ='org.elasticsearch.example.AuthorizationEnginePlugin' extendedPlugins = ['x-pack-security'] - licenseFile rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + licenseFile = rootProject.file('AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } dependencies { diff --git a/plugins/examples/stable-analysis/build.gradle b/plugins/examples/stable-analysis/build.gradle index 358e2585623e3..57126853da8e8 100644 --- a/plugins/examples/stable-analysis/build.gradle +++ b/plugins/examples/stable-analysis/build.gradle @@ -2,8 +2,8 @@ apply plugin: 'elasticsearch.stable-esplugin' apply plugin: 'elasticsearch.yaml-rest-test' esplugin { - name 'stable-analysis-plugin' - description 'An example analysis plugin using stable plugin api' + name = 'stable-analysis-plugin' + description = 'An example analysis plugin using stable plugin api' } //TODO write module-info diff --git a/plugins/mapper-annotated-text/build.gradle b/plugins/mapper-annotated-text/build.gradle index 435ad83974efa..63eb75865794c 100644 --- a/plugins/mapper-annotated-text/build.gradle +++ b/plugins/mapper-annotated-text/build.gradle @@ -10,8 +10,8 @@ apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { - description 'The Mapper Annotated_text plugin adds support for text fields with markup used to inject annotation tokens into the index.' - classname 'org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextPlugin' + description = 'The Mapper Annotated_text plugin adds support for text fields with markup used to inject annotation tokens into the index.' 
+ classname ='org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextPlugin' } restResources { diff --git a/plugins/mapper-annotated-text/src/main/java/module-info.java b/plugins/mapper-annotated-text/src/main/java/module-info.java index 13f2bd66418be..58aca0d2857fe 100644 --- a/plugins/mapper-annotated-text/src/main/java/module-info.java +++ b/plugins/mapper-annotated-text/src/main/java/module-info.java @@ -15,6 +15,4 @@ requires org.apache.lucene.highlighter; // exports nothing - - provides org.elasticsearch.features.FeatureSpecification with org.elasticsearch.index.mapper.annotatedtext.Features; } diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index c12849d545b33..4b2006430b89e 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -22,7 +22,6 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.IndexAnalyzers; @@ -64,8 +63,6 @@ **/ public class AnnotatedTextFieldMapper extends FieldMapper { - public static final NodeFeature SYNTHETIC_SOURCE_SUPPORT = new NodeFeature("mapper.annotated_text.synthetic_source"); - public static final String CONTENT_TYPE = "annotated_text"; private static Builder builder(FieldMapper in) { @@ -167,7 +164,7 @@ public AnnotatedTextFieldMapper build(MapperBuilderContext context) { } } - public static TypeParser PARSER = new TypeParser( + public static final TypeParser PARSER = new TypeParser( (n, c) -> new Builder(n, c.indexVersionCreated(), c.getIndexAnalyzers(), SourceFieldMapper.isSynthetic(c.getIndexSettings())) ); diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighter.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighter.java index 8b4a9d6544b75..f636335701da5 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighter.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighter.java @@ -17,6 +17,7 @@ import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.lucene.search.uhighlight.CustomUnifiedHighlighter; +import org.elasticsearch.lucene.search.uhighlight.QueryMaxAnalyzedOffset; import org.elasticsearch.search.fetch.FetchSubPhase.HitContext; import org.elasticsearch.search.fetch.subphase.highlight.DefaultHighlighter; import org.elasticsearch.search.fetch.subphase.highlight.SearchHighlightContext; @@ -52,7 +53,7 @@ protected List<Object> loadFieldValues( } @Override - protected Analyzer wrapAnalyzer(Analyzer analyzer, Integer maxAnalyzedOffset) { + protected Analyzer wrapAnalyzer(Analyzer analyzer, QueryMaxAnalyzedOffset maxAnalyzedOffset) { return new
AnnotatedHighlighterAnalyzer(super.wrapAnalyzer(analyzer, maxAnalyzedOffset)); } diff --git a/plugins/mapper-annotated-text/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/plugins/mapper-annotated-text/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification deleted file mode 100644 index 1fc11da18fc3c..0000000000000 --- a/plugins/mapper-annotated-text/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification +++ /dev/null @@ -1,10 +0,0 @@ -# - # Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - # or more contributor license agreements. Licensed under the "Elastic License - # 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - # Public License v 1"; you may not use this file except in compliance with, at - # your election, the "Elastic License 2.0", the "GNU Affero General Public - # License v3.0 only", or the "Server Side Public License, v 1". -# - -org.elasticsearch.index.mapper.annotatedtext.Features diff --git a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighterTests.java b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighterTests.java index d4c4ccfaa442d..53bb87bdf0acb 100644 --- a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighterTests.java +++ b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighterTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText; import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotationAnalyzerWrapper; import org.elasticsearch.lucene.search.uhighlight.CustomUnifiedHighlighter; +import org.elasticsearch.lucene.search.uhighlight.QueryMaxAnalyzedOffset; import org.elasticsearch.lucene.search.uhighlight.Snippet; import org.elasticsearch.search.fetch.subphase.highlight.LimitTokenOffsetAnalyzer; import org.elasticsearch.test.ESTestCase; @@ -85,7 +86,7 @@ private void assertHighlightOneDoc( int noMatchSize, String[] expectedPassages, int maxAnalyzedOffset, - Integer queryMaxAnalyzedOffset + Integer queryMaxAnalyzedOffsetIn ) throws Exception { try (Directory dir = newDirectory()) { @@ -116,8 +117,9 @@ private void assertHighlightOneDoc( for (int i = 0; i < markedUpInputs.length; i++) { annotations[i] = AnnotatedText.parse(markedUpInputs[i]); } + QueryMaxAnalyzedOffset queryMaxAnalyzedOffset = QueryMaxAnalyzedOffset.create(queryMaxAnalyzedOffsetIn, maxAnalyzedOffset); if (queryMaxAnalyzedOffset != null) { - wrapperAnalyzer = new LimitTokenOffsetAnalyzer(wrapperAnalyzer, queryMaxAnalyzedOffset); + wrapperAnalyzer = new LimitTokenOffsetAnalyzer(wrapperAnalyzer, queryMaxAnalyzedOffset.getNotNull()); } AnnotatedHighlighterAnalyzer hiliteAnalyzer = new AnnotatedHighlighterAnalyzer(wrapperAnalyzer); hiliteAnalyzer.setAnnotations(annotations); @@ -311,6 +313,19 @@ public void testExceedMaxAnalyzedOffset() throws Exception { e.getMessage() ); + // Same as before, but force using index maxOffset (20) as queryMaxOffset by passing -1. 
+ assertHighlightOneDoc( + "text", + new String[] { "[Long Text exceeds](Long+Text+exceeds) MAX analyzed offset)" }, + query, + Locale.ROOT, + breakIterator, + 0, + new String[] { "Long Text [exceeds](_hit_term=exceeds) MAX analyzed offset)" }, + 20, + -1 + ); + assertHighlightOneDoc( "text", new String[] { "[Long Text Exceeds](Long+Text+Exceeds) MAX analyzed offset [Long Text Exceeds](Long+Text+Exceeds)" }, diff --git a/plugins/mapper-murmur3/build.gradle b/plugins/mapper-murmur3/build.gradle index 0271296df934d..54423b2b990dd 100644 --- a/plugins/mapper-murmur3/build.gradle +++ b/plugins/mapper-murmur3/build.gradle @@ -10,8 +10,8 @@ apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { - description 'The Mapper Murmur3 plugin allows to compute hashes of a field\'s values at index-time and to store them in the index.' - classname 'org.elasticsearch.plugin.mapper.MapperMurmur3Plugin' + description = 'The Mapper Murmur3 plugin allows to compute hashes of a field\'s values at index-time and to store them in the index.' + classname ='org.elasticsearch.plugin.mapper.MapperMurmur3Plugin' extendedPlugins = ['lang-painless'] } dependencies { diff --git a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java index 2440668ff57be..738fb7ab7c25e 100644 --- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -63,7 +63,7 @@ public Murmur3FieldMapper build(MapperBuilderContext context) { } } - public static TypeParser PARSER = new TypeParser((n, c) -> new Builder(n)); + public static final TypeParser PARSER = new TypeParser((n, c) -> new Builder(n)); // this only exists so a check can be done to match the field type to using murmur3 hashing... public static class Murmur3FieldType extends MappedFieldType { diff --git a/plugins/mapper-size/build.gradle b/plugins/mapper-size/build.gradle index 93fa6fc320c1f..0f9bf59767d90 100644 --- a/plugins/mapper-size/build.gradle +++ b/plugins/mapper-size/build.gradle @@ -11,8 +11,8 @@ apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'The Mapper Size plugin allows document to record their uncompressed size at index time.' - classname 'org.elasticsearch.plugin.mapper.MapperSizePlugin' + description = 'The Mapper Size plugin allows document to record their uncompressed size at index time.' + classname ='org.elasticsearch.plugin.mapper.MapperSizePlugin' } restResources { diff --git a/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java index c2251910c3122..435849821691e 100644 --- a/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java +++ b/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java @@ -91,7 +91,7 @@ private void assertSizeMappingEnabled(String index, boolean enabled) throws IOEx "Expected size field mapping to be " + (enabled ? "enabled" : "disabled") + " for %s", index ); - GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(index).get(); + GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, index).get(); Map<String, Object> mappingSource = getMappingsResponse.getMappings().get(index).getSourceAsMap(); assertThat(errMsg, mappingSource, hasKey("_size")); String sizeAsString = mappingSource.get("_size").toString(); diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 4da7c24de80f1..7df46699b79e2 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -14,8 +14,8 @@ apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.internal-yaml-rest-test' esplugin { - description 'The HDFS repository plugin adds support for Hadoop Distributed File-System (HDFS) repositories.' - classname 'org.elasticsearch.repositories.hdfs.HdfsPlugin' + description = 'The HDFS repository plugin adds support for Hadoop Distributed File-System (HDFS) repositories.' + classname ='org.elasticsearch.repositories.hdfs.HdfsPlugin' } versions << [ @@ -28,7 +28,7 @@ configurations { } dependencies { - api project(path: 'hadoop-client-api', configuration: 'shadow') + api project(path: 'hadoop-client-api', configuration: 'default') if (isEclipse) { /* * Eclipse can't pick up the shadow dependency so we point it at *something* diff --git a/plugins/repository-hdfs/hadoop-client-api/build.gradle b/plugins/repository-hdfs/hadoop-client-api/build.gradle index 24e4213780fe2..46b0d949cdee2 100644 --- a/plugins/repository-hdfs/hadoop-client-api/build.gradle +++ b/plugins/repository-hdfs/hadoop-client-api/build.gradle @@ -1,16 +1,54 @@ -apply plugin: 'elasticsearch.build' -apply plugin: 'com.gradleup.shadow' +import org.gradle.api.file.ArchiveOperations + +apply plugin: 'elasticsearch.java' + +sourceSets { + patcher +} + +configurations { + thejar { + canBeResolved = true + } +} dependencies { - implementation "org.apache.hadoop:hadoop-client-api:${project.parent.versions.hadoop}" + thejar("org.apache.hadoop:hadoop-client-api:${project.parent.versions.hadoop}") { + transitive = false + } + + patcherImplementation 'org.ow2.asm:asm:9.7.1' + patcherImplementation 'org.ow2.asm:asm-tree:9.7.1' +} + +def outputDir = layout.buildDirectory.dir("patched-classes") + +def patchTask = tasks.register("patchClasses", JavaExec) { + inputs.files(configurations.thejar).withPathSensitivity(PathSensitivity.RELATIVE) + inputs.files(sourceSets.patcher.output).withPathSensitivity(PathSensitivity.RELATIVE) + outputs.dir(outputDir) + classpath = sourceSets.patcher.runtimeClasspath + mainClass = 'org.elasticsearch.hdfs.patch.HdfsClassPatcher' + def thejar = configurations.thejar + doFirst { + args(thejar.singleFile, outputDir.get().asFile) + } } -tasks.named('shadowJar').configure { - exclude 'org/apache/hadoop/util/ShutdownHookManager$*.class' +interface InjectedArchiveOps { + @Inject ArchiveOperations getArchiveOperations() } -['jarHell', 'thirdPartyAudit', 'forbiddenApisMain', 'splitPackagesAudit'].each { - tasks.named(it).configure { - enabled = false +tasks.named('jar').configure { + dependsOn(configurations.thejar) + def injected = project.objects.newInstance(InjectedArchiveOps) + def thejar = configurations.thejar + from(patchTask) + from({ injected.getArchiveOperations().zipTree(thejar.singleFile) }) { + eachFile { + if (outputDir.get().file(it.relativePath.pathString).asFile.exists()) { + it.exclude() + } + } } }
diff --git a/plugins/repository-hdfs/hadoop-client-api/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java b/plugins/repository-hdfs/hadoop-client-api/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java deleted file mode 100644 index c3d15dc06e7c1..0000000000000 --- a/plugins/repository-hdfs/hadoop-client-api/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.apache.hadoop.util; - -import java.util.concurrent.TimeUnit; - -/** - * A replacement for the ShutdownHookManager from hadoop. - * - * This class does not actually add a shutdown hook. Hadoop's shutdown hook - * manager does not fail gracefully when it lacks security manager permissions - * to add shutdown hooks. This implements the same api as the hadoop class, but - * with no-ops. - */ -public class ShutdownHookManager { - private static final ShutdownHookManager MGR = new ShutdownHookManager(); - - public static ShutdownHookManager get() { - return MGR; - } - - private ShutdownHookManager() {} - - public void addShutdownHook(Runnable shutdownHook, int priority) {} - - public void addShutdownHook(Runnable shutdownHook, int priority, long timeout, TimeUnit unit) {} - - public boolean removeShutdownHook(Runnable shutdownHook) { - return false; - } - - public boolean hasShutdownHook(Runnable shutdownHook) { - return false; - } - - public boolean isShutdownInProgress() { - return false; - } - - public void clearShutdownHooks() {} -}
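The hadoop-client-api change above drops the shadow-jar approach (which compiled the stub ShutdownHookManager deleted just above over the upstream class) in favor of build-time bytecode patching: the patchClasses task runs the new HdfsClassPatcher over the upstream hadoop-client-api jar, and the jar task overlays the patched class files on the original entries, excluding each original that has a patched counterpart. As a rough illustration of the ASM technique the patcher sources below rely on, here is a minimal, self-contained sketch, using a hypothetical TargetClass with a no-argument boolean method flag() (not part of this change), that rewrites the method to always return false:

import org.objectweb.asm.ClassReader;
import org.objectweb.asm.ClassVisitor;
import org.objectweb.asm.ClassWriter;
import org.objectweb.asm.MethodVisitor;
import org.objectweb.asm.Opcodes;

// Hypothetical sketch: rewrite TargetClass.flag()Z so it always returns false.
public class PatchSketch {
    public static byte[] patchFlagMethod(byte[] originalClass) {
        ClassReader reader = new ClassReader(originalClass);
        ClassWriter writer = new ClassWriter(reader, 0);
        reader.accept(new ClassVisitor(Opcodes.ASM9, writer) {
            @Override
            public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions) {
                MethodVisitor mv = super.visitMethod(access, name, descriptor, signature, exceptions);
                if ("flag".equals(name) && "()Z".equals(descriptor)) {
                    // Returning a visitor with no delegate discards the original
                    // instructions; we emit "ICONST_0; IRETURN" in their place.
                    return new MethodVisitor(Opcodes.ASM9) {
                        @Override
                        public void visitCode() {
                            mv.visitCode();
                            mv.visitInsn(Opcodes.ICONST_0);
                            mv.visitInsn(Opcodes.IRETURN);
                        }

                        @Override
                        public void visitMaxs(int maxStack, int maxLocals) {
                            // one stack slot for the constant, one local for 'this'
                            mv.visitMaxs(1, 1);
                        }
                    };
                }
                return mv;
            }
        }, 0);
        return writer.toByteArray();
    }
}

ShellPatcher later in this diff applies the same no-op-body idea to Shell.isSetsidSupported, and MethodReplacement factors the visitor boilerplate into a reusable class.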
diff --git a/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/HdfsClassPatcher.java b/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/HdfsClassPatcher.java new file mode 100644 index 0000000000000..732c55929454e --- /dev/null +++ b/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/HdfsClassPatcher.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.hdfs.patch; + +import org.objectweb.asm.ClassReader; +import org.objectweb.asm.ClassVisitor; +import org.objectweb.asm.ClassWriter; + +import java.io.File; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Map; +import java.util.function.Function; +import java.util.jar.JarEntry; +import java.util.jar.JarFile; + +public class HdfsClassPatcher { + static final Map<String, Function<ClassWriter, ClassVisitor>> patchers = Map.of( + "org/apache/hadoop/util/ShutdownHookManager.class", + ShutdownHookManagerPatcher::new, + "org/apache/hadoop/util/Shell.class", + ShellPatcher::new, + "org/apache/hadoop/security/UserGroupInformation.class", + SubjectGetSubjectPatcher::new, + "org/apache/hadoop/security/authentication/client/KerberosAuthenticator.class", + SubjectGetSubjectPatcher::new + ); + + public static void main(String[] args) throws Exception { + String jarPath = args[0]; + Path outputDir = Paths.get(args[1]); + + try (JarFile jarFile = new JarFile(new File(jarPath))) { + for (var patcher : patchers.entrySet()) { + JarEntry jarEntry = jarFile.getJarEntry(patcher.getKey()); + if (jarEntry == null) { + throw new IllegalArgumentException("path [" + patcher.getKey() + "] not found in [" + jarPath + "]"); + } + byte[] classToPatch = jarFile.getInputStream(jarEntry).readAllBytes(); + + ClassReader classReader = new ClassReader(classToPatch); + ClassWriter classWriter = new ClassWriter(classReader, 0); + classReader.accept(patcher.getValue().apply(classWriter), 0); + + Path outputFile = outputDir.resolve(patcher.getKey()); + Files.createDirectories(outputFile.getParent()); + Files.write(outputFile, classWriter.toByteArray()); + } + } + } +} diff --git a/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/MethodReplacement.java b/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/MethodReplacement.java new file mode 100644 index 0000000000000..e07a32cc294a5 --- /dev/null +++ b/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/MethodReplacement.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1".
+ */ + +package org.elasticsearch.hdfs.patch; + +import org.objectweb.asm.MethodVisitor; +import org.objectweb.asm.Opcodes; + +public class MethodReplacement extends MethodVisitor { + private final MethodVisitor delegate; + private final Runnable bodyWriter; + + MethodReplacement(MethodVisitor delegate, Runnable bodyWriter) { + super(Opcodes.ASM9); + this.delegate = delegate; + this.bodyWriter = bodyWriter; + } + + @Override + public void visitCode() { + // delegate.visitCode(); + bodyWriter.run(); + // delegate.visitEnd(); + } + + @Override + public void visitMaxs(int maxStack, int maxLocals) { + delegate.visitMaxs(maxStack, maxLocals); + } +} diff --git a/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/ShellPatcher.java b/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/ShellPatcher.java new file mode 100644 index 0000000000000..397b63e434ba2 --- /dev/null +++ b/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/ShellPatcher.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.hdfs.patch; + +import org.objectweb.asm.ClassVisitor; +import org.objectweb.asm.ClassWriter; +import org.objectweb.asm.MethodVisitor; +import org.objectweb.asm.Opcodes; + +class ShellPatcher extends ClassVisitor { + + ShellPatcher(ClassWriter classWriter) { + super(Opcodes.ASM9, classWriter); + } + + @Override + public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions) { + MethodVisitor mv = super.visitMethod(access, name, descriptor, signature, exceptions); + if (name.equals("isSetsidSupported")) { + return new MethodReplacement(mv, () -> { + mv.visitInsn(Opcodes.ICONST_0); + mv.visitInsn(Opcodes.IRETURN); + }); + } + return mv; + } +} diff --git a/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/ShutdownHookManagerPatcher.java b/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/ShutdownHookManagerPatcher.java new file mode 100644 index 0000000000000..1235b5af9002f --- /dev/null +++ b/plugins/repository-hdfs/hadoop-client-api/src/patcher/java/org/elasticsearch/hdfs/patch/ShutdownHookManagerPatcher.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.hdfs.patch; + +import org.objectweb.asm.ClassVisitor; +import org.objectweb.asm.ClassWriter; +import org.objectweb.asm.MethodVisitor; +import org.objectweb.asm.Opcodes; +import org.objectweb.asm.Type; + +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; + +class ShutdownHookManagerPatcher extends ClassVisitor { + private static final String CLASSNAME = "org/apache/hadoop/util/ShutdownHookManager"; + private static final Set<String> VOID_METHODS = Set.of("addShutdownHook", "clearShutdownHooks"); + private static final Set<String> BOOLEAN_METHODS = Set.of("removeShutdownHook", "hasShutdownHook", "isShutdownInProgress"); + + ShutdownHookManagerPatcher(ClassWriter classWriter) { + super(Opcodes.ASM9, classWriter); + } + + @Override + public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions) { + MethodVisitor mv = super.visitMethod(access, name, descriptor, signature, exceptions); + if (VOID_METHODS.contains(name)) { + // make void methods noops + return new MethodReplacement(mv, () -> { mv.visitInsn(Opcodes.RETURN); }); + } else if (BOOLEAN_METHODS.contains(name)) { + // make boolean methods always return false + return new MethodReplacement(mv, () -> { + mv.visitInsn(Opcodes.ICONST_0); + mv.visitInsn(Opcodes.IRETURN); + }); + } else if (name.equals("<clinit>")) { + return new MethodReplacement(mv, () -> { + // just initialize the statics, don't actually get runtime to add shutdown hook + + var classType = Type.getObjectType(CLASSNAME); + mv.visitTypeInsn(Opcodes.NEW, CLASSNAME); + mv.visitInsn(Opcodes.DUP); + mv.visitMethodInsn(Opcodes.INVOKESPECIAL, CLASSNAME, "<init>", "()V", false); + mv.visitFieldInsn(Opcodes.PUTSTATIC, CLASSNAME, "MGR", classType.getDescriptor()); + + var timeUnitType = Type.getType(TimeUnit.class); + mv.visitFieldInsn(Opcodes.GETSTATIC, timeUnitType.getInternalName(), "SECONDS", timeUnitType.getDescriptor()); + mv.visitFieldInsn(Opcodes.PUTSTATIC, CLASSNAME, "TIME_UNIT_DEFAULT", timeUnitType.getDescriptor()); + + var executorServiceType = Type.getType(ExecutorService.class); + mv.visitInsn(Opcodes.ACONST_NULL); + mv.visitFieldInsn(Opcodes.PUTSTATIC, CLASSNAME, "EXECUTOR", executorServiceType.getDescriptor()); + + mv.visitInsn(Opcodes.RETURN); + }); + } + return mv; + } +}
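The <clinit> branch above is the subtle case: rather than no-opping the static initializer outright, it re-emits just the field assignments, so ShutdownHookManager's MGR, TIME_UNIT_DEFAULT and EXECUTOR statics are still initialized while the upstream side effect (asking the runtime to register a shutdown hook, per the inline comment) is dropped. In source form, the replacement initializer amounts to roughly this hypothetical sketch:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

// Hypothetical source-level equivalent of the patched <clinit>; field names
// mirror the PUTSTATIC targets in the bytecode above.
class ShutdownHookManagerStaticsSketch {
    static ShutdownHookManagerStaticsSketch MGR = new ShutdownHookManagerStaticsSketch(); // NEW + INVOKESPECIAL <init> + PUTSTATIC
    static TimeUnit TIME_UNIT_DEFAULT = TimeUnit.SECONDS; // GETSTATIC SECONDS + PUTSTATIC
    static ExecutorService EXECUTOR = null; // ACONST_NULL + PUTSTATIC

    private ShutdownHookManagerStaticsSketch() {}
}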
+ */ + +package org.elasticsearch.hdfs.patch; + +import org.objectweb.asm.ClassVisitor; +import org.objectweb.asm.ClassWriter; +import org.objectweb.asm.MethodVisitor; +import org.objectweb.asm.Type; + +import static org.objectweb.asm.Opcodes.ASM9; +import static org.objectweb.asm.Opcodes.INVOKESTATIC; +import static org.objectweb.asm.Opcodes.POP; + +class SubjectGetSubjectPatcher extends ClassVisitor { + SubjectGetSubjectPatcher(ClassWriter classWriter) { + super(ASM9, classWriter); + } + + @Override + public MethodVisitor visitMethod(int access, String name, String descriptor, String signature, String[] exceptions) { + return new ReplaceCallMethodVisitor(super.visitMethod(access, name, descriptor, signature, exceptions), name, access, descriptor); + } + + /** + * Replaces calls to Subject.getSubject(context); with calls to Subject.current(); + */ + private static class ReplaceCallMethodVisitor extends MethodVisitor { + private static final String SUBJECT_CLASS_INTERNAL_NAME = "javax/security/auth/Subject"; + private static final String METHOD_NAME = "getSubject"; + + ReplaceCallMethodVisitor(MethodVisitor methodVisitor, String name, int access, String descriptor) { + super(ASM9, methodVisitor); + } + + @Override + public void visitMethodInsn(int opcode, String owner, String name, String descriptor, boolean isInterface) { + if (opcode == INVOKESTATIC && SUBJECT_CLASS_INTERNAL_NAME.equals(owner) && METHOD_NAME.equals(name)) { + // Get rid of the extra arg on the stack + mv.visitInsn(POP); + // Call Subject.current() + mv.visitMethodInsn( + INVOKESTATIC, + SUBJECT_CLASS_INTERNAL_NAME, + "current", + Type.getMethodDescriptor(Type.getObjectType(SUBJECT_CLASS_INTERNAL_NAME)), + false + ); + } else { + super.visitMethodInsn(opcode, owner, name, descriptor, isInterface); + } + } + } +} diff --git a/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/hdfs_repository/40_restore.yml b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/hdfs_repository/40_restore.yml index 716857781b758..a13ad970576fb 100644 --- a/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/hdfs_repository/40_restore.yml +++ b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/hdfs_repository/40_restore.yml @@ -49,8 +49,6 @@ - do: indices.close: index : test_index - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" # Restore index - do: diff --git a/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/secure_hdfs_repository/40_restore.yml b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/secure_hdfs_repository/40_restore.yml index c7a298d72d6c9..1a5d346562181 100644 --- a/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/secure_hdfs_repository/40_restore.yml +++ b/plugins/repository-hdfs/src/yamlRestTest/resources/rest-api-spec/test/secure_hdfs_repository/40_restore.yml @@ -51,8 +51,6 @@ - do: indices.close: index : test_index - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" # Restore index - do: diff --git 
a/plugins/store-smb/build.gradle b/plugins/store-smb/build.gradle index 1186f5359ad5c..727f9ed588673 100644 --- a/plugins/store-smb/build.gradle +++ b/plugins/store-smb/build.gradle @@ -10,8 +10,8 @@ apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { - description 'The Store SMB plugin adds support for SMB stores.' - classname 'org.elasticsearch.plugin.store.smb.SMBStorePlugin' + description = 'The Store SMB plugin adds support for SMB stores.' + classname ='org.elasticsearch.plugin.store.smb.SMBStorePlugin' } restResources { restApi { diff --git a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle index e63b1629db39c..5bbade8cf6fce 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle +++ b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle @@ -50,8 +50,6 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> nonInputProperties.systemProperty('tests.rest.cluster', localCluster.map(c -> c.allHttpSocketURI.join(","))) nonInputProperties.systemProperty('tests.rest.remote_cluster', remoteCluster.map(c -> c.allHttpSocketURI.join(","))) } - - onlyIf("FIPS mode disabled") { buildParams.inFipsJvm == false } } tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index aac2c661dea9f..8b74657becb24 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -12,6 +12,8 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; @@ -21,6 +23,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.ConstantScoreQueryBuilder; import org.elasticsearch.index.query.DisMaxQueryBuilder; @@ -51,6 +54,7 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.cluster.ClusterState.VERSION_INTRODUCING_TRANSPORT_VERSIONS; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; /** @@ -245,7 +249,19 @@ public void testQueryBuilderBWC() throws Exception { InputStream in = new ByteArrayInputStream(qbSource, 0, qbSource.length); StreamInput input = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(in), registry) ) { - input.setTransportVersion(TransportVersion.readVersion(input)); + @UpdateForV10(owner = UpdateForV10.Owner.SEARCH_FOUNDATIONS) // won't need to read <8.8 data anymore + boolean originalClusterHasTransportVersion = parseLegacyVersion(getOldClusterVersion()).map( + v -> v.onOrAfter(VERSION_INTRODUCING_TRANSPORT_VERSIONS) + ).orElse(true); + TransportVersion transportVersion; + if (originalClusterHasTransportVersion == false) { + transportVersion = TransportVersion.fromId( + 
parseLegacyVersion(getOldClusterVersion()).map(Version::id).orElse(TransportVersions.MINIMUM_COMPATIBLE.id())
+                    );
+                } else {
+                    transportVersion = TransportVersion.readVersion(input);
+                }
+                input.setTransportVersion(transportVersion);
                 QueryBuilder queryBuilder = input.readNamedWriteable(QueryBuilder.class);
                 assert in.read() == -1;
                 assertEquals(expectedQueryBuilder, queryBuilder);
diff --git a/qa/logging-spi/build.gradle b/qa/logging-spi/build.gradle
index 04f09b9638bfe..d6b707eb24dd5 100644
--- a/qa/logging-spi/build.gradle
+++ b/qa/logging-spi/build.gradle
@@ -3,9 +3,9 @@ apply plugin: 'elasticsearch.base-internal-es-plugin'
 apply plugin: 'elasticsearch.internal-java-rest-test'
 
 esplugin {
-  name 'logging-spi-test'
-  description 'An test plugin to test the SPI behaviour of ES logging'
-  classname 'org.elasticsearch.test.logging.plugin.TestLoggingPlugin'
+  name = 'logging-spi-test'
+  description = 'A test plugin to test the SPI behaviour of ES logging'
+  classname = 'org.elasticsearch.test.logging.plugin.TestLoggingPlugin'
 }
 
 dependencies {
diff --git a/qa/lucene-index-compatibility/build.gradle b/qa/lucene-index-compatibility/build.gradle
index 37e5eea85a08b..3b2e69ec9859f 100644
--- a/qa/lucene-index-compatibility/build.gradle
+++ b/qa/lucene-index-compatibility/build.gradle
@@ -14,7 +14,9 @@ buildParams.bwcVersions.withLatestReadOnlyIndexCompatible { bwcVersion ->
   tasks.named("javaRestTest").configure {
     systemProperty("tests.minimum.index.compatible", bwcVersion)
     usesBwcDistribution(bwcVersion)
-    enabled = true
+
+    // Tests rely on unreleased code in 8.18 branch
+    enabled = buildParams.isSnapshotBuild()
   }
 }
 
@@ -22,4 +24,3 @@ tasks.withType(Test).configureEach {
   // CI doesn't like it when there's multiple clusters running at once
   maxParallelForks = 1
 }
-
diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java
new file mode 100644
index 0000000000000..13c647983fad5
--- /dev/null
+++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java
@@ -0,0 +1,241 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */ + +package org.elasticsearch.lucene; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.InputStreamEntity; +import org.elasticsearch.client.Request; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.test.XContentTestUtils; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.Version; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.stream.IntStream; + +import static org.elasticsearch.test.cluster.util.Version.CURRENT; +import static org.elasticsearch.test.cluster.util.Version.fromString; +import static org.elasticsearch.test.rest.ObjectPath.createFromResponse; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public abstract class AbstractIndexCompatibilityTestCase extends ESRestTestCase { + + protected static final Version VERSION_MINUS_2 = fromString(System.getProperty("tests.minimum.index.compatible")); + protected static final Version VERSION_MINUS_1 = fromString(System.getProperty("tests.minimum.wire.compatible")); + protected static final Version VERSION_CURRENT = CURRENT; + + protected static final int NODES = 3; + + private static TemporaryFolder REPOSITORY_PATH = new TemporaryFolder(); + + protected static LocalClusterConfigProvider clusterConfig = c -> {}; + private static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(VERSION_MINUS_2) + .nodes(NODES) + .setting("path.repo", () -> REPOSITORY_PATH.getRoot().getPath()) + .setting("xpack.security.enabled", "false") + .setting("xpack.ml.enabled", "false") + .setting("path.repo", () -> REPOSITORY_PATH.getRoot().getPath()) + .apply(() -> clusterConfig) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(REPOSITORY_PATH).around(cluster); + + private static boolean upgradeFailed = false; + + @Before + public final void maybeUpgradeBeforeTest() throws Exception { + // We want to use this test suite for the V9 upgrade, but we are not fully committed to necessarily having N-2 support + // in V10, so we add a check here to ensure we'll revisit this decision once V10 exists. 
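+        // A concrete example of the layout this asserts (illustrative; the real versions come from the
+        // tests.minimum.index.compatible and tests.minimum.wire.compatible system properties set by the build):
+        // VERSION_MINUS_2 = 7.17.x, VERSION_MINUS_1 = 8.x, VERSION_CURRENT = 9.x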
+ assertThat("Explicit check that N-2 version is Elasticsearch 7", VERSION_MINUS_2.getMajor(), equalTo(7)); + + if (upgradeFailed == false) { + try { + maybeUpgrade(); + } catch (Exception e) { + upgradeFailed = true; + throw e; + } + } + + // Skip remaining tests if upgrade failed + assumeFalse("Cluster upgrade failed", upgradeFailed); + } + + protected abstract void maybeUpgrade() throws Exception; + + @After + public final void deleteSnapshotBlobCache() throws IOException { + // TODO ES-10475: The .snapshot-blob-cache created in legacy version can block upgrades, we should probably delete it automatically + try { + var request = new Request("DELETE", "/.snapshot-blob-cache"); + request.setOptions( + expectWarnings( + "this request accesses system indices: [.snapshot-blob-cache], but in a future major version, " + + "direct access to system indices will be prevented by default" + ) + ); + adminClient().performRequest(request); + } catch (IOException e) { + if (isNotFoundResponseException(e) == false) { + throw e; + } + } + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + protected ElasticsearchCluster cluster() { + return cluster; + } + + protected String suffix(String name) { + return name + '-' + getTestName().split(" ")[0].toLowerCase(Locale.ROOT); + } + + protected Settings repositorySettings() { + return Settings.builder() + .put("location", REPOSITORY_PATH.getRoot().toPath().resolve(suffix("location")).toFile().getPath()) + .build(); + } + + protected static Map nodesVersions() throws Exception { + var nodesInfos = getNodesInfo(adminClient()); + assertThat(nodesInfos.size(), equalTo(NODES)); + var versions = new HashMap(); + for (var nodeInfos : nodesInfos.values()) { + versions.put((String) nodeInfos.get("name"), Version.fromString((String) nodeInfos.get("version"))); + } + return versions; + } + + protected static boolean isFullyUpgradedTo(Version version) throws Exception { + return nodesVersions().values().stream().allMatch(v -> v.equals(version)); + } + + protected static Version indexVersion(String indexName) throws Exception { + var response = assertOK(client().performRequest(new Request("GET", "/" + indexName + "/_settings"))); + int id = Integer.parseInt(createFromResponse(response).evaluate(indexName + ".settings.index.version.created")); + return new Version((byte) ((id / 1000000) % 100), (byte) ((id / 10000) % 100), (byte) ((id / 100) % 100)); + } + + protected static void indexDocs(String indexName, int numDocs) throws Exception { + var request = new Request("POST", "/_bulk"); + var docs = new StringBuilder(); + IntStream.range(0, numDocs).forEach(n -> docs.append(Strings.format(""" + {"index":{"_index":"%s"}} + {"field_0":"%s","field_1":%d,"field_2":"%s"} + """, indexName, Integer.toString(n), n, randomFrom(Locale.getAvailableLocales()).getDisplayName()))); + request.setJsonEntity(docs.toString()); + var response = assertOK(client().performRequest(request)); + assertThat(entityAsMap(response).get("errors"), allOf(notNullValue(), is(false))); + } + + protected static void mountIndex(String repository, String snapshot, String indexName, boolean partial, String renamedIndexName) + throws Exception { + var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_mount"); + request.addParameter("wait_for_completion", "true"); + var storage = partial ? 
"shared_cache" : "full_copy"; + request.addParameter("storage", storage); + request.setJsonEntity(Strings.format(""" + { + "index": "%s", + "renamed_index": "%s" + }""", indexName, renamedIndexName)); + var responseBody = createFromResponse(client().performRequest(request)); + assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.successful"))); + assertThat(responseBody.evaluate("snapshot.shards.failed"), equalTo(0)); + } + + protected static void restoreIndex(String repository, String snapshot, String indexName, String renamedIndexName) throws Exception { + var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_restore"); + request.addParameter("wait_for_completion", "true"); + request.setJsonEntity(org.elasticsearch.common.Strings.format(""" + { + "indices": "%s", + "include_global_state": false, + "rename_pattern": "(.+)", + "rename_replacement": "%s", + "include_aliases": false + }""", indexName, renamedIndexName)); + var responseBody = createFromResponse(client().performRequest(request)); + assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.failed"))); + assertThat(responseBody.evaluate("snapshot.shards.successful"), equalTo(0)); + } + + protected static void updateRandomIndexSettings(String indexName) throws IOException { + final var settings = Settings.builder(); + int updates = randomIntBetween(1, 3); + for (int i = 0; i < updates; i++) { + switch (i) { + case 0 -> settings.putList(IndexSettings.DEFAULT_FIELD_SETTING.getKey(), "field_" + randomInt(2)); + case 1 -> settings.put(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), randomIntBetween(1, 100)); + case 2 -> settings.put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), randomLongBetween(100L, 1000L)); + case 3 -> settings.put(IndexSettings.MAX_SLICES_PER_SCROLL.getKey(), randomIntBetween(1, 1024)); + default -> throw new IllegalStateException(); + } + } + updateIndexSettings(indexName, settings); + } + + protected static void updateRandomMappings(String indexName) throws IOException { + final var runtime = new HashMap<>(); + runtime.put("field_" + randomInt(2), Map.of("type", "keyword")); + final var properties = new HashMap<>(); + properties.put(randomIdentifier(), Map.of("type", "long")); + var body = XContentTestUtils.convertToXContent(Map.of("runtime", runtime, "properties", properties), XContentType.JSON); + var request = new Request("PUT", indexName + "/_mappings"); + request.setEntity( + new InputStreamEntity( + body.streamInput(), + body.length(), + + ContentType.create(XContentType.JSON.mediaTypeWithoutParameters()) + ) + ); + assertOK(client().performRequest(request)); + } + + protected static boolean isIndexClosed(String indexName) throws Exception { + var responseBody = createFromResponse(client().performRequest(new Request("GET", "_cluster/state/metadata/" + indexName))); + var state = responseBody.evaluate("metadata.indices." 
+ indexName + ".state"); + return IndexMetadata.State.fromString((String) state) == IndexMetadata.State.CLOSE; + } +} diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractLuceneIndexCompatibilityTestCase.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractLuceneIndexCompatibilityTestCase.java deleted file mode 100644 index c42e879f84892..0000000000000 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractLuceneIndexCompatibilityTestCase.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.lucene; - -import com.carrotsearch.randomizedtesting.TestMethodAndParams; -import com.carrotsearch.randomizedtesting.annotations.Name; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; - -import org.elasticsearch.client.Request; -import org.elasticsearch.test.cluster.ElasticsearchCluster; -import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; -import org.elasticsearch.test.cluster.local.distribution.DistributionType; -import org.elasticsearch.test.cluster.util.Version; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TemporaryFolder; -import org.junit.rules.TestRule; - -import java.util.Comparator; -import java.util.Locale; -import java.util.stream.Stream; - -import static org.elasticsearch.test.cluster.util.Version.CURRENT; -import static org.elasticsearch.test.cluster.util.Version.fromString; -import static org.elasticsearch.test.rest.ObjectPath.createFromResponse; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; - -/** - * Test suite for Lucene indices backward compatibility with N-2 versions. The test suite creates a cluster in N-2 version, then upgrades it - * to N-1 version and finally upgrades it to the current version. Test methods are executed after each upgrade. 
- */ -@TestCaseOrdering(AbstractLuceneIndexCompatibilityTestCase.TestCaseOrdering.class) -public abstract class AbstractLuceneIndexCompatibilityTestCase extends ESRestTestCase { - - protected static final Version VERSION_MINUS_2 = fromString(System.getProperty("tests.minimum.index.compatible")); - protected static final Version VERSION_MINUS_1 = fromString(System.getProperty("tests.minimum.wire.compatible")); - protected static final Version VERSION_CURRENT = CURRENT; - - protected static TemporaryFolder REPOSITORY_PATH = new TemporaryFolder(); - - protected static LocalClusterConfigProvider clusterConfig = c -> {}; - private static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .distribution(DistributionType.DEFAULT) - .version(VERSION_MINUS_2) - .nodes(2) - .setting("path.repo", () -> REPOSITORY_PATH.getRoot().getPath()) - .setting("xpack.security.enabled", "false") - .setting("xpack.ml.enabled", "false") - .setting("path.repo", () -> REPOSITORY_PATH.getRoot().getPath()) - .apply(() -> clusterConfig) - .build(); - - @ClassRule - public static TestRule ruleChain = RuleChain.outerRule(REPOSITORY_PATH).around(cluster); - - private static boolean upgradeFailed = false; - - private final Version clusterVersion; - - public AbstractLuceneIndexCompatibilityTestCase(@Name("cluster") Version clusterVersion) { - this.clusterVersion = clusterVersion; - } - - @ParametersFactory - public static Iterable parameters() { - return Stream.of(VERSION_MINUS_2, VERSION_MINUS_1, CURRENT).map(v -> new Object[] { v }).toList(); - } - - @Override - protected String getTestRestCluster() { - return cluster.getHttpAddresses(); - } - - @Override - protected boolean preserveClusterUponCompletion() { - return true; - } - - @Before - public void maybeUpgrade() throws Exception { - // We want to use this test suite for the V9 upgrade, but we are not fully committed to necessarily having N-2 support - // in V10, so we add a check here to ensure we'll revisit this decision once V10 exists. - assertThat("Explicit check that N-2 version is Elasticsearch 7", VERSION_MINUS_2.getMajor(), equalTo(7)); - - var currentVersion = clusterVersion(); - if (currentVersion.before(clusterVersion)) { - try { - cluster.upgradeToVersion(clusterVersion); - closeClients(); - initClient(); - } catch (Exception e) { - upgradeFailed = true; - throw e; - } - } - - // Skip remaining tests if upgrade failed - assumeFalse("Cluster upgrade failed", upgradeFailed); - } - - protected String suffix(String name) { - return name + '-' + getTestName().split(" ")[0].toLowerCase(Locale.ROOT); - } - - protected static Version clusterVersion() throws Exception { - var response = assertOK(client().performRequest(new Request("GET", "/"))); - var responseBody = createFromResponse(response); - var version = Version.fromString(responseBody.evaluate("version.number").toString()); - assertThat("Failed to retrieve cluster version", version, notNullValue()); - return version; - } - - protected static Version indexLuceneVersion(String indexName) throws Exception { - var response = assertOK(client().performRequest(new Request("GET", "/" + indexName + "/_settings"))); - int id = Integer.parseInt(createFromResponse(response).evaluate(indexName + ".settings.index.version.created")); - return new Version((byte) ((id / 1000000) % 100), (byte) ((id / 10000) % 100), (byte) ((id / 100) % 100)); - } - - /** - * Execute the test suite with the parameters provided by the {@link #parameters()} in version order. 
- */
-    public static class TestCaseOrdering implements Comparator<TestMethodAndParams> {
-        @Override
-        public int compare(TestMethodAndParams o1, TestMethodAndParams o2) {
-            var version1 = (Version) o1.getInstanceArguments().get(0);
-            var version2 = (Version) o2.getInstanceArguments().get(0);
-            return version1.compareTo(version2);
-        }
-    }
-}
diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartIndexCompatibilityTestCase.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartIndexCompatibilityTestCase.java
new file mode 100644
index 0000000000000..9ca7132493ae6
--- /dev/null
+++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartIndexCompatibilityTestCase.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.lucene;
+
+import com.carrotsearch.randomizedtesting.TestMethodAndParams;
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering;
+
+import org.elasticsearch.test.cluster.util.Version;
+
+import java.util.Comparator;
+import java.util.stream.Stream;
+
+import static org.elasticsearch.test.cluster.util.Version.CURRENT;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Test suite for Lucene indices backward compatibility with N-2 versions after full cluster restart upgrades. The test suite creates a
+ * cluster in N-2 version, then upgrades it to N-1 version and finally upgrades it to the current version. Test methods are executed after
+ * each upgrade.
+ */
+@TestCaseOrdering(FullClusterRestartIndexCompatibilityTestCase.TestCaseOrdering.class)
+public abstract class FullClusterRestartIndexCompatibilityTestCase extends AbstractIndexCompatibilityTestCase {
+
+    private final Version clusterVersion;
+
+    public FullClusterRestartIndexCompatibilityTestCase(@Name("cluster") Version clusterVersion) {
+        this.clusterVersion = clusterVersion;
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() {
+        return Stream.of(VERSION_MINUS_2, VERSION_MINUS_1, CURRENT).map(v -> new Object[] { v }).toList();
+    }
+
+    @Override
+    protected void maybeUpgrade() throws Exception {
+        if (nodesVersions().values().stream().anyMatch(version -> version.before(clusterVersion))) {
+            cluster().upgradeToVersion(clusterVersion);
+            closeClients();
+            initClient();
+        }
+        assertThat(isFullyUpgradedTo(clusterVersion), equalTo(true));
+    }
+
+    /**
+     * Execute the test suite with the parameters provided by the {@link #parameters()} in version order.
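+     * For example, each test method runs once on the N-2 cluster, once after the full-cluster restart upgrade
+     * to N-1, and once after the upgrade to the current version, in that order.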
+ */ + public static class TestCaseOrdering implements Comparator { + @Override + public int compare(TestMethodAndParams o1, TestMethodAndParams o2) { + var version1 = (Version) o1.getInstanceArguments().get(0); + var version2 = (Version) o2.getInstanceArguments().get(0); + return version1.compareTo(version2); + } + } +} diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/LuceneCompatibilityIT.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartLuceneIndexCompatibilityIT.java similarity index 58% rename from qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/LuceneCompatibilityIT.java rename to qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartLuceneIndexCompatibilityIT.java index d6dd949b843d6..15d41cc981cea 100644 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/LuceneCompatibilityIT.java +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartLuceneIndexCompatibilityIT.java @@ -10,47 +10,42 @@ package org.elasticsearch.lucene; import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.cluster.util.Version; -import java.util.stream.IntStream; - -import static org.elasticsearch.test.rest.ObjectPath.createFromResponse; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -public class LuceneCompatibilityIT extends AbstractLuceneIndexCompatibilityTestCase { +public class FullClusterRestartLuceneIndexCompatibilityIT extends FullClusterRestartIndexCompatibilityTestCase { static { clusterConfig = config -> config.setting("xpack.license.self_generated.type", "trial"); } - public LuceneCompatibilityIT(Version version) { + public FullClusterRestartLuceneIndexCompatibilityIT(Version version) { super(version); } + /** + * Creates an index and a snapshot on N-2, then restores the snapshot on N. 
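+     * The restore on N is expected to fail because Elasticsearch cannot read N-2 indices yet; the test asserts
+     * the 500 response and the "cannot restore index ... because it cannot be upgraded" message below.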
+ */ public void testRestoreIndex() throws Exception { final String repository = suffix("repository"); final String snapshot = suffix("snapshot"); final String index = suffix("index"); final int numDocs = 1234; - logger.debug("--> registering repository [{}]", repository); - registerRepository( - client(), - repository, - FsRepository.TYPE, - true, - Settings.builder().put("location", REPOSITORY_PATH.getRoot().getPath()).build() - ); + if (isFullyUpgradedTo(VERSION_MINUS_2)) { + logger.debug("--> registering repository [{}]", repository); + registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); - if (VERSION_MINUS_2.equals(clusterVersion())) { logger.debug("--> creating index [{}]", index); createIndex( client(), @@ -63,27 +58,17 @@ public void testRestoreIndex() throws Exception { ); logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); - final var bulks = new StringBuilder(); - IntStream.range(0, numDocs).forEach(n -> bulks.append(Strings.format(""" - {"index":{"_id":"%s","_index":"%s"}} - {"test":"test"} - """, n, index))); - - var bulkRequest = new Request("POST", "/_bulk"); - bulkRequest.setJsonEntity(bulks.toString()); - var bulkResponse = client().performRequest(bulkRequest); - assertOK(bulkResponse); - assertThat(entityAsMap(bulkResponse).get("errors"), allOf(notNullValue(), is(false))); + indexDocs(index, numDocs); logger.debug("--> creating snapshot [{}]", snapshot); createSnapshot(client(), repository, snapshot, true); return; } - if (VERSION_MINUS_1.equals(clusterVersion())) { + if (isFullyUpgradedTo(VERSION_MINUS_1)) { ensureGreen(index); - assertThat(indexLuceneVersion(index), equalTo(VERSION_MINUS_2)); + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); assertDocCount(client(), index, numDocs); logger.debug("--> deleting index [{}]", index); @@ -91,11 +76,11 @@ public void testRestoreIndex() throws Exception { return; } - if (VERSION_CURRENT.equals(clusterVersion())) { + if (isFullyUpgradedTo(VERSION_CURRENT)) { var restoredIndex = suffix("index-restored"); - logger.debug("--> restoring index [{}] as archive [{}]", index, restoredIndex); + logger.debug("--> restoring index [{}] as [{}]", index, restoredIndex); - // Restoring the archive will fail as Elasticsearch does not support reading N-2 yet + // Restoring the index will fail as Elasticsearch does not support reading N-2 yet var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_restore"); request.addParameter("wait_for_completion", "true"); request.setJsonEntity(Strings.format(""" @@ -106,9 +91,20 @@ public void testRestoreIndex() throws Exception { "rename_replacement": "%s", "include_aliases": false }""", index, restoredIndex)); - var responseBody = createFromResponse(client().performRequest(request)); - assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.failed"))); - assertThat(responseBody.evaluate("snapshot.shards.successful"), equalTo(0)); + + var responseException = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertEquals(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), responseException.getResponse().getStatusLine().getStatusCode()); + assertThat( + responseException.getMessage(), + allOf( + containsString("cannot restore index [[" + index), + containsString("because it cannot be upgraded"), + containsString("has current compatibility version [" + VERSION_MINUS_2 + '-' + VERSION_MINUS_1.getMajor() + ".0.0]"), + containsString("but the 
minimum compatible version is [" + VERSION_MINUS_1.getMajor() + ".0.0]."), + containsString("It should be re-indexed in Elasticsearch " + VERSION_MINUS_1.getMajor() + ".x"), + containsString("before upgrading to " + VERSION_CURRENT) + ) + ); } } } diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartSearchableSnapshotIndexCompatibilityIT.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartSearchableSnapshotIndexCompatibilityIT.java new file mode 100644 index 0000000000000..a7dc5e41fd327 --- /dev/null +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/FullClusterRestartSearchableSnapshotIndexCompatibilityIT.java @@ -0,0 +1,190 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.lucene; + +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.test.cluster.util.Version; + +import static org.hamcrest.Matchers.equalTo; + +public class FullClusterRestartSearchableSnapshotIndexCompatibilityIT extends FullClusterRestartIndexCompatibilityTestCase { + + static { + clusterConfig = config -> config.setting("xpack.license.self_generated.type", "trial") + .setting("xpack.searchable.snapshot.shared_cache.size", "16MB") + .setting("xpack.searchable.snapshot.shared_cache.region_size", "256KB"); + } + + public FullClusterRestartSearchableSnapshotIndexCompatibilityIT(Version version) { + super(version); + } + + /** + * Creates an index and a snapshot on N-2, then mounts the snapshot on N. 
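+     * <p>
+     * On the current version, the mount step boils down to this call into the base-class helper (a sketch of
+     * the method body below):
+     * <pre>{@code
+     * mountIndex(repository, snapshot, index, randomBoolean(), suffix("index-mounted"));
+     * }</pre>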
+ */ + public void testSearchableSnapshot() throws Exception { + final String repository = suffix("repository"); + final String snapshot = suffix("snapshot"); + final String index = suffix("index"); + final int numDocs = 1234; + + if (isFullyUpgradedTo(VERSION_MINUS_2)) { + logger.debug("--> registering repository [{}]", repository); + registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); + + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() + ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + indexDocs(index, numDocs); + + logger.debug("--> creating snapshot [{}]", snapshot); + createSnapshot(client(), repository, snapshot, true); + return; + } + + if (isFullyUpgradedTo(VERSION_MINUS_1)) { + ensureGreen(index); + + assertThat(indexVersion(index), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), index, numDocs); + + logger.debug("--> deleting index [{}]", index); + deleteIndex(index); + return; + } + + if (isFullyUpgradedTo(VERSION_CURRENT)) { + var mountedIndex = suffix("index-mounted"); + logger.debug("--> mounting index [{}] as [{}]", index, mountedIndex); + mountIndex(repository, snapshot, index, randomBoolean(), mountedIndex); + ensureGreen(mountedIndex); + + assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), mountedIndex, numDocs); + + updateRandomIndexSettings(mountedIndex); + updateRandomMappings(mountedIndex); + + logger.debug("--> adding replica to test peer-recovery"); + updateIndexSettings(mountedIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)); + ensureGreen(mountedIndex); + + logger.debug("--> closing index [{}]", mountedIndex); + closeIndex(mountedIndex); + ensureGreen(mountedIndex); + + logger.debug("--> adding replica to test peer-recovery for closed shards"); + updateIndexSettings(mountedIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)); + ensureGreen(mountedIndex); + + logger.debug("--> re-opening index [{}]", mountedIndex); + openIndex(mountedIndex); + ensureGreen(mountedIndex); + + assertDocCount(client(), mountedIndex, numDocs); + + logger.debug("--> deleting index [{}]", mountedIndex); + deleteIndex(mountedIndex); + } + } + + /** + * Creates an index and a snapshot on N-2, mounts the snapshot on N -1 and then upgrades to N. 
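+     * <p>
+     * Phase summary, one run per cluster version: N-2 creates, snapshots and deletes the index; N-1 mounts the
+     * snapshot, verifies it and adds a replica (possibly closing the index); N re-opens the index if needed and
+     * verifies that it is still searchable.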
+ */ + public void testSearchableSnapshotUpgrade() throws Exception { + final String mountedIndex = suffix("index-mounted"); + final String repository = suffix("repository"); + final String snapshot = suffix("snapshot"); + final String index = suffix("index"); + final int numDocs = 4321; + + if (isFullyUpgradedTo(VERSION_MINUS_2)) { + logger.debug("--> registering repository [{}]", repository); + registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); + + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() + ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + indexDocs(index, numDocs); + + logger.debug("--> creating snapshot [{}]", snapshot); + createSnapshot(client(), repository, snapshot, true); + + logger.debug("--> deleting index [{}]", index); + deleteIndex(index); + return; + } + + if (isFullyUpgradedTo(VERSION_MINUS_1)) { + logger.debug("--> mounting index [{}] as [{}]", index, mountedIndex); + mountIndex(repository, snapshot, index, randomBoolean(), mountedIndex); + + ensureGreen(mountedIndex); + + updateRandomIndexSettings(mountedIndex); + updateRandomMappings(mountedIndex); + + assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), mountedIndex, numDocs); + + logger.debug("--> adding replica to test replica upgrade"); + updateIndexSettings(mountedIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)); + ensureGreen(mountedIndex); + + if (randomBoolean()) { + logger.debug("--> random closing of index [{}] before upgrade", mountedIndex); + closeIndex(mountedIndex); + ensureGreen(mountedIndex); + } + return; + } + + if (isFullyUpgradedTo(VERSION_CURRENT)) { + ensureGreen(mountedIndex); + + if (isIndexClosed(mountedIndex)) { + logger.debug("--> re-opening index [{}] after upgrade", mountedIndex); + openIndex(mountedIndex); + ensureGreen(mountedIndex); + } + + assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), mountedIndex, numDocs); + + updateRandomIndexSettings(mountedIndex); + updateRandomMappings(mountedIndex); + + logger.debug("--> adding replica to test peer-recovery"); + updateIndexSettings(mountedIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)); + ensureGreen(mountedIndex); + } + } +} diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeIndexCompatibilityTestCase.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeIndexCompatibilityTestCase.java new file mode 100644 index 0000000000000..03b6a9292e355 --- /dev/null +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeIndexCompatibilityTestCase.java @@ -0,0 +1,104 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */
+
+package org.elasticsearch.lucene;
+
+import com.carrotsearch.randomizedtesting.TestMethodAndParams;
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering;
+
+import org.elasticsearch.test.cluster.util.Version;
+
+import java.util.Comparator;
+import java.util.List;
+import java.util.stream.Stream;
+
+import static org.elasticsearch.test.cluster.util.Version.CURRENT;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Test suite for Lucene indices backward compatibility with N-2 versions during rolling upgrades. The test suite creates a cluster in N-2
+ * version, then upgrades each node sequentially to N-1 version and finally upgrades each node sequentially to the current version. Test
+ * methods are executed after each node upgrade.
+ */
+@TestCaseOrdering(RollingUpgradeIndexCompatibilityTestCase.TestCaseOrdering.class)
+public abstract class RollingUpgradeIndexCompatibilityTestCase extends AbstractIndexCompatibilityTestCase {
+
+    private final List<Version> nodesVersions;
+
+    public RollingUpgradeIndexCompatibilityTestCase(@Name("cluster") List<Version> nodesVersions) {
+        this.nodesVersions = nodesVersions;
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() {
+        return Stream.of(
+            // Begin on N-2
+            List.of(VERSION_MINUS_2, VERSION_MINUS_2, VERSION_MINUS_2),
+            // Rolling upgrade to VERSION_MINUS_1
+            List.of(VERSION_MINUS_1, VERSION_MINUS_2, VERSION_MINUS_2),
+            List.of(VERSION_MINUS_1, VERSION_MINUS_1, VERSION_MINUS_2),
+            List.of(VERSION_MINUS_1, VERSION_MINUS_1, VERSION_MINUS_1),
+            // Rolling upgrade to CURRENT
+            List.of(CURRENT, VERSION_MINUS_1, VERSION_MINUS_1),
+            List.of(CURRENT, CURRENT, VERSION_MINUS_1),
+            List.of(CURRENT, CURRENT, CURRENT)
+        ).map(nodesVersion -> new Object[] { nodesVersion }).toList();
+    }
+
+    @Override
+    protected void maybeUpgrade() throws Exception {
+        assertThat(nodesVersions, hasSize(NODES));
+
+        for (int i = 0; i < NODES; i++) {
+            var nodeName = cluster().getName(i);
+
+            var expectedNodeVersion = nodesVersions.get(i);
+            assertThat(expectedNodeVersion, notNullValue());
+
+            var currentNodeVersion = nodesVersions().get(nodeName);
+            assertThat(currentNodeVersion, notNullValue());
+            assertThat(currentNodeVersion.onOrBefore(expectedNodeVersion), equalTo(true));
+
+            if (currentNodeVersion.equals(expectedNodeVersion) == false) {
+                closeClients();
+                cluster().upgradeNodeToVersion(i, expectedNodeVersion);
+                initClient();
+            }
+
+            currentNodeVersion = nodesVersions().get(nodeName);
+            assertThat(currentNodeVersion, equalTo(expectedNodeVersion));
+        }
+    }
+
+    /**
+     * Execute the test suite with the parameters provided by the {@link #parameters()} in nodes versions order.
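+     * <p>
+     * The comparison is element-wise over the per-node versions, so for example {@code [N-1, N-2, N-2]} orders
+     * before {@code [N-1, N-1, N-2]}, matching the upgrade sequence declared in {@link #parameters()}.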
+ */ + public static class TestCaseOrdering implements Comparator { + @Override + public int compare(TestMethodAndParams o1, TestMethodAndParams o2) { + List nodesVersions1 = asInstanceOf(List.class, o1.getInstanceArguments().get(0)); + assertThat(nodesVersions1, hasSize(NODES)); + List nodesVersions2 = asInstanceOf(List.class, o2.getInstanceArguments().get(0)); + assertThat(nodesVersions2, hasSize(NODES)); + for (int i = 0; i < NODES; i++) { + var nodeVersion1 = asInstanceOf(Version.class, nodesVersions1.get(i)); + var nodeVersion2 = asInstanceOf(Version.class, nodesVersions2.get(i)); + var result = nodeVersion1.compareTo(nodeVersion2); + if (result != 0) { + return result; + } + } + return 0; + } + } +} diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeSearchableSnapshotIndexCompatibilityIT.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeSearchableSnapshotIndexCompatibilityIT.java new file mode 100644 index 0000000000000..1117d36024bf0 --- /dev/null +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/RollingUpgradeSearchableSnapshotIndexCompatibilityIT.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.lucene; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.test.cluster.util.Version; + +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class RollingUpgradeSearchableSnapshotIndexCompatibilityIT extends RollingUpgradeIndexCompatibilityTestCase { + + static { + clusterConfig = config -> config.setting("xpack.license.self_generated.type", "trial") + .setting("xpack.searchable.snapshot.shared_cache.size", "16MB") + .setting("xpack.searchable.snapshot.shared_cache.region_size", "256KB"); + } + + public RollingUpgradeSearchableSnapshotIndexCompatibilityIT(List nodesVersion) { + super(nodesVersion); + } + + /** + * Creates an index and a snapshot on N-2, then mounts the snapshot during rolling upgrades. 
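+     * <p>
+     * After the initial N-2 phase, the whole mount/verify/delete cycle is repeated at every node-version mix of
+     * the rolling upgrade, so the snapshot is also mounted while the cluster is only partially upgraded.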
+ */ + public void testMountSearchableSnapshot() throws Exception { + final String repository = suffix("repository"); + final String snapshot = suffix("snapshot"); + final String index = suffix("index-rolling-upgrade"); + final var mountedIndex = suffix("index-rolling-upgrade-mounted"); + final int numDocs = 3145; + + if (isFullyUpgradedTo(VERSION_MINUS_2)) { + logger.debug("--> registering repository [{}]", repository); + registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); + + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() + ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + indexDocs(index, numDocs); + + logger.debug("--> creating snapshot [{}]", snapshot); + createSnapshot(client(), repository, snapshot, true); + + logger.debug("--> deleting index [{}]", index); + deleteIndex(index); + return; + } + + boolean success = false; + try { + logger.debug("--> mounting index [{}] as [{}]", index, mountedIndex); + mountIndex(repository, snapshot, index, randomBoolean(), mountedIndex); + ensureGreen(mountedIndex); + + updateRandomIndexSettings(mountedIndex); + updateRandomMappings(mountedIndex); + + assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), mountedIndex, numDocs); + + logger.debug("--> closing mounted index [{}]", mountedIndex); + closeIndex(mountedIndex); + ensureGreen(mountedIndex); + + logger.debug("--> re-opening index [{}]", mountedIndex); + openIndex(mountedIndex); + ensureGreen(mountedIndex); + + logger.debug("--> deleting mounted index [{}]", mountedIndex); + deleteIndex(mountedIndex); + + success = true; + } finally { + if (success == false) { + try { + client().performRequest(new Request("DELETE", "/" + mountedIndex)); + } catch (ResponseException e) { + logger.warn("Failed to delete mounted index [" + mountedIndex + ']', e); + } + } + } + } + + /** + * Creates an index and a snapshot on N-2, mounts the snapshot and ensures it remains searchable during rolling upgrades. 
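+     * <p>
+     * Unlike {@link #testMountSearchableSnapshot()}, the index is mounted once (on N-2) and is then expected to
+     * stay green and searchable after each individual node upgrade.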
+ */ + public void testSearchableSnapshotUpgrade() throws Exception { + final String mountedIndex = suffix("index-rolling-upgraded-mounted"); + final String repository = suffix("repository"); + final String snapshot = suffix("snapshot"); + final String index = suffix("index-rolling-upgraded"); + final int numDocs = 2143; + + if (isFullyUpgradedTo(VERSION_MINUS_2)) { + logger.debug("--> registering repository [{}]", repository); + registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings()); + + logger.debug("--> creating index [{}]", index); + createIndex( + client(), + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() + ); + + logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); + indexDocs(index, numDocs); + + logger.debug("--> creating snapshot [{}]", snapshot); + createSnapshot(client(), repository, snapshot, true); + + logger.debug("--> deleting index [{}]", index); + deleteIndex(index); + + logger.debug("--> mounting index [{}] as [{}]", index, mountedIndex); + mountIndex(repository, snapshot, index, randomBoolean(), mountedIndex); + } + + ensureGreen(mountedIndex); + + if (isIndexClosed(mountedIndex)) { + logger.debug("--> re-opening index [{}] after upgrade", mountedIndex); + openIndex(mountedIndex); + ensureGreen(mountedIndex); + } + + updateRandomIndexSettings(mountedIndex); + updateRandomMappings(mountedIndex); + + assertThat(indexVersion(mountedIndex), equalTo(VERSION_MINUS_2)); + assertDocCount(client(), mountedIndex, numDocs); + + if (randomBoolean()) { + logger.debug("--> random closing of index [{}] before upgrade", mountedIndex); + closeIndex(mountedIndex); + ensureGreen(mountedIndex); + } + } +} diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/SearchableSnapshotCompatibilityIT.java b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/SearchableSnapshotCompatibilityIT.java deleted file mode 100644 index 4f348b7fb122f..0000000000000 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/SearchableSnapshotCompatibilityIT.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ - -package org.elasticsearch.lucene; - -import org.elasticsearch.client.Request; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.repositories.fs.FsRepository; -import org.elasticsearch.test.cluster.util.Version; - -import java.util.stream.IntStream; - -import static org.elasticsearch.test.rest.ObjectPath.createFromResponse; -import static org.hamcrest.Matchers.allOf; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; - -public class SearchableSnapshotCompatibilityIT extends AbstractLuceneIndexCompatibilityTestCase { - - static { - clusterConfig = config -> config.setting("xpack.license.self_generated.type", "trial") - .setting("xpack.searchable.snapshot.shared_cache.size", "16MB") - .setting("xpack.searchable.snapshot.shared_cache.region_size", "256KB"); - } - - public SearchableSnapshotCompatibilityIT(Version version) { - super(version); - } - - // TODO Add a test to mount the N-2 index on N-1 and then search it on N - - public void testSearchableSnapshot() throws Exception { - final String repository = suffix("repository"); - final String snapshot = suffix("snapshot"); - final String index = suffix("index"); - final int numDocs = 1234; - - logger.debug("--> registering repository [{}]", repository); - registerRepository( - client(), - repository, - FsRepository.TYPE, - true, - Settings.builder().put("location", REPOSITORY_PATH.getRoot().getPath()).build() - ); - - if (VERSION_MINUS_2.equals(clusterVersion())) { - logger.debug("--> creating index [{}]", index); - createIndex( - client(), - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .build() - ); - - logger.debug("--> indexing [{}] docs in [{}]", numDocs, index); - final var bulks = new StringBuilder(); - IntStream.range(0, numDocs).forEach(n -> bulks.append(Strings.format(""" - {"index":{"_id":"%s","_index":"%s"}} - {"test":"test"} - """, n, index))); - - var bulkRequest = new Request("POST", "/_bulk"); - bulkRequest.setJsonEntity(bulks.toString()); - var bulkResponse = client().performRequest(bulkRequest); - assertOK(bulkResponse); - assertThat(entityAsMap(bulkResponse).get("errors"), allOf(notNullValue(), is(false))); - - logger.debug("--> creating snapshot [{}]", snapshot); - createSnapshot(client(), repository, snapshot, true); - return; - } - - if (VERSION_MINUS_1.equals(clusterVersion())) { - ensureGreen(index); - - assertThat(indexLuceneVersion(index), equalTo(VERSION_MINUS_2)); - assertDocCount(client(), index, numDocs); - - logger.debug("--> deleting index [{}]", index); - deleteIndex(index); - return; - } - - if (VERSION_CURRENT.equals(clusterVersion())) { - var mountedIndex = suffix("index-mounted"); - logger.debug("--> mounting index [{}] as [{}]", index, mountedIndex); - - // Mounting the index will fail as Elasticsearch does not support reading N-2 yet - var request = new Request("POST", "/_snapshot/" + repository + "/" + snapshot + "/_mount"); - request.addParameter("wait_for_completion", "true"); - var storage = randomBoolean() ? 
"shared_cache" : "full_copy"; - request.addParameter("storage", storage); - request.setJsonEntity(Strings.format(""" - { - "index": "%s", - "renamed_index": "%s" - }""", index, mountedIndex)); - var responseBody = createFromResponse(client().performRequest(request)); - assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.failed"))); - assertThat(responseBody.evaluate("snapshot.shards.successful"), equalTo(0)); - } - } -} diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index d8f906b23d523..8dd5031c07822 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -67,6 +67,12 @@ excludeList.add('indices.resolve_index/20_resolve_system_index/*') // Excluded because the error has changed excludeList.add('aggregations/percentiles_hdr_metric/Negative values test') +// sync_id is removed in 9.0 +excludeList.add("cat.shards/10_basic/Help") + +// Can't work until auto-expand replicas is 0-1 for synonyms index +excludeList.add("synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set") + def clusterPath = getPath() buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java index 0cd2823080b9b..808aec92fb35d 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java @@ -206,13 +206,32 @@ public static void waitForElasticsearchToStart() { ps output: %s - stdout(): + Stdout: %s Stderr: + %s + + Thread dump: %s\ - """, psOutput, dockerLogs.stdout(), dockerLogs.stderr())); + """, psOutput, dockerLogs.stdout(), dockerLogs.stderr(), getThreadDump())); + } + } + + /** + * @return output of jstack for currently running Java process + */ + private static String getThreadDump() { + try { + String pid = dockerShell.run("/usr/share/elasticsearch/jdk/bin/jps | grep -v 'Jps' | awk '{print $1}'").stdout(); + if (pid.isEmpty() == false) { + return dockerShell.run("/usr/share/elasticsearch/jdk/bin/jstack " + Integer.parseInt(pid)).stdout(); + } + } catch (Exception e) { + logger.error("Failed to get thread dump", e); } + + return ""; } /** diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java index 5dc47993072a8..554ae871ea9fa 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java @@ -29,6 +29,9 @@ */ public class DockerRun { + // Use less secure entropy source to avoid hanging when generating certificates + private static final String DEFAULT_JAVA_OPTS = "-Djava.security.egd=file:/dev/urandom"; + private Distribution distribution; private final Map envVars = new HashMap<>(); private final Map volumes = new HashMap<>(); @@ -112,6 +115,11 @@ String build() { // Limit container memory cmd.add("--memory " + memory); + // Add default java opts + for (String envVar : List.of("CLI_JAVA_OPTS", "ES_JAVA_OPTS")) { + this.envVars.put(envVar, this.envVars.getOrDefault(envVar, "") + " " + DEFAULT_JAVA_OPTS); + } + this.envVars.forEach((key, value) -> cmd.add("--env " + key + "=\"" + value + "\"")); // Map ports in the container to the host, so that we 
can send requests
diff --git a/qa/remote-clusters/build.gradle b/qa/remote-clusters/build.gradle
index 8a28ea8fd1c5c..e3d42850375d9 100644
--- a/qa/remote-clusters/build.gradle
+++ b/qa/remote-clusters/build.gradle
@@ -39,14 +39,22 @@ elasticsearch_distributions {
   }
 }
 
+
+interface Injected {
+  @Inject
+  FileSystemOperations getFs()
+}
+
 tasks.named("preProcessFixture").configure {
   dependsOn "copyNodeKeyMaterial", elasticsearch_distributions.docker
+  def injected = project.objects.newInstance(Injected)
+
   doLast {
     // tests expect to have an empty repo
-    project.delete(
-      "${testFixturesDir}/repo",
-      "${testFixturesDir}/oss-repo"
-    )
+    injected.fs.delete {
+      it.delete("${testFixturesDir}/repo")
+      it.delete("${testFixturesDir}/oss-repo")
+    }
     createAndSetWritable(
       "${testFixturesDir}/repo",
       "${testFixturesDir}/oss-repo",
@@ -69,7 +77,8 @@ dockerCompose {
 
 def createAndSetWritable(Object... locations) {
   locations.each { location ->
-    File file = file(location)
+    println "location = $location"
+    File file = new File(location)
     file.mkdirs()
     file.setWritable(true, false)
   }
 }
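The `Injected` interface above is the configuration-cache-safe replacement for calling `project.delete(...)` inside a `doLast` action: once the task graph is cached, execution-time code must not reach back into the `Project` object, so file operations go through an injected `FileSystemOperations` service instead. A minimal sketch of the same pattern as a standalone task class (the class name and paths are illustrative, assuming Gradle 8.x APIs):

import javax.inject.Inject;

import org.gradle.api.DefaultTask;
import org.gradle.api.file.FileSystemOperations;
import org.gradle.api.tasks.TaskAction;

// Configuration-cache-safe deletion: the task asks Gradle to inject
// FileSystemOperations instead of capturing the Project or build script.
public abstract class CleanRepoTask extends DefaultTask {

    @Inject
    protected abstract FileSystemOperations getFs();

    @TaskAction
    public void clean() {
        // Equivalent of project.delete(...), but safe to run from a cached
        // task graph because no script object is referenced at execution time.
        getFs().delete(spec -> spec.delete("repo", "oss-repo"));
    }
}

Registered with `tasks.register("cleanRepo", CleanRepoTask.class)`, such a task serializes cleanly into the configuration cache.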
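Relatedly, the `getThreadDump()` helper added to `Docker.java` earlier in this change shells out to the bundled JDK's `jps` and `jstack` so that a hung container startup leaves a thread dump in the failure message. A rough standalone equivalent (a hedged sketch, not the shipped implementation; the JDK path is an assumption):

import java.io.IOException;
import java.nio.file.Path;

// Sketch: capture a thread dump of the first non-Jps Java process found by
// the jps/jstack binaries under the given JDK bin directory.
public final class ThreadDumps {

    public static String capture(Path jdkBin) throws IOException, InterruptedException {
        String pid = run(jdkBin.resolve("jps").toString()).lines()
            .filter(l -> l.contains("Jps") == false)
            .map(l -> l.split("\\s+")[0])
            .findFirst()
            .orElse("");
        return pid.isEmpty() ? "" : run(jdkBin.resolve("jstack").toString(), pid);
    }

    private static String run(String... cmd) throws IOException, InterruptedException {
        // Merge stderr into stdout so diagnostics are not lost.
        Process p = new ProcessBuilder(cmd).redirectErrorStream(true).start();
        String out = new String(p.getInputStream().readAllBytes());
        p.waitFor();
        return out;
    }
}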
diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ClusterFeatureMigrationIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ClusterFeatureMigrationIT.java
deleted file mode 100644
index 2b3e10acc15ed..0000000000000
--- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ClusterFeatureMigrationIT.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.upgrades;
-
-import com.carrotsearch.randomizedtesting.annotations.Name;
-
-import org.elasticsearch.client.Request;
-import org.elasticsearch.common.xcontent.support.XContentMapValues;
-import org.elasticsearch.features.FeatureService;
-import org.junit.Before;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import static org.hamcrest.Matchers.empty;
-import static org.hamcrest.Matchers.hasSize;
-
-public class ClusterFeatureMigrationIT extends AbstractRollingUpgradeTestCase {
-
-    @Before
-    public void checkMigrationVersion() {
-        assumeFalse(
-            "This checks migrations from before cluster features were introduced",
-            oldClusterHasFeature(FeatureService.FEATURES_SUPPORTED)
-        );
-    }
-
-    public ClusterFeatureMigrationIT(@Name("upgradedNodes") int upgradedNodes) {
-        super(upgradedNodes);
-    }
-
-    public void testClusterFeatureMigration() throws IOException {
-        if (isUpgradedCluster()) {
-            // check the nodes all have a feature in their cluster state (there should always be features_supported)
-            var response = entityAsMap(adminClient().performRequest(new Request("GET", "/_cluster/state/nodes")));
-            List<?> nodeFeatures = (List<?>) XContentMapValues.extractValue("nodes_features", response);
-            assertThat(nodeFeatures, hasSize(adminClient().getNodes().size()));
-
-            Map<String, List<String>> features = nodeFeatures.stream()
-                .map(o -> (Map<?, ?>) o)
-                .collect(Collectors.toMap(m -> (String) m.get("node_id"), m -> (List<String>) m.get("features")));
-
-            Set<String> missing = features.entrySet()
-                .stream()
-                .filter(e -> e.getValue().contains(FeatureService.FEATURES_SUPPORTED.id()) == false)
-                .map(Map.Entry::getKey)
-                .collect(Collectors.toSet());
-            assertThat(missing + " out of " + features.keySet() + " does not have the required feature", missing, empty());
-        }
-    }
-}
diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java
index d9adec47ff483..30367bf55d8cc 100644
--- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java
+++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java
@@ -11,7 +11,6 @@
 
 import com.carrotsearch.randomizedtesting.annotations.Name;
 
-import org.elasticsearch.Build;
 import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest;
 import org.elasticsearch.client.Request;
 import 
org.elasticsearch.cluster.metadata.DesiredNode; @@ -84,7 +83,7 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve randomDoubleProcessorCount(), ByteSizeValue.ofGb(randomIntBetween(10, 24)), ByteSizeValue.ofGb(randomIntBetween(128, 256)), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() + null ) ) .toList(); @@ -96,7 +95,7 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve new DesiredNode.ProcessorsRange(minProcessors, minProcessors + randomIntBetween(10, 20)), ByteSizeValue.ofGb(randomIntBetween(10, 24)), ByteSizeValue.ofGb(randomIntBetween(128, 256)), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() + null ); }).toList(); } diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FieldCapsIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FieldCapsIT.java index a1b3dcc37c45b..c6f4588fb4cd2 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FieldCapsIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FieldCapsIT.java @@ -12,7 +12,6 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import org.apache.http.HttpHost; -import org.apache.lucene.tests.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.Build; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.client.Request; @@ -40,7 +39,6 @@ * In 8.2 we also added the ability to filter fields by type and metadata, with some post-hoc filtering applied on * the co-ordinating node if older nodes were included in the system */ -@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103473") public class FieldCapsIT extends AbstractRollingUpgradeTestCase { public FieldCapsIT(@Name("upgradedNodes") int upgradedNodes) { diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java index d6254d091a868..c48ae9ba1843b 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java @@ -12,9 +12,15 @@ import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction; import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; +import org.elasticsearch.action.admin.indices.template.get.GetComponentTemplateAction; +import org.elasticsearch.action.admin.indices.template.get.GetComposableIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction; +import org.elasticsearch.action.admin.indices.template.post.SimulateIndexTemplateAction; +import org.elasticsearch.action.admin.indices.template.post.SimulateTemplateAction; import org.elasticsearch.action.support.CancellableActionTestPlugin; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.RefCountingListener; @@ -66,6 +72,37 @@ public void 
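/*
 * The cancellation tests added below all follow one recipe, roughly: send the
 * REST request, let CancellableActionTestPlugin intercept the named transport
 * action mid-flight, close the HTTP connection, and then assert that the
 * server-side task for that action is cancelled instead of running to
 * completion.
 */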
testCatAliasesCancellation() { runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_cat/aliases"), GetAliasesAction.NAME); } + public void testGetComponentTemplateCancellation() { + runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_component_template"), GetComponentTemplateAction.NAME); + } + + public void testGetIndexTemplateCancellation() { + runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_template"), GetIndexTemplatesAction.NAME); + } + + public void testGetComposableTemplateCancellation() { + runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_index_template"), GetComposableIndexTemplateAction.NAME); + } + + public void testSimulateTemplateCancellation() { + runRestActionCancellationTest( + new Request(HttpPost.METHOD_NAME, "/_index_template/_simulate/random_index_template"), + SimulateTemplateAction.NAME + ); + } + + public void testSimulateIndexTemplateCancellation() { + createIndex("test"); + runRestActionCancellationTest( + new Request(HttpPost.METHOD_NAME, "/_index_template/_simulate_index/test"), + SimulateIndexTemplateAction.NAME + ); + } + + public void testClusterGetSettingsCancellation() { + runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_cluster/settings"), ClusterGetSettingsAction.NAME); + } + private void runRestActionCancellationTest(Request request, String actionName) { final var node = usually() ? internalCluster().getRandomNodeName() : internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchErrorTraceIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchErrorTraceIT.java new file mode 100644 index 0000000000000..6f9ab8ccdfdec --- /dev/null +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/SearchErrorTraceIT.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.http; + +import org.apache.http.entity.ContentType; +import org.apache.http.nio.entity.NByteArrayEntity; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.search.MultiSearchRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.client.Request; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.transport.TransportMessageListener; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery; + +public class SearchErrorTraceIT extends HttpSmokeTestCase { + private AtomicBoolean hasStackTrace; + + @Before + private void setupMessageListener() { + internalCluster().getDataNodeInstances(TransportService.class).forEach(ts -> { + ts.addMessageListener(new TransportMessageListener() { + @Override + public void onResponseSent(long requestId, String action, Exception error) { + TransportMessageListener.super.onResponseSent(requestId, action, error); + if (action.startsWith("indices:data/read/search")) { + Optional throwable = ExceptionsHelper.unwrapCausesAndSuppressed( + error, + t -> t.getStackTrace().length > 0 + ); + hasStackTrace.set(throwable.isPresent()); + } + } + }); + }); + } + + private void setupIndexWithDocs() { + createIndex("test1", "test2"); + indexRandom( + true, + prepareIndex("test1").setId("1").setSource("field", "foo"), + prepareIndex("test2").setId("10").setSource("field", 5) + ); + refresh(); + } + + public void testSearchFailingQueryErrorTraceDefault() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + getRestClient().performRequest(searchRequest); + assertFalse(hasStackTrace.get()); + } + + public void testSearchFailingQueryErrorTraceTrue() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + searchRequest.addParameter("error_trace", "true"); + getRestClient().performRequest(searchRequest); + assertTrue(hasStackTrace.get()); + } + + public void testSearchFailingQueryErrorTraceFalse() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + Request searchRequest = new Request("POST", "/_search"); + searchRequest.setJsonEntity(""" + { + "query": { + "simple_query_string" : { + "query": "foo", + "fields": ["field"] + } + } + } + """); + searchRequest.addParameter("error_trace", "false"); + getRestClient().performRequest(searchRequest); + assertFalse(hasStackTrace.get()); + } + + public void testMultiSearchFailingQueryErrorTraceDefault() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + XContentType contentType = XContentType.JSON; + MultiSearchRequest multiSearchRequest = new MultiSearchRequest().add( + new SearchRequest("test*").source(new SearchSourceBuilder().query(simpleQueryStringQuery("foo").field("field"))) + ); + Request 
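/*
 * _msearch consumes newline-delimited JSON rather than a single object, so
 * the request built below is serialized via MultiSearchRequest.writeMultiLineFormat
 * into alternating header/body line pairs, e.g. (illustrative only):
 *
 *   {"index":"test*"}
 *   {"query":{"simple_query_string":{"query":"foo","fields":["field"]}}}
 */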
searchRequest = new Request("POST", "/_msearch"); + byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); + searchRequest.setEntity( + new NByteArrayEntity(requestBody, ContentType.create(contentType.mediaTypeWithoutParameters(), (Charset) null)) + ); + getRestClient().performRequest(searchRequest); + assertFalse(hasStackTrace.get()); + } + + public void testMultiSearchFailingQueryErrorTraceTrue() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + XContentType contentType = XContentType.JSON; + MultiSearchRequest multiSearchRequest = new MultiSearchRequest().add( + new SearchRequest("test*").source(new SearchSourceBuilder().query(simpleQueryStringQuery("foo").field("field"))) + ); + Request searchRequest = new Request("POST", "/_msearch"); + byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); + searchRequest.setEntity( + new NByteArrayEntity(requestBody, ContentType.create(contentType.mediaTypeWithoutParameters(), (Charset) null)) + ); + searchRequest.addParameter("error_trace", "true"); + getRestClient().performRequest(searchRequest); + assertTrue(hasStackTrace.get()); + } + + public void testMultiSearchFailingQueryErrorTraceFalse() throws IOException { + hasStackTrace = new AtomicBoolean(); + setupIndexWithDocs(); + + XContentType contentType = XContentType.JSON; + MultiSearchRequest multiSearchRequest = new MultiSearchRequest().add( + new SearchRequest("test*").source(new SearchSourceBuilder().query(simpleQueryStringQuery("foo").field("field"))) + ); + Request searchRequest = new Request("POST", "/_msearch"); + byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); + searchRequest.setEntity( + new NByteArrayEntity(requestBody, ContentType.create(contentType.mediaTypeWithoutParameters(), (Charset) null)) + ); + searchRequest.addParameter("error_trace", "false"); + getRestClient().performRequest(searchRequest); + + assertFalse(hasStackTrace.get()); + } +} diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml index 2d3fa6b568381..4a5ceeb66f661 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml @@ -222,10 +222,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.mapping.validation.templates"] - reason: "ingest simulate index mapping validation added in 8.16" - - do: headers: Content-Type: application/json @@ -313,10 +309,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.component.template.substitutions"] - reason: "ingest simulate component template substitutions added in 8.16" - - do: headers: Content-Type: application/json @@ -494,10 +486,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.component.template.substitutions"] - reason: "ingest simulate component template substitutions added in 8.16" - - do: headers: Content-Type: application/json @@ -617,10 +605,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.component.template.substitutions"] - reason: "ingest 
simulate component template substitutions added in 8.16" - - do: headers: Content-Type: application/json @@ -816,10 +800,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.index.template.substitutions"] - reason: "ingest simulate index template substitutions added in 8.16" - - do: headers: Content-Type: application/json @@ -1010,10 +990,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.index.template.substitutions"] - reason: "ingest simulate component template substitutions added in 8.16" - - do: headers: Content-Type: application/json @@ -1227,10 +1203,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.mapping.addition"] - reason: "ingest simulate mapping addition added in 8.17" - - do: headers: Content-Type: application/json @@ -1463,10 +1435,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.mapping.addition"] - reason: "ingest simulate mapping addition added in 8.17" - - do: indices.put_template: name: my-legacy-template @@ -1584,10 +1552,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.support.non.template.mapping"] - reason: "ingest simulate support for indices with mappings that didn't come from templates added in 8.17" - # A global match-everything legacy template is added to the cluster sometimes (rarely). We have to get rid of this template if it exists # because this test is making sure we get correct behavior when an index matches *no* template: - do: @@ -1720,3 +1684,59 @@ setup: - match: { docs.0.doc._source.foo: 3 } - match: { docs.0.doc._source.bar: "some text value" } - not_exists: docs.0.doc.error + +--- +"Test ignored_fields": + - skip: + features: + - headers + - allowed_warnings + + - requires: + cluster_features: ["simulate.ignored.fields"] + reason: "ingest simulate ignored fields added in 8.18" + + - do: + headers: + Content-Type: application/json + simulate.ingest: + index: nonexistent + body: > + { + "docs": [ + { + "_index": "simulate-test", + "_id": "y9Es_JIBiw6_GgN-U0qy", + "_score": 1, + "_source": { + "abc": "sfdsfsfdsfsfdsfsfdsfsfdsfsfdsf" + } + } + ], + "index_template_substitutions": { + "ind_temp": { + "index_patterns": ["simulate-test"], + "composed_of": ["simulate-test"] + } + }, + "component_template_substitutions": { + "simulate-test": { + "template": { + "mappings": { + "dynamic": false, + "properties": { + "abc": { + "type": "keyword", + "ignore_above": 1 + } + } + } + } + } + } + } + - length: { docs: 1 } + - match: { docs.0.doc._index: "simulate-test" } + - match: { docs.0.doc._source.abc: "sfdsfsfdsfsfdsfsfdsfsfdsfsfdsf" } + - match: { docs.0.doc.ignored_fields: [ {"field": "abc"} ] } + - not_exists: docs.0.doc.error diff --git a/qa/system-indices/build.gradle b/qa/system-indices/build.gradle index d035b2f57d55a..c619d4f02e527 100644 --- a/qa/system-indices/build.gradle +++ b/qa/system-indices/build.gradle @@ -11,11 +11,11 @@ apply plugin: 'elasticsearch.base-internal-es-plugin' apply plugin: 'elasticsearch.legacy-java-rest-test' esplugin { - name 'system-indices-qa' - description 'Plugin for performing QA of system indices' - classname 'org.elasticsearch.system.indices.SystemIndicesQA' - licenseFile rootProject.file('licenses/AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'system-indices-qa' + description = 'Plugin for performing QA of system indices' + classname ='org.elasticsearch.system.indices.SystemIndicesQA' + 
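/*
 * Note the switch in this esplugin block from Gradle's space-separated
 * "method" syntax (name 'x') to explicit property assignment (name = 'x');
 * the older form is the deprecated space-assignment syntax that Gradle is
 * phasing out.
 */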
licenseFile = rootProject.file('licenses/AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } testClusters.configureEach { diff --git a/renovate.json b/renovate.json index c1637ae651c1c..71c6301f8e0c2 100644 --- a/renovate.json +++ b/renovate.json @@ -7,8 +7,8 @@ "schedule": [ "after 1pm on tuesday" ], - "labels": [">non-issue", ":Delivery/Packaging", "Team:Delivery"], - "baseBranches": ["main", "8.x"], + "labels": [">non-issue", ":Delivery/Packaging", "Team:Delivery", "auto-merge-without-approval"], + "baseBranches": ["main", "8.x", "8.17", "8.16"], "packageRules": [ { "groupName": "wolfi (versioned)", diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 7347d9c1312dd..e4b46b98cedda 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -13,7 +13,7 @@ restResources { } // REST API specifications are published under the Apache 2.0 License -ext.projectLicenses.set(['The Apache Software License, Version 2.0': 'http://www.apache.org/licenses/LICENSE-2.0']) +ext.projectLicenses.set(['The Apache Software License, Version 2.0': providers.provider(() -> 'http://www.apache.org/licenses/LICENSE-2.0')]) licenseFile.set(rootProject.file('licenses/APACHE-LICENSE-2.0.txt')) configurations { @@ -60,13 +60,31 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.skipTest("cat.aliases/10_basic/Deprecated local parameter", "CAT APIs not covered by compatibility policy") task.skipTest("cat.shards/10_basic/Help", "sync_id is removed in 9.0") task.skipTest("search/500_date_range/from, to, include_lower, include_upper deprecated", "deprecated parameters are removed in 9.0") - task.skipTest("tsdb/20_mapping/stored source is supported", "no longer serialize source_mode") - task.skipTest("tsdb/20_mapping/Synthetic source", "no longer serialize source_mode") - task.skipTest("logsdb/10_settings/create logs index", "no longer serialize source_mode") - task.skipTest("logsdb/20_source_mapping/stored _source mode is supported", "no longer serialize source_mode") - task.skipTest("logsdb/20_source_mapping/include/exclude is supported with stored _source", "no longer serialize source_mode") - task.skipTest("logsdb/20_source_mapping/synthetic _source is default", "no longer serialize source_mode") - task.skipTest("search/520_fetch_fields/fetch _seq_no via fields", "error code is changed from 5xx to 400 in 9.0") + task.skipTest("logsdb/10_settings/logsdb with default ignore dynamic beyond limit and default sorting", "skip until pr/118968 gets backported") + task.skipTest("logsdb/10_settings/logsdb with default ignore dynamic beyond limit and too low limit", "skip until pr/118968 gets backported") + task.skipTest("logsdb/10_settings/logsdb with default ignore dynamic beyond limit and subobjects false", "skip until pr/118968 gets backported") + task.skipTest("logsdb/10_settings/override sort missing settings", "skip until pr/118968 gets backported") + task.skipTest("logsdb/10_settings/override sort order settings", "skip until pr/118968 gets backported") + task.skipTest("logsdb/10_settings/override sort mode settings", "skip until pr/118968 gets backported") task.skipTest("search.vectors/41_knn_search_bbq_hnsw/Test knn search", "Scoring has changed in latest versions") task.skipTest("search.vectors/42_knn_search_bbq_flat/Test knn search", "Scoring has changed in latest versions") + task.skipTest("search.vectors/180_update_dense_vector_type/Test create and update dense vector mapping with bulk indexing", "waiting for #118774 
backport") + task.skipTest("search.vectors/160_knn_query_missing_params/kNN query in a bool clause - missing num_candidates", "waiting for #118774 backport") + task.skipTest("search.vectors/110_knn_query_with_filter/Simple knn query", "waiting for #118774 backport") + task.skipTest("search.vectors/160_knn_query_missing_params/kNN search used in nested field - missing num_candidates", "waiting for #118774 backport") + task.skipTest("search.vectors/180_update_dense_vector_type/Test create and update dense vector mapping to int4 with per-doc indexing and flush", "waiting for #118774 backport") + task.skipTest("search.vectors/110_knn_query_with_filter/PRE_FILTER: knn query with internal filter as pre-filter", "waiting for #118774 backport") + task.skipTest("search.vectors/180_update_dense_vector_type/Index, update and merge", "waiting for #118774 backport") + task.skipTest("search.vectors/160_knn_query_missing_params/kNN query with missing num_candidates param - size provided", "waiting for #118774 backport") + task.skipTest("search.vectors/110_knn_query_with_filter/POST_FILTER: knn query with filter from a parent bool query as post-filter", "waiting for #118774 backport") + task.skipTest("search.vectors/120_knn_query_multiple_shards/Aggregations with collected number of docs depends on num_candidates", "waiting for #118774 backport") + task.skipTest("search.vectors/180_update_dense_vector_type/Test create and update dense vector mapping with per-doc indexing and flush", "waiting for #118774 backport") + task.skipTest("search.vectors/110_knn_query_with_filter/PRE_FILTER: knn query with alias filter as pre-filter", "waiting for #118774 backport") + task.skipTest("search.vectors/140_knn_query_with_other_queries/Function score query with knn query", "waiting for #118774 backport") + task.skipTest("search.vectors/130_knn_query_nested_search/nested kNN search inner_hits size > 1", "waiting for #118774 backport") + task.skipTest("search.vectors/110_knn_query_with_filter/PRE_FILTER: pre-filter across multiple aliases", "waiting for #118774 backport") + task.skipTest("search.vectors/160_knn_query_missing_params/kNN search in a dis_max query - missing num_candidates", "waiting for #118774 backport") + task.skipTest("search.highlight/30_max_analyzed_offset/Plain highlighter with max_analyzed_offset < 0 should FAIL", "semantics of test has changed") + task.skipTest("indices.create/10_basic/Create lookup index", "default auto_expand_replicas was removed") + task.skipTest("indices.create/10_basic/Create lookup index with one shard", "default auto_expand_replicas was removed") }) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.exists_component_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.exists_component_template.json index 818d034ca8158..b2503659329a3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.exists_component_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.exists_component_template.json @@ -28,9 +28,10 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Timeout for waiting for new cluster state in case it is blocked" }, "local":{ + "deprecated":true, "type":"boolean", "description":"Return local information, do not retrieve the state from master node (default: false)" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_component_template.json 
b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_component_template.json index 889e1b817b0fe..def0cc5fb8bb0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_component_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_component_template.json @@ -34,9 +34,10 @@ "params":{ "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Timeout for waiting for new cluster state in case it is blocked" }, "local":{ + "deprecated":true, "type":"boolean", "description":"Return local information, do not retrieve the state from master node (default: false)" }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json index 5862804257c67..5004ab8de697d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json @@ -26,7 +26,7 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Timeout for waiting for new cluster state in case it is blocked" }, "timeout":{ "type":"time", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json index 2645df28c5d1e..670bb4267bdfa 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json @@ -26,6 +26,13 @@ } } ] + }, + "params": { + "include_deleted": { + "type": "boolean", + "default": false, + "description": "A flag indicating whether to return connectors that have been soft-deleted." + } } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json index 67d2250d3c661..b8c73a09704f1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json @@ -47,6 +47,11 @@ "query": { "type": "string", "description": "A search string for querying connectors, filtering results by matching against connector names, descriptions, and index names" + }, + "include_deleted": { + "type": "boolean", + "default": false, + "description": "A flag indicating whether to return connectors that have been soft-deleted." } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/eql.search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/eql.search.json index c854c44d9d761..0f9af508f4c16 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/eql.search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/eql.search.json @@ -41,6 +41,16 @@ "type": "time", "description": "Update the time interval in which the results (partial or final) for this search will be available", "default": "5d" + }, + "allow_partial_search_results": { + "type":"boolean", + "description":"Control whether the query should keep running in case of shard failures, and return partial results", + "default":false + }, + "allow_partial_sequence_results": { + "type":"boolean", + "description":"Control whether a sequence query should return partial results or no results at all in case of shard failures. 
This option has effect only if [allow_partial_search_results] is true.", + "default":false } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query_delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query_delete.json new file mode 100644 index 0000000000000..a6339559afd72 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query_delete.json @@ -0,0 +1,27 @@ +{ + "esql.async_query_delete": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-delete-api.html", + "description": "Delete an async query request given its ID." + }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": ["application/json"] + }, + "url": { + "paths": [ + { + "path": "/_query/async/{id}", + "methods": ["DELETE"], + "parts": { + "id": { + "type": "string", + "description": "The async query ID" + } + } + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.delete_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.delete_lifecycle.json index 2ff1031ad5c52..cd6397fb61586 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.delete_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.delete_lifecycle.json @@ -25,6 +25,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.explain_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.explain_lifecycle.json index c793ed09281ae..94c37adb802f6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.explain_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.explain_lifecycle.json @@ -33,6 +33,10 @@ "only_errors": { "type": "boolean", "description": "filters the indices included in the response to ones in an ILM error state, implies only_managed" + }, + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.get_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.get_lifecycle.json index 17bf813093dd6..5abdfac7f5b30 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.get_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.get_lifecycle.json @@ -31,6 +31,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.put_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.put_lifecycle.json index 5a12a778241b3..b7fdbe04a0ffb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.put_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.put_lifecycle.json @@ -26,7 +26,16 @@ } ] }, - "params":{}, + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + }, "body":{ 
"description":"The lifecycle policy definition to register" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.start.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.start.json index 88b020071ab82..7141673ff9a9d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.start.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.start.json @@ -19,6 +19,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.stop.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.stop.json index 8401f93badfc4..962fa77263ee4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.stop.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ilm.stop.json @@ -19,6 +19,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json index 77cb62b022515..a7f272af5b307 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json @@ -32,9 +32,10 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Timeout for waiting for new cluster state in case it is blocked" }, "local":{ + "deprecated":true, "type":"boolean", "description":"Return local information, do not retrieve the state from master node (default: false)" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle_stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle_stats.json new file mode 100644 index 0000000000000..8c9e947903402 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle_stats.json @@ -0,0 +1,21 @@ +{ + "indices.get_data_lifecycle_stats": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle-stats.html", + "description": "Get data stream lifecycle statistics." 
+ }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": ["application/json"] + }, + "url": { + "paths": [ + { + "path": "/_lifecycle/stats", + "methods": ["GET"] + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_index_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_index_template.json index 4ed82c3bbc5eb..f0351ce8cfe94 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_index_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_index_template.json @@ -38,9 +38,10 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Timeout for waiting for new cluster state in case it is blocked" }, "local":{ + "deprecated":true, "type":"boolean", "description":"Return local information, do not retrieve the state from master node (default: false)" }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json index 963d9a4cb670e..74fdd1ef5c8a1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json @@ -38,9 +38,10 @@ }, "master_timeout":{ "type":"time", - "description":"Explicit operation timeout for connection to master node" + "description":"Timeout for waiting for new cluster state in case it is blocked" }, "local":{ + "deprecated":true, "type":"boolean", "description":"Return local information, do not retrieve the state from master node (default: false)" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json index 299c24f987d8d..47a1bee665506 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json @@ -63,12 +63,6 @@ "type":"boolean", "default":"false", "description":"If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. Only allowed on data streams." - }, - "target_failure_store":{ - "type":"boolean", - "description":"If set to true, the rollover action will be applied on the failure store of the data stream.", - "visibility": "feature_flag", - "feature_flag": "es.failure_store_feature_flag_enabled" } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.unfreeze.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.unfreeze.json deleted file mode 100644 index 2327519ff2816..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.unfreeze.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "indices.unfreeze":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/unfreeze-index-api.html", - "description":"Unfreezes an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again." 
- }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/json"] - }, - "url":{ - "paths":[ - { - "path":"/{index}/_unfreeze", - "methods":[ - "POST" - ], - "parts":{ - "index":{ - "type":"string", - "description":"The name of the index to unfreeze" - } - }, - "deprecated":{ - "version":"7.14.0", - "description":"Frozen indices are deprecated because they provide no benefit given improvements in heap memory utilization. They will be removed in a future release." - } - } - ] - }, - "params":{ - "timeout":{ - "type":"time", - "description":"Explicit operation timeout" - }, - "master_timeout":{ - "type":"time", - "description":"Specify timeout for connection to master" - }, - "ignore_unavailable":{ - "type":"boolean", - "description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)" - }, - "allow_no_indices":{ - "type":"boolean", - "description":"Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards":{ - "type":"enum", - "options":[ - "open", - "closed", - "hidden", - "none", - "all" - ], - "default":"closed", - "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "wait_for_active_shards":{ - "type":"string", - "description":"Sets the number of active shards to wait for before the operation returns." - } - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/inference.update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.update.json new file mode 100644 index 0000000000000..6c458ce080aa7 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/inference.update.json @@ -0,0 +1,45 @@ +{ + "inference.update": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-inference-api.html", + "description": "Update inference" + }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": ["application/json"], + "content_type": ["application/json"] + }, + "url": { + "paths": [ + { + "path": "/_inference/{inference_id}/_update", + "methods": ["POST"], + "parts": { + "inference_id": { + "type": "string", + "description": "The inference Id" + } + } + }, + { + "path": "/_inference/{task_type}/{inference_id}/_update", + "methods": ["POST"], + "parts": { + "task_type": { + "type": "string", + "description": "The task type" + }, + "inference_id": { + "type": "string", + "description": "The inference Id" + } + } + } + ] + }, + "body": { + "description": "The inference endpoint's task and service settings" + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json index fe50da720a4da..f76d328836d90 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_geoip_database.json @@ -26,6 +26,14 @@ ] }, "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_ip_location_database.json 
b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_ip_location_database.json index e97d1da276906..341ff5081e270 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_ip_location_database.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_ip_location_database.json @@ -26,6 +26,14 @@ ] }, "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json index 6d088e3f164f4..9c2677d1f7b2f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json @@ -27,6 +27,14 @@ ] }, "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } }, "body":{ "description":"The database configuration definition", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_ip_location_database.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_ip_location_database.json index 18487969b1a90..782048b98160a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_ip_location_database.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_ip_location_database.json @@ -27,6 +27,14 @@ ] }, "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } }, "body":{ "description":"The database configuration definition", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/license.post_start_trial.json b/rest-api-spec/src/main/resources/rest-api-spec/api/license.post_start_trial.json index 986040d69cb4f..9fb85807d611f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/license.post_start_trial.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/license.post_start_trial.json @@ -31,10 +31,6 @@ "master_timeout": { "type": "time", "description": "Timeout for processing on master node" - }, - "timeout": { - "type": "time", - "description": "Timeout for acknowledgement of update from all nodes in cluster" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.create_from.json b/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.create_from.json new file mode 100644 index 0000000000000..e17a69a77b252 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.create_from.json @@ -0,0 +1,37 @@ +{ + "migrate.create_from":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex.html", + "description":"This API creates a destination from a source index. It copies the mappings and settings from the source index while allowing request settings and mappings to override the source values." 
+ }, + "stability":"experimental", + "visibility":"private", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_create_from/{source}/{dest}", + "methods":[ "PUT", "POST"], + "parts":{ + "source":{ + "type":"string", + "description":"The source index name" + }, + "dest":{ + "type":"string", + "description":"The destination index name" + } + } + } + ] + }, + "body":{ + "description":"The body contains the fields `mappings_override` and `settings_override`.", + "required":false + } + } +} + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.delegate_pki.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.delegate_pki.json new file mode 100644 index 0000000000000..752ea35028b4f --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.delegate_pki.json @@ -0,0 +1,26 @@ +{ + "security.delegate_pki": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/security-api-delegate-pki-authentication.html", + "description": "Delegate PKI authentication." + }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": ["application/json"] + }, + "url": { + "paths": [ + { + "path": "/_security/delegate_pki", + "methods": ["POST"] + } + ] + }, + "params": {}, + "body": { + "description":"The X509Certificate chain.", + "required":true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.delete_node.json b/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.delete_node.json index d990d1da1f144..6f1ec484e94d0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.delete_node.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.delete_node.json @@ -26,6 +26,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.put_node.json b/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.put_node.json index bf20cf3b70bac..90b19557f5fb2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.put_node.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.put_node.json @@ -26,7 +26,16 @@ } ] }, - "params":{}, + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + }, "body":{ "description":"The shutdown type definition to register", "required": true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/slm.delete_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/slm.delete_lifecycle.json index 12202a7a2a7b1..1d66312f053c7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/slm.delete_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/slm.delete_lifecycle.json @@ -25,6 +25,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/slm.execute_lifecycle.json 
b/rest-api-spec/src/main/resources/rest-api-spec/api/slm.execute_lifecycle.json index 1395a3d3275ae..71f1727a8638b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/slm.execute_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/slm.execute_lifecycle.json @@ -25,6 +25,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/slm.execute_retention.json b/rest-api-spec/src/main/resources/rest-api-spec/api/slm.execute_retention.json index f6ce3e75cc379..4166122d5bf1d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/slm.execute_retention.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/slm.execute_retention.json @@ -19,6 +19,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/slm.get_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/slm.get_lifecycle.json index 94d0772a405da..406fee6015522 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/slm.get_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/slm.get_lifecycle.json @@ -31,6 +31,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/slm.get_stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/slm.get_stats.json index aa693ad31711c..05281ff46cb8d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/slm.get_stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/slm.get_stats.json @@ -19,6 +19,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/slm.get_status.json b/rest-api-spec/src/main/resources/rest-api-spec/api/slm.get_status.json index 92ba1b4c321e6..404f92f55921f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/slm.get_status.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/slm.get_status.json @@ -19,6 +19,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/slm.put_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/slm.put_lifecycle.json index 7e7babb987c79..621ed870ffdbe 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/slm.put_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/slm.put_lifecycle.json @@ -26,7 +26,16 @@ } ] }, - "params":{}, + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to 
master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + }, "body":{ "description":"The snapshot lifecycle policy definition to register" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/xpack.info.json b/rest-api-spec/src/main/resources/rest-api-spec/api/xpack.info.json index 68b2a5d2c2c8b..35895f0ddb581 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/xpack.info.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/xpack.info.json @@ -20,6 +20,12 @@ ] }, "params":{ + "human":{ + "type":"boolean", + "required":false, + "description":"Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line. The default value is true.", + "default":true + }, "categories":{ "type":"list", "description":"Comma-separated list of info categories. Can be any of: build, license, features" diff --git a/rest-api-spec/src/yamlRestTest/java/org/elasticsearch/test/rest/ClientYamlTestSuiteIT.java b/rest-api-spec/src/yamlRestTest/java/org/elasticsearch/test/rest/ClientYamlTestSuiteIT.java index 084e212a913b2..675092bffe8d5 100644 --- a/rest-api-spec/src/yamlRestTest/java/org/elasticsearch/test/rest/ClientYamlTestSuiteIT.java +++ b/rest-api-spec/src/yamlRestTest/java/org/elasticsearch/test/rest/ClientYamlTestSuiteIT.java @@ -14,7 +14,6 @@ import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import org.apache.lucene.tests.util.TimeUnits; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; @@ -43,15 +42,9 @@ public ClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate super(testCandidate); } - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // remove restCompat check @ParametersFactory public static Iterable parameters() throws Exception { - String restCompatProperty = System.getProperty("tests.restCompat"); - if ("true".equals(restCompatProperty)) { - return createParametersWithLegacyNodeSelectorSupport(); - } else { - return createParameters(); - } + return createParameters(); } @Override diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml index 6118453d7805e..0e1dd9fec1b1a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml @@ -388,8 +388,6 @@ - do: indices.close: index: test_index - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - do: cat.aliases: @@ -425,8 +423,6 @@ - do: indices.close: index: test_index - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - do: cat.aliases: {} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.component_templates/10_basic.yml 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.component_templates/10_basic.yml new file mode 100644 index 0000000000000..688fdd776450b --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.component_templates/10_basic.yml @@ -0,0 +1,122 @@ +--- +setup: + + - do: + cluster.put_component_template: + name: captain_america + body: + template: + settings: + sort: + field: field + mappings: + properties: + field: + type: keyword + aliases: + steve_rogers: {} + + - do: + cluster.put_component_template: + name: ms_marvel + body: + template: + settings: + default_pipeline: pipeline_a + final_pipeline: pipeline_b + version: 2 + _meta: + data: {} + + - do: + cluster.put_component_template: + name: captain_marvel + body: + version: 3 + template: + mappings: + properties: + field1: + type: keyword + field2: + type: long + aliases: + carol_danvers: {} + monica_rambeau: {} + + +--- +"Retrieve all": + + - do: + cat.component_templates: {} + + - match: + $body: > + / + (^|\n)captain_america \s* + 1 \s* + 1 \s* + 1 \s* + 0 \s* + \[\]\s* + (\n|$) + / + + - match: + $body: > + / + (^|\n)captain_marvel \s+ + 3 \s+ + 2 \s+ + 2 \s+ + 0 \s+ + 0 \s+ + \[\]\s* + (\n|$) + / + + - match: + $body: > + / + (^|\n)ms_marvel \s+ + 2 \s+ + 0 \s+ + 0 \s+ + 2 \s+ + 1 \s+ + \[\]\s* + (\n|$) + / + +--- +"Retrieve by name (verbose/headers)": + + - do: + cat.component_templates: + name: ms_marvel + v: true + + - match: + $body: > + / + ^name \s+version \s*alias_count \s*mapping_count \s*settings_count \s*metadata_count \s*included_in\n + ms_marvel \s* 2 \s* 0 \s* 0 \s* 2 \s* 1 \s*\[\]\s*$ + / + + +--- +"Retrieve by wildcard (sorted)": + + - do: + cat.component_templates: + name: captain_* + s: name + + - match: + $body: > + / + ^captain_america \s* \s*1 \s*1 \s*1 \s*0 \s*\[\]\s*\n + captain_marvel \s*3 \s*2 \s*2 \s*0 \s*0 \s*\[\]\s*$ + / + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.indices/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.indices/10_basic.yml index d687462df5872..ce0ad990a2c21 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.indices/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.indices/10_basic.yml @@ -87,8 +87,6 @@ - do: indices.close: index: index-2 - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - is_true: acknowledged - do: @@ -132,8 +130,6 @@ - do: indices.close: index: index-2 - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - is_true: acknowledged - do: @@ -278,8 +274,6 @@ - do: indices.close: index: bar - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - do: cat.indices: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.recovery/10_basic.yml 
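In the new cat.component_templates tests above, the included_in column is [] for every row because no composable index template references the components. As a hedged sketch of how that column becomes non-empty (the composable template wandavision and its pattern are invented, and the expected output is an assumption about the cat API's behavior rather than an assertion from this patch):

  - do:
      indices.put_index_template:
        name: wandavision
        body:
          index_patterns: [ "tv-*" ]
          composed_of: [ "captain_america" ]

  - do:
      cat.component_templates:
        name: captain_america

  # The captain_america row would then be expected to end in
  # [wandavision] rather than [].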
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.recovery/10_basic.yml index 21b307c42398d..55f7457b429ab 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.recovery/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.recovery/10_basic.yml @@ -97,8 +97,6 @@ - do: indices.close: index: index2 - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - is_true: acknowledged - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.segments/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.segments/10_basic.yml index 646530214bf09..94052cb4fe967 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.segments/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.segments/10_basic.yml @@ -97,8 +97,6 @@ - do: indices.close: index: index1 - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - do: catch: bad_request diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml index 03d8b2068d23e..45f381eab80b1 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -45,6 +45,7 @@ indexing.index_time .+ \n indexing.index_total .+ \n indexing.index_failed .+ \n + indexing.index_failed_due_to_version_conflict .+ \n merges.current .+ \n merges.current_docs .+ \n merges.current_size .+ \n diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml index d045775d695b4..caf4c55e00e7e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml @@ -69,8 +69,6 @@ - do: indices.close: index: test_closed - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - match: { acknowledged: true } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.component_template/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.component_template/10_basic.yml index f698d3399f27d..800dec2a795a4 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.component_template/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.component_template/10_basic.yml @@ -171,3 +171,33 @@ - match: {component_templates.0.component_template.template.lifecycle.enabled: true} - match: 
{component_templates.0.component_template.template.lifecycle.data_retention: "10d"} - is_true: component_templates.0.component_template.template.lifecycle.rollover + +--- +"Deprecated local parameter": + - requires: + capabilities: + - method: GET + path: /_component_template + capabilities: ["local_param_deprecated"] + test_runner_features: ["capabilities", "warnings"] + reason: Deprecation was implemented with capability + + - do: + cluster.get_component_template: + local: true + warnings: + - "the [?local] query parameter to this API has no effect, is now deprecated, and will be removed in a future version" + +--- +"Deprecated local parameter works in v8 compat mode": + - requires: + test_runner_features: ["headers"] + + - do: + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" + Accept: "application/vnd.elasticsearch+json;compatible-with=8" + cluster.get_component_template: + local: true + + - exists: component_templates diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/10_basic.yml index c1c9741eb5fa3..0ffbd7fa557c3 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/10_basic.yml @@ -219,8 +219,6 @@ - do: indices.close: index: index-2 - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - is_true: acknowledged # closing the index-2 turns the cluster health back to green @@ -297,8 +295,6 @@ - do: indices.close: index: index-2 - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - is_true: acknowledged - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/30_indices_options.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/30_indices_options.yml index 8756b35569135..feef05e3733f6 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/30_indices_options.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/30_indices_options.yml @@ -25,8 +25,6 @@ setup: - do: indices.close: index: index-2 - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - do: cluster.health: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.state/30_expand_wildcards.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.state/30_expand_wildcards.yml index 25520b26d8302..b44e6aee33eaa 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.state/30_expand_wildcards.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.state/30_expand_wildcards.yml @@ -27,8 +27,6 @@ setup: - do: indices.close: index: 
test_close_index - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" --- "Test expand_wildcards parameter on closed, open indices and both": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml index 9da6d2c5f086e..ce3f7f0198399 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml @@ -1,9 +1,5 @@ --- setup: - - requires: - cluster_features: "mapper.query_index_mode" - reason: "require index_mode" - - do: indices.create: index: test_metrics diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml index 13f6ca58ea295..a0061272a2c23 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml @@ -1014,10 +1014,6 @@ flattened field: --- flattened field with ignore_above: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -1070,10 +1066,6 @@ flattened field with ignore_above: --- flattened field with ignore_above and arrays: - - requires: - cluster_features: ["mapper.flattened.ignore_above_with_arrays_support"] - reason: requires support of ignore_above synthetic source with arrays - - do: indices.create: index: test @@ -1127,10 +1119,6 @@ flattened field with ignore_above and arrays: --- completion: - - requires: - cluster_features: ["mapper.source.synthetic_source_fallback"] - reason: introduced in 8.15.0 - - do: indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml index 414c24cfffd7d..7b8f785a2cb93 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml @@ -2,7 +2,6 @@ "Metrics object indexing": - requires: test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] - cluster_features: ["mapper.subobjects_auto"] reason: requires supporting subobjects auto setting - do: @@ -69,7 +68,6 @@ "Root with metrics": - requires: test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] - cluster_features: ["mapper.subobjects_auto"] reason: requires supporting subobjects auto setting - do: @@ -131,7 +129,6 @@ "Metrics object indexing with synthetic source": - requires: test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] - cluster_features: ["mapper.subobjects_auto"] reason: added in 8.4.0 - do: @@ -201,7 +198,6 @@ "Root without subobjects with synthetic source": - requires: test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ] - cluster_features: ["mapper.subobjects_auto"] reason: added in 8.4.0 - do: diff --git 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml index d0e1759073e1b..8645c91a51ad3 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml @@ -171,7 +171,6 @@ index: test_lookup - match: { test_lookup.settings.index.number_of_shards: "1"} - - match: { test_lookup.settings.index.auto_expand_replicas: "0-all"} --- "Create lookup index with one shard": @@ -196,7 +195,6 @@ index: test_lookup - match: { test_lookup.settings.index.number_of_shards: "1"} - - match: { test_lookup.settings.index.auto_expand_replicas: "0-all"} --- "Create lookup index with two shards": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index af3d88fb35734..72dddcf8052cc 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -6,7 +6,7 @@ setup: --- object with unmapped fields: - requires: - cluster_features: ["mapper.track_ignored_source", "mapper.bwc_workaround_9_0"] + cluster_features: ["mapper.bwc_workaround_9_0"] reason: requires tracking ignored source - do: @@ -56,10 +56,6 @@ object with unmapped fields: --- unmapped arrays: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -103,7 +99,7 @@ unmapped arrays: --- nested object with unmapped fields: - requires: - cluster_features: ["mapper.track_ignored_source", "mapper.bwc_workaround_9_0"] + cluster_features: ["mapper.bwc_workaround_9_0"] reason: requires tracking ignored source - do: @@ -154,7 +150,7 @@ nested object with unmapped fields: --- empty object with unmapped fields: - requires: - cluster_features: ["mapper.track_ignored_source", "mapper.bwc_workaround_9_0"] + cluster_features: ["mapper.bwc_workaround_9_0"] reason: requires tracking ignored source - do: @@ -314,10 +310,6 @@ disabled object contains array: --- disabled subobject: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -353,10 +345,6 @@ disabled subobject: --- disabled subobject with array: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -393,10 +381,6 @@ disabled subobject with array: --- mixed disabled and enabled objects: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -488,10 +472,6 @@ object with dynamic override: --- subobject with dynamic override: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -537,10 +517,6 @@ subobject with dynamic override: --- object array in object with dynamic override: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -592,10 +568,6 @@ object array in object with dynamic override: --- value 
array in object with dynamic override: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -636,10 +608,6 @@ value array in object with dynamic override: --- nested object: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -682,10 +650,6 @@ nested object: --- nested object next to regular: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -729,10 +693,6 @@ nested object next to regular: --- nested object with disabled: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -818,10 +778,6 @@ nested object with disabled: --- doubly nested object: - - requires: - cluster_features: ["mapper.track_ignored_source"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -915,7 +871,7 @@ doubly nested object: --- subobjects auto: - requires: - cluster_features: ["mapper.subobjects_auto", "mapper.bwc_workaround_9_0"] + cluster_features: ["mapper.bwc_workaround_9_0"] reason: requires tracking ignored source and supporting subobjects auto setting - do: @@ -1003,10 +959,6 @@ subobjects auto: --- synthetic_source with copy_to: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1140,10 +1092,6 @@ synthetic_source with copy_to: --- synthetic_source with disabled doc_values: - - requires: - cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] - reason: requires disabled doc_values support in synthetic source - - do: indices.create: index: test @@ -1224,10 +1172,6 @@ synthetic_source with disabled doc_values: --- fallback synthetic_source for text field: - - requires: - cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] - reason: requires disabled doc_values support in synthetic source - - do: indices.create: index: test @@ -1259,10 +1203,6 @@ fallback synthetic_source for text field: --- synthetic_source with copy_to and ignored values: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1328,10 +1268,6 @@ synthetic_source with copy_to and ignored values: --- synthetic_source with copy_to field having values in source: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1392,10 +1328,6 @@ synthetic_source with copy_to field having values in source: --- synthetic_source with ignored source field using copy_to: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1457,10 +1389,6 @@ synthetic_source with ignored source field using copy_to: --- synthetic_source with copy_to field from dynamic template having values in source: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1555,7 +1483,6 @@ synthetic_source with copy_to 
field from dynamic template having values in sourc --- synthetic_source with copy_to and invalid values for copy: - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_fix"] reason: requires copy_to support in synthetic source test_runner_features: "contains" @@ -1592,10 +1519,6 @@ synthetic_source with copy_to and invalid values for copy: --- synthetic_source with copy_to pointing inside object: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1697,10 +1620,6 @@ synthetic_source with copy_to pointing inside object: --- synthetic_source with copy_to pointing to ambiguous field: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1745,10 +1664,6 @@ synthetic_source with copy_to pointing to ambiguous field: --- synthetic_source with copy_to pointing to ambiguous field and subobjects false: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1794,10 +1709,6 @@ synthetic_source with copy_to pointing to ambiguous field and subobjects false: --- synthetic_source with copy_to pointing to ambiguous field and subobjects auto: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -1845,7 +1756,6 @@ synthetic_source with copy_to pointing to ambiguous field and subobjects auto: synthetic_source with copy_to pointing at dynamic field: - requires: test_runner_features: contains - cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"] reason: requires copy_to support in synthetic source - do: @@ -1931,10 +1841,6 @@ synthetic_source with copy_to pointing at dynamic field: --- synthetic_source with copy_to pointing inside dynamic object: - - requires: - cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"] - reason: requires copy_to support in synthetic source - - do: indices.create: index: test @@ -2012,3 +1918,41 @@ synthetic_source with copy_to pointing inside dynamic object: hits.hits.2.fields: c.copy.keyword: [ "hello", "zap" ] +--- +create index with use_synthetic_source: + - requires: + cluster_features: ["mapper.synthetic_recovery_source"] + reason: requires synthetic recovery source + + - do: + indices.create: + index: test + body: + settings: + index: + recovery: + use_synthetic_source: true + mapping: + source: + mode: synthetic + + - do: + indices.get_settings: {} + - match: { test.settings.index.mapping.source.mode: synthetic} + - is_true: test.settings.index.recovery.use_synthetic_source + + - do: + index: + index: test + id: 1 + refresh: true + body: { foo: bar } + - match: { _version: 1 } + + - do: + indices.disk_usage: + index: test + run_expensive_tasks: true + flush: false + - gt: { test.store_size_in_bytes: 0 } + - is_false: test.fields._recovery_source diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml index 095665e9337b1..803b8a7d0062f 100644 --- 
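The create index with use_synthetic_source test above always pairs index.recovery.use_synthetic_source with index.mapping.source.mode: synthetic. Presumably the setting is rejected when _source is stored instead; the negative case below is an assumption sketched for illustration, not something this patch asserts.

  - do:
      # Assumption: synthetic recovery source requires synthetic _source,
      # so a stored source mode should fail index creation.
      catch: bad_request
      indices.create:
        index: test-stored-source
        body:
          settings:
            index:
              recovery:
                use_synthetic_source: true
              mapping:
                source:
                  mode: stored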
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml @@ -6,7 +6,7 @@ setup: --- object param - store complex object: - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] + cluster_features: ["mapper.bwc_workaround_9_0"] reason: requires tracking ignored source - do: @@ -71,7 +71,7 @@ object param - store complex object: --- object param - object array: - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] + cluster_features: ["mapper.bwc_workaround_9_0"] reason: requires tracking ignored source - do: @@ -136,7 +136,7 @@ object param - object array: --- object param - object array within array: - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] + cluster_features: ["mapper.bwc_workaround_9_0"] reason: requires tracking ignored source - do: @@ -180,7 +180,7 @@ object param - object array within array: --- object param - no object array: - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] + cluster_features: ["mapper.bwc_workaround_9_0"] reason: requires tracking ignored source - do: @@ -223,7 +223,7 @@ object param - no object array: --- object param - field ordering in object array: - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] + cluster_features: ["mapper.bwc_workaround_9_0"] reason: requires tracking ignored source - do: @@ -273,7 +273,7 @@ object param - field ordering in object array: --- object param - nested object array next to other fields: - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] + cluster_features: ["mapper.bwc_workaround_9_0"] reason: requires tracking ignored source - do: @@ -380,7 +380,7 @@ object param - nested object with stored array: --- index param - nested array within array: - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] + cluster_features: ["mapper.bwc_workaround_9_0"] reason: requires tracking ignored source - do: @@ -428,7 +428,7 @@ index param - nested array within array: # 112156 stored field under object with store_array_source: - requires: - cluster_features: ["mapper.source.synthetic_source_stored_fields_advance_fix", "mapper.bwc_workaround_9_0"] + cluster_features: ["mapper.bwc_workaround_9_0"] reason: requires bug fix to be implemented - do: @@ -477,10 +477,6 @@ stored field under object with store_array_source: --- field param - keep root array: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires keeping array source - - do: indices.create: index: test @@ -535,10 +531,6 @@ field param - keep root array: --- field param - keep nested array: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires keeping array source - - do: indices.create: index: test @@ -605,7 +597,6 @@ field param - keep nested array: field param - keep root singleton fields: - requires: test_runner_features: close_to - cluster_features: ["mapper.synthetic_source_keep"] reason: requires keeping singleton source - do: @@ -695,7 +686,6 @@ field param - keep root singleton fields: field param - keep nested singleton fields: - requires: test_runner_features: close_to - cluster_features: ["mapper.synthetic_source_keep"] reason: requires keeping singleton source - 
do: @@ -776,10 +766,6 @@ field param - keep nested singleton fields: --- field param - nested array within array: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires tracking ignored source - - do: indices.create: index: test @@ -821,7 +807,7 @@ field param - nested array within array: --- index param - root arrays: - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] + cluster_features: ["mapper.bwc_workaround_9_0"] reason: requires keeping array source - do: @@ -900,10 +886,6 @@ index param - root arrays: --- index param - dynamic root arrays: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires keeping array source - - do: indices.create: index: test @@ -952,10 +934,6 @@ index param - dynamic root arrays: --- index param - object array within array: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires keeping array source - - do: indices.create: index: test @@ -1001,10 +979,6 @@ index param - object array within array: --- index param - no object array: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires keeping array source - - do: indices.create: index: test @@ -1045,10 +1019,6 @@ index param - no object array: --- index param - field ordering: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires keeping array source - - do: indices.create: index: test @@ -1095,10 +1065,6 @@ index param - field ordering: --- index param - nested arrays: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires keeping array source - - do: indices.create: index: test @@ -1162,10 +1128,6 @@ index param - nested arrays: --- index param - nested object with stored array: - - requires: - cluster_features: ["mapper.synthetic_source_keep"] - reason: requires keeping array source - - do: indices.create: index: test @@ -1214,7 +1176,7 @@ index param - nested object with stored array: --- index param - flattened fields: - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] + cluster_features: ["mapper.bwc_workaround_9_0"] reason: requires keeping array source - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_template/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_template/10_basic.yml index 67592a013e8f1..4031423209830 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_template/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_template/10_basic.yml @@ -31,10 +31,15 @@ setup: --- "Test indices.exists_template with local flag": + - requires: + test_runner_features: ["allowed_warnings"] + - do: indices.exists_template: name: test local: true + allowed_warnings: + - "the [?local] query parameter to this API has no effect, is now deprecated, and will be removed in a future version" - is_false: '' diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get/10_basic.yml index e2fd28e745f0e..c72f10aaa66d4 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get/10_basic.yml @@ -40,8 +40,6 @@ setup: - do: indices.close: index: test_index_3 - 
allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - do: cluster.health: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml index 63ab40f3bf578..d454fde15c824 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml @@ -297,8 +297,6 @@ setup: - do: indices.close: index: test_index_2 - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - do: indices.get_alias: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_index_template/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_index_template/10_basic.yml index c47df413df9e7..afb3b0d1ff83c 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_index_template/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_index_template/10_basic.yml @@ -82,14 +82,48 @@ setup: --- "Get index template with local flag": + - requires: + test_runner_features: ["allowed_warnings"] - do: indices.get_index_template: name: test local: true + allowed_warnings: + - "the [?local] query parameter to this API has no effect, is now deprecated, and will be removed in a future version" - match: {index_templates.0.name: test} +--- +"Deprecated local parameter": + - requires: + capabilities: + - method: GET + path: /_get_index_template + capabilities: ["local_param_deprecated"] + test_runner_features: ["capabilities", "warnings"] + reason: Deprecation was implemented with capability + + - do: + indices.get_index_template: + local: true + warnings: + - "the [?local] query parameter to this API has no effect, is now deprecated, and will be removed in a future version" + +--- +"Deprecated local parameter works in v8 compat mode": + - requires: + test_runner_features: ["headers"] + + - do: + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" + Accept: "application/vnd.elasticsearch+json;compatible-with=8" + indices.get_index_template: + local: true + + - exists: index_templates + --- "Add data stream lifecycle": - requires: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml index 853837f1375ff..0eb717f7f6c1f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_mapping/50_wildcard_expansion.yml @@ -55,8 +55,6 @@ setup: - do: indices.close: index: test-xyy - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the 
future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - do: cluster.health: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_template/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_template/10_basic.yml index 9becbd54a3773..c602d913502d5 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_template/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_template/10_basic.yml @@ -62,14 +62,50 @@ setup: --- "Get template with local flag": + - requires: + test_runner_features: ["allowed_warnings"] - do: indices.get_template: name: test local: true + allowed_warnings: + - "the [?local] query parameter to this API has no effect, is now deprecated, and will be removed in a future version" - is_true: test +--- +"Deprecated local parameter": + - requires: + capabilities: + - method: GET + path: /_template + capabilities: ["local_param_deprecated"] + test_runner_features: ["capabilities", "warnings"] + reason: Deprecation was implemented with capability + + - do: + indices.get_template: + name: test + local: true + warnings: + - "the [?local] query parameter to this API has no effect, is now deprecated, and will be removed in a future version" + +--- +"Deprecated local parameter works in v8 compat mode": + - requires: + test_runner_features: ["headers"] + + - do: + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" + Accept: "application/vnd.elasticsearch+json;compatible-with=8" + indices.get_template: + name: test + local: true + + - exists: test.index_patterns + --- "Get template with flat settings and master timeout": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/10_basic.yml index 9101475fc9055..d1614df7f9037 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/10_basic.yml @@ -15,8 +15,6 @@ - do: indices.close: index: test_index - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - is_true: acknowledged - do: @@ -55,8 +53,6 @@ - do: indices.close: index: test_index - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - is_true: acknowledged - do: @@ -113,8 +109,6 @@ - do: indices.close: index: "index_*" - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - match: { acknowledged: true } - match: { shards_acknowledged: true } @@ -123,11 +117,14 @@ - match: { indices.index_3.closed: true } --- -"?wait_for_active_shards=index-setting is deprecated": +"?wait_for_active_shards=index-setting is removed": - requires: - cluster_features: ["gte_v8.0.0"] - 
reason: "required deprecation warning is only emitted in 8.0 and later" - test_runner_features: ["warnings"] + reason: "Parameter value 'index-setting' of 'wait-for-active-shards' is rejected with specialised error." + test_runner_features: [capabilities] + capabilities: + - method: POST + path: /{index}/_close + capabilities: [ wait-for-active-shards-index-setting-removed ] - do: indices.create: @@ -137,8 +134,7 @@ number_of_replicas: 0 - do: + catch: /The 'index-setting' value for parameter 'wait_for_active_shards' is the default behaviour and this configuration value is not supported anymore. Please remove 'wait_for_active_shards=index-setting'/ indices.close: index: "index_*" wait_for_active_shards: index-setting - warnings: - - "?wait_for_active_shards=index-setting is now the default behaviour; the 'index-setting' value for this parameter should no longer be used since it will become unsupported in version 9" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml index ec71423bdc24b..222731c294597 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml @@ -24,8 +24,6 @@ setup: - do: indices.close: index: _all - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - is_true: acknowledged - do: @@ -57,8 +55,6 @@ setup: - do: indices.close: index: test_* - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - is_true: acknowledged - do: @@ -90,8 +86,6 @@ setup: - do: indices.close: index: '*' - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - is_true: acknowledged - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml index 3d82539944a97..89816be5ca8e7 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml @@ -453,8 +453,6 @@ --- "Composable index templates that include subobjects: auto at root": - requires: - cluster_features: ["mapper.subobjects_auto"] - reason: "https://github.com/elastic/elasticsearch/issues/96768 fixed at 8.11.0" test_runner_features: "allowed_warnings" - do: @@ -504,8 +502,6 @@ --- "Composable index templates that include subobjects: auto on arbitrary field": - requires: - cluster_features: ["mapper.subobjects_auto"] - reason: "https://github.com/elastic/elasticsearch/issues/96768 fixed at 8.11.0" test_runner_features: 
"allowed_warnings" - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/10_basic.yml index 00cdd287f06df..0772017bcb24b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/10_basic.yml @@ -92,8 +92,6 @@ setup: - do: indices.close: index: test-index - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - do: indices.put_settings: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/10_basic.yml index 06865d2f620e3..8ad06910ebe4d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/10_basic.yml @@ -59,8 +59,6 @@ - do: indices.close: index: test_2 - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - is_true: acknowledged - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/20_synthetic_source.yml new file mode 100644 index 0000000000000..493b834fc5a90 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/20_synthetic_source.yml @@ -0,0 +1,33 @@ +--- +test recovery empty index with use_synthetic_source: + - requires: + cluster_features: ["mapper.synthetic_recovery_source"] + reason: requires synthetic recovery source + + - do: + indices.create: + index: test + body: + settings: + index: + number_of_replicas: 0 + recovery: + use_synthetic_source: true + mapping: + source: + mode: synthetic + + - do: + indices.get_settings: {} + - match: { test.settings.index.mapping.source.mode: synthetic} + - is_true: test.settings.index.recovery.use_synthetic_source + + - do: + indices.put_settings: + index: test + body: + index.number_of_replicas: 1 + + - do: + cluster.health: + wait_for_events: languid diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.resolve_cluster/10_basic_resolve_cluster.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.resolve_cluster/10_basic_resolve_cluster.yml index ba341e0d220e1..46bd0b8099e4a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.resolve_cluster/10_basic_resolve_cluster.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.resolve_cluster/10_basic_resolve_cluster.yml @@ -20,8 +20,6 @@ setup: - do: indices.close: index: index2 - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' 
to preserve today's behaviour" --- "Resolve cluster with indices and aliases": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.resolve_index/10_basic_resolve_index.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.resolve_index/10_basic_resolve_index.yml index f7252d0a9a89c..b689cfba43b86 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.resolve_index/10_basic_resolve_index.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.resolve_index/10_basic_resolve_index.yml @@ -24,8 +24,6 @@ setup: - do: indices.close: index: test_index2 - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - do: indices.create: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.segments/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.segments/10_basic.yml index 0235d5219e47e..f0cfc4e585cc6 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.segments/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.segments/10_basic.yml @@ -105,8 +105,6 @@ - do: indices.close: index: index1 - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - do: catch: bad_request diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml index c88d638199dba..d07d03cb7146c 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml @@ -1,8 +1,5 @@ --- sort doc with nested object: - - requires: - cluster_features: ["mapper.index_sorting_on_nested"] - reason: uses index sorting on nested fields - do: indices.create: index: test @@ -66,9 +63,6 @@ sort doc with nested object: --- sort doc on nested field: - - requires: - cluster_features: [ "mapper.index_sorting_on_nested" ] - reason: uses index sorting on nested fields - do: catch: /cannot apply index sort to field \[nested_field\.foo\] under nested object \[nested_field\]/ indices.create: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml index 463df7d2ab1bb..2a31b3bd387c4 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml @@ -10,14 +10,6 @@ setup: --- create logs index: - - requires: - test_runner_features: [ capabilities ] - capabilities: - - method: PUT - path: /{index} - capabilities: [ logsdb_index_mode ] - reason: "Support for 'logsdb' index mode capability required" - - do: indices.create: index: test @@ -78,14 +70,6 @@ create logs index: --- using default timestamp field mapping: - - requires: - test_runner_features: [ capabilities ] - capabilities: - - method: PUT - 
path: /{index} - capabilities: [ logsdb_index_mode ] - reason: "Support for 'logsdb' index mode capability required" - - do: indices.create: index: test-timestamp-missing @@ -110,14 +94,6 @@ using default timestamp field mapping: --- missing hostname field: - - requires: - test_runner_features: [ capabilities ] - capabilities: - - method: PUT - path: /{index} - capabilities: [ logsdb_index_mode ] - reason: "Support for 'logsdb' index mode capability required" - - do: indices.create: index: test-hostname-missing @@ -149,14 +125,6 @@ missing hostname field: --- missing sort field: - - requires: - test_runner_features: [ capabilities ] - capabilities: - - method: PUT - path: /{index} - capabilities: [ logsdb_index_mode ] - reason: "Support for 'logsdb' index mode capability required" - - do: catch: bad_request indices.create: @@ -190,14 +158,6 @@ missing sort field: --- non-default sort settings: - - requires: - test_runner_features: [ capabilities ] - capabilities: - - method: PUT - path: /{index} - capabilities: [ logsdb_index_mode ] - reason: "Support for 'logsdb' index mode capability required" - - do: indices.create: index: test-sort @@ -244,14 +204,10 @@ non-default sort settings: --- override sort order settings: - requires: - test_runner_features: [ capabilities ] - capabilities: - - method: PUT - path: /{index} - capabilities: [ logsdb_index_mode ] - reason: "Support for 'logsdb' index mode capability required" - + cluster_features: [ "index.logsdb_no_host_name_field" ] + reason: "Change in default sort config for logsdb" - do: + catch: bad_request indices.create: index: test-sort-order body: @@ -278,28 +234,16 @@ override sort order settings: message: type: text - - do: - indices.get_settings: - index: test-sort-order - - - is_true: test-sort-order - - match: { test-sort-order.settings.index.mode: "logsdb" } - - match: { test-sort-order.settings.index.sort.field.0: null } - - match: { test-sort-order.settings.index.sort.field.1: null } - - match: { test-sort-order.settings.index.sort.order.0: "asc" } - - match: { test-sort-order.settings.index.sort.order.1: "asc" } + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: "index.sort.fields:[] index.sort.order:[asc, asc], size mismatch" } --- override sort missing settings: - requires: - test_runner_features: [ capabilities ] - capabilities: - - method: PUT - path: /{index} - capabilities: [ logsdb_index_mode ] - reason: "Support for 'logsdb' index mode capability required" - + cluster_features: [ "index.logsdb_no_host_name_field" ] + reason: "Change in default sort config for logsdb" - do: + catch: bad_request indices.create: index: test-sort-missing body: @@ -326,28 +270,16 @@ override sort missing settings: message: type: text - - do: - indices.get_settings: - index: test-sort-missing - - - is_true: test-sort-missing - - match: { test-sort-missing.settings.index.mode: "logsdb" } - - match: { test-sort-missing.settings.index.sort.field.0: null } - - match: { test-sort-missing.settings.index.sort.field.1: null } - - match: { test-sort-missing.settings.index.sort.missing.0: "_last" } - - match: { test-sort-missing.settings.index.sort.missing.1: "_first" } + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: "index.sort.fields:[] index.sort.missing:[_last, _first], size mismatch" } --- override sort mode settings: - requires: - test_runner_features: [ capabilities ] - capabilities: - - method: PUT - path: /{index} - capabilities: [ logsdb_index_mode ] - reason: "Support for 
'logsdb' index mode capability required" - + cluster_features: [ "index.logsdb_no_host_name_field" ] + reason: "Change in default sort config for logsdb" - do: + catch: bad_request indices.create: index: test-sort-mode body: @@ -374,21 +306,12 @@ override sort mode settings: message: type: text - - do: - indices.get_settings: - index: test-sort-mode - - - is_true: test-sort-mode - - match: { test-sort-mode.settings.index.mode: "logsdb" } - - match: { test-sort-mode.settings.index.sort.field.0: null } - - match: { test-sort-mode.settings.index.sort.field.1: null } - - match: { test-sort-mode.settings.index.sort.mode.0: "max" } - - match: { test-sort-mode.settings.index.sort.mode.1: "max" } + - match: { error.type: "illegal_argument_exception" } + - match: { error.reason: "index.sort.fields:[] index.sort.mode:[MAX, MAX], size mismatch" } --- override sort field using nested field type in sorting: - requires: - cluster_features: ["mapper.index_sorting_on_nested"] test_runner_features: [ capabilities ] capabilities: - method: PUT @@ -434,14 +357,6 @@ override sort field using nested field type in sorting: --- override sort field using nested field type: - - requires: - cluster_features: ["mapper.index_sorting_on_nested"] - test_runner_features: [ capabilities ] - capabilities: - - method: PUT - path: /{index} - capabilities: [ logsdb_index_mode ] - reason: "Support for 'logsdb' index mode capability required" - do: indices.create: @@ -475,14 +390,6 @@ override sort field using nested field type: --- routing path not allowed in logs mode: - - requires: - test_runner_features: [ capabilities ] - capabilities: - - method: PUT - path: /{index} - capabilities: [ logsdb_index_mode ] - reason: "Support for 'logsdb' index mode capability required" - - do: catch: bad_request indices.create: @@ -514,15 +421,49 @@ routing path not allowed in logs mode: - match: { error.reason: "[index.routing_path] requires [index.mode=time_series]" } --- -start time not allowed in logs mode: +routing path allowed in logs mode with routing on sort fields: - requires: - test_runner_features: [ capabilities ] - capabilities: - - method: PUT - path: /{index} - capabilities: [ logsdb_index_mode ] - reason: "Support for 'logsdb' index mode capability required" + cluster_features: [ "routing.logsb_route_on_sort_fields" ] + reason: introduction of route on index sorting fields + - do: + indices.create: + index: test + body: + settings: + index: + mode: logsdb + number_of_replicas: 0 + number_of_shards: 2 + routing_path: [ host.name, agent_id ] + logsdb: + route_on_sort_fields: true + mappings: + properties: + "@timestamp": + type: date + host.name: + type: keyword + agent_id: + type: keyword + process_id: + type: integer + http_method: + type: keyword + message: + type: text + + - do: + indices.get_settings: + index: test + + - is_true: test + - match: { test.settings.index.mode: logsdb } + - match: { test.settings.index.logsdb.route_on_sort_fields: "true" } + - match: { test.settings.index.routing_path: [ host.name, agent_id ] } + +--- +start time not allowed in logs mode: - do: catch: bad_request indices.create: @@ -556,14 +497,6 @@ start time not allowed in logs mode: --- end time not allowed in logs mode: - - requires: - test_runner_features: [ capabilities ] - capabilities: - - method: PUT - path: /{index} - capabilities: [ logsdb_index_mode ] - reason: "Support for 'logsdb' index mode capability required" - - do: catch: bad_request indices.create: @@ -643,10 +576,10 @@ ignore dynamic beyond limit logsdb override value: - 
match: { test-ignore-dynamic-override.settings.index.mapping.total_fields.ignore_dynamic_beyond_limit: "false" } --- -logsdb with default ignore dynamic beyond limit and default sorting: +default ignore dynamic beyond limit and default sorting: - requires: - cluster_features: ["mapper.logsdb_default_ignore_dynamic_beyond_limit"] - reason: requires default value for ignore_dynamic_beyond_limit + cluster_features: [ "index.logsdb_no_host_name_field" ] + reason: "No host.name field injection" - do: indices.create: @@ -656,19 +589,8 @@ logsdb with default ignore dynamic beyond limit and default sorting: index: mode: logsdb mapping: - # NOTE: When the index mode is set to `logsdb`, the `host.name` field is automatically injected if - # sort settings are not overridden. - # With `subobjects` set to `true` (default), this creates a `host` object field and a nested `name` - # keyword field (`host.name`). - # - # As a result, there are always at least 4 statically mapped fields (`@timestamp`, `host`, `host.name` - # and `name`). We cannot use a field limit lower than 4 because these fields are always present. - # - # Indeed, if `index.mapping.total_fields.ignore_dynamic_beyond_limit` is `true`, any dynamically - # mapped fields beyond the limit `index.mapping.total_fields.limit` are ignored, but the statically - # mapped fields are always counted. total_fields: - limit: 4 + limit: 2 mappings: properties: "@timestamp": @@ -688,9 +610,9 @@ logsdb with default ignore dynamic beyond limit and default sorting: refresh: true body: - '{ "index": { } }' - - '{ "@timestamp": "2024-08-13T12:30:00Z", "name": "foo", "host.name": "92f4a67c", "value": 10, "message": "the quick brown fox", "region": "us-west", "pid": 153462 }' + - '{ "@timestamp": "2024-08-13T12:30:00Z", "name": "foo", "value": 10, "message": "the quick brown fox", "region": "us-west", "pid": 153462 }' - '{ "index": { } }' - - '{ "@timestamp": "2024-08-13T12:01:00Z", "name": "bar", "host.name": "24eea278", "value": 20, "message": "jumps over the lazy dog", "region": "us-central", "pid": 674972 }' + - '{ "@timestamp": "2024-08-13T12:01:00Z", "name": "bar", "value": 20, "message": "jumps over the lazy dog", "region": "us-central", "pid": 674972 }' - match: { errors: false } - do: @@ -712,132 +634,82 @@ logsdb with default ignore dynamic beyond limit and default sorting: - match: { hits.hits.1._ignored: [ "message", "pid", "region", "value" ] } --- -logsdb with default ignore dynamic beyond limit and non-default sorting: +default ignore dynamic beyond limit and default sorting with hostname: - requires: - cluster_features: ["mapper.logsdb_default_ignore_dynamic_beyond_limit"] - reason: requires default value for ignore_dynamic_beyond_limit + cluster_features: [ "index.logsdb_no_host_name_field" ] + reason: "No host.name field injection" - do: indices.create: - index: test-logsdb-non-default-sort + index: test-logsdb-default-sort body: settings: index: - sort.field: [ "name" ] - sort.order: [ "desc" ] mode: logsdb mapping: - # NOTE: Here sort settings are overridden and we do not have any additional statically mapped field other - # than `name` and `timestamp`. As a result, there are only 2 statically mapped fields. 
total_fields: - limit: 2 + limit: 3 mappings: properties: "@timestamp": type: date - name: + host.name: type: keyword - do: indices.get_settings: - index: test-logsdb-non-default-sort + index: test-logsdb-default-sort - - match: { test-logsdb-non-default-sort.settings.index.mode: "logsdb" } + - match: { test-logsdb-default-sort.settings.index.mode: "logsdb" } - do: bulk: - index: test-logsdb-non-default-sort + index: test-logsdb-default-sort refresh: true body: - '{ "index": { } }' - - '{ "@timestamp": "2024-08-13T12:30:00Z", "name": "foo", "host.name": "92f4a67c", "value": 10, "message": "the quick brown fox", "region": "us-west", "pid": 153462 }' + - '{ "@timestamp": "2024-08-13T12:30:00Z", "host.name": "foo", "value": 10, "message": "the quick brown fox", "region": "us-west", "pid": 153462 }' - '{ "index": { } }' - - '{ "@timestamp": "2024-08-13T12:01:00Z", "name": "bar", "host.name": "24eea278", "value": 20, "message": "jumps over the lazy dog", "region": "us-central", "pid": 674972 }' + - '{ "@timestamp": "2024-08-13T12:01:00Z", "host.name": "bar", "value": 20, "message": "jumps over the lazy dog", "region": "us-central", "pid": 674972 }' - match: { errors: false } - do: search: - index: test-logsdb-non-default-sort + index: test-logsdb-default-sort body: query: match_all: {} sort: "@timestamp" - match: { hits.total.value: 2 } - - match: { hits.hits.0._source.name: "bar" } + - match: { hits.hits.0._source.host.name: "bar" } - match: { hits.hits.0._source.value: 20 } - match: { hits.hits.0._source.message: "jumps over the lazy dog" } - - match: { hits.hits.0._ignored: [ "host", "message", "pid", "region", "value" ] } - - match: { hits.hits.1._source.name: "foo" } + - match: { hits.hits.0._ignored: [ "message", "pid", "region", "value" ] } + - match: { hits.hits.1._source.host.name: "foo" } - match: { hits.hits.1._source.value: 10 } - match: { hits.hits.1._source.message: "the quick brown fox" } - - match: { hits.hits.1._ignored: [ "host", "message", "pid", "region", "value" ] } - ---- -logsdb with default ignore dynamic beyond limit and too low limit: - - requires: - cluster_features: ["mapper.logsdb_default_ignore_dynamic_beyond_limit"] - reason: requires default value for ignore_dynamic_beyond_limit - - - do: - catch: bad_request - indices.create: - index: test-logsdb-low-limit - body: - settings: - index: - mode: logsdb - mapping: - # NOTE: When the index mode is set to `logsdb`, the `host.name` field is automatically injected if - # sort settings are not overridden. - # With `subobjects` set to `true` (default), this creates a `host` object field and a nested `name` - # keyword field (`host.name`). - # - # As a result, there are always at least 4 statically mapped fields (`@timestamp`, `host`, `host.name` - # and `name`). We cannot use a field limit lower than 4 because these fields are always present. - # - # Indeed, if `index.mapping.total_fields.ignore_dynamic_beyond_limit` is `true`, any dynamically - # mapped fields beyond the limit `index.mapping.total_fields.limit` are ignored, but the statically - # mapped fields are always counted. 
- total_fields: - limit: 3 - mappings: - properties: - "@timestamp": - type: date - name: - type: keyword - - match: { error.type: "illegal_argument_exception" } - - match: { error.reason: "Limit of total fields [3] has been exceeded" } + - match: { hits.hits.1._ignored: [ "message", "pid", "region", "value" ] } --- -logsdb with default ignore dynamic beyond limit and subobjects false: +default ignore dynamic beyond limit and non-default sorting: - requires: - cluster_features: ["mapper.logsdb_default_ignore_dynamic_beyond_limit"] - reason: requires default value for ignore_dynamic_beyond_limit + cluster_features: [ "index.logsdb_no_host_name_field" ] + reason: "No host.name field injection" - do: indices.create: - index: test-logsdb-subobjects-false + index: test-logsdb-non-default-sort body: settings: index: + sort.field: [ "name" ] + sort.order: [ "desc" ] mode: logsdb mapping: - # NOTE: When the index mode is set to `logsdb`, the `host.name` field is automatically injected if - # sort settings are not overridden. - # With `subobjects` set to `false` anyway, a single `host.name` keyword field is automatically mapped. - # - # As a result, there are just 3 statically mapped fields (`@timestamp`, `host.name` and `name`). - # We cannot use a field limit lower than 3 because these fields are always present. - # - # Indeed, if `index.mapping.total_fields.ignore_dynamic_beyond_limit` is `true`, any dynamically - # mapped fields beyond the limit `index.mapping.total_fields.limit` are ignored, but the statically - # mapped fields are always counted. total_fields: - limit: 3 + limit: 2 mappings: - subobjects: false properties: "@timestamp": type: date @@ -846,13 +718,13 @@ logsdb with default ignore dynamic beyond limit and subobjects false: - do: indices.get_settings: - index: test-logsdb-subobjects-false + index: test-logsdb-non-default-sort - - match: { test-logsdb-subobjects-false.settings.index.mode: "logsdb" } + - match: { test-logsdb-non-default-sort.settings.index.mode: "logsdb" } - do: bulk: - index: test-logsdb-subobjects-false + index: test-logsdb-non-default-sort refresh: true body: - '{ "index": { } }' @@ -863,7 +735,7 @@ logsdb with default ignore dynamic beyond limit and subobjects false: - do: search: - index: test-logsdb-subobjects-false + index: test-logsdb-non-default-sort body: query: match_all: {} @@ -873,8 +745,8 @@ logsdb with default ignore dynamic beyond limit and subobjects false: - match: { hits.hits.0._source.name: "bar" } - match: { hits.hits.0._source.value: 20 } - match: { hits.hits.0._source.message: "jumps over the lazy dog" } - - match: { hits.hits.0._ignored: [ "message", "pid", "region", "value" ] } + - match: { hits.hits.0._ignored: [ "host", "message", "pid", "region", "value" ] } - match: { hits.hits.1._source.name: "foo" } - match: { hits.hits.1._source.value: 10 } - match: { hits.hits.1._source.message: "the quick brown fox" } - - match: { hits.hits.1._ignored: [ "message", "pid", "region", "value" ] } + - match: { hits.hits.1._ignored: [ "host", "message", "pid", "region", "value" ] } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml index 084f104932d99..8485aba0ecc6a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml @@ -55,9 +55,6 @@ keyword: --- keyword with 
normalizer: - - requires: - cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ] - reason: support for normalizer on keyword fields - do: indices.create: index: test-keyword-with-normalizer diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml index 9d6e8da8c1e1e..2a14c291d5d31 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml @@ -417,7 +417,6 @@ - requires: test_runner_features: [arbitrary_key] - cluster_features: ["mapper.query_index_mode"] reason: "_ignored_source added to mappings" - do: @@ -511,10 +510,6 @@ --- "Lucene segment level fields stats": - - requires: - cluster_features: ["mapper.segment_level_fields_stats"] - reason: "segment level fields stats" - - do: indices.create: index: index1 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/90_fs_watermark_stats.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/90_fs_watermark_stats.yml index 3ec854e93d82c..20e9d92a36088 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/90_fs_watermark_stats.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/90_fs_watermark_stats.yml @@ -1,8 +1,6 @@ --- "Allocation stats": - requires: - cluster_features: ["stats.include_disk_thresholds"] - reason: "fs watermark stats was added in 8.15.0" test_runner_features: [arbitrary_key] - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml index 3432a1e34c018..6ca17cc9cdce9 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml @@ -520,10 +520,6 @@ setup: --- "Null bounds": - - requires: - cluster_features: ["mapper.range.null_values_off_by_one_fix"] - reason: fixed in 8.15.0 - - do: index: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml index d732fb084db3d..9b3291e19c5fd 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/30_max_analyzed_offset.yml @@ -115,12 +115,70 @@ setup: - match: {hits.hits.0.highlight.field2.0: "The quick brown fox went to the forest and saw another fox."} --- -"Plain highlighter with max_analyzed_offset < 0 should FAIL": +"Plain highlighter on a field WITH OFFSETS exceeding index.highlight.max_analyzed_offset with max_analyzed_offset=0 should FAIL": + + - requires: + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_search + capabilities: [ highlight_max_analyzed_offset_default ] + reason: Behavior of max_analyzed_offset query param changed in 8.18. 
+ + - do: + catch: bad_request + search: + rest_total_hits_as_int: true + index: test1 + body: {"query" : {"match" : {"field2" : "fox"}}, "highlight" : {"type" : "plain", "fields" : {"field2" : {}}, "max_analyzed_offset": 0}} + - match: { status: 400 } + - match: { error.root_cause.0.type: "x_content_parse_exception" } + - match: { error.caused_by.type: "illegal_argument_exception" } + - match: { error.caused_by.reason: "[max_analyzed_offset] must be a positive integer, or -1" } + +--- +"Plain highlighter on a field WITH OFFSETS exceeding index.highlight.max_analyzed_offset with max_analyzed_offset=1 should SUCCEED": - requires: cluster_features: ["gte_v7.12.0"] reason: max_analyzed_offset query param added in 7.12.0 + - do: + search: + rest_total_hits_as_int: true + index: test1 + body: {"query" : {"match" : {"field2" : "fox"}}, "highlight" : {"type" : "plain", "fields" : {"field2" : {}}, "max_analyzed_offset": 1}} + - match: { hits.hits.0.highlight: null } + +--- +"Plain highlighter with max_analyzed_offset = -1 defaulting to index.highlight.max_analyzed_offset should SUCCEED": + + - requires: + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_search + capabilities: [ highlight_max_analyzed_offset_default ] + reason: Behavior of max_analyzed_offset query param changed in 8.18. + + - do: + search: + rest_total_hits_as_int: true + index: test1 + body: {"query" : {"match" : {"field2" : "fox"}}, "highlight" : {"type" : "plain", "fields" : {"field2" : {}}, "max_analyzed_offset": -1}} + - match: {hits.hits.0.highlight.field2.0: "The quick brown fox went to the forest and saw another fox."} + +--- +"Plain highlighter with max_analyzed_offset < -1 should FAIL": + + - requires: + test_runner_features: [capabilities] + capabilities: + - method: GET + path: /_search + capabilities: [ highlight_max_analyzed_offset_default ] + reason: Behavior of max_analyzed_offset query param changed in 8.18. 
+ - do: catch: bad_request search: @@ -130,4 +188,4 @@ setup: - match: { status: 400 } - match: { error.root_cause.0.type: "x_content_parse_exception" } - match: { error.caused_by.type: "illegal_argument_exception" } - - match: { error.caused_by.reason: "[max_analyzed_offset] must be a positive integer" } + - match: { error.caused_by.reason: "[max_analyzed_offset] must be a positive integer, or -1" } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/60_unified_matched_fields.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/60_unified_matched_fields.yml index bd14fb182ac5a..94db54d152941 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/60_unified_matched_fields.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/60_unified_matched_fields.yml @@ -1,8 +1,4 @@ setup: - - requires: - cluster_features: 'unified_highlighter_matched_fields' - reason: 'test requires unified highlighter to support matched_fields' - - do: indices.create: index: index1 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/20_knn_retriever.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/20_knn_retriever.yml index e49f0634a4887..1f07884c9fadf 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/20_knn_retriever.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/20_knn_retriever.yml @@ -82,7 +82,7 @@ setup: capabilities: - method: GET path: /_search - capabilities: [knn_quantized_vector_rescore] + capabilities: [knn_quantized_vector_rescore_oversample] - skip: features: "headers" @@ -100,7 +100,7 @@ setup: k: 3 num_candidates: 3 rescore_vector: - num_candidates_factor: 1.5 + oversample: 1.5 # Get rescoring scores - hit ordering may change depending on how things are distributed - match: { hits.total: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/30_rescorer_retriever.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/30_rescorer_retriever.yml new file mode 100644 index 0000000000000..2c16de61c6b15 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.retrievers/30_rescorer_retriever.yml @@ -0,0 +1,225 @@ +setup: + - requires: + cluster_features: [ "search.retriever.rescorer.enabled" ] + reason: "Support for rescorer retriever" + + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + available: + type: boolean + features: + type: rank_features + + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {"_id": 1 }}' + - '{"features": { "first_stage": 1, "second_stage": 10}, "available": true, "group": 1}' + - '{"index": {"_id": 2 }}' + - '{"features": { "first_stage": 2, "second_stage": 9}, "available": false, "group": 1}' + - '{"index": {"_id": 3 }}' + - '{"features": { "first_stage": 3, "second_stage": 8}, "available": false, "group": 3}' + - '{"index": {"_id": 4 }}' + - '{"features": { "first_stage": 4, "second_stage": 7}, "available": true, "group": 1}' + - '{"index": {"_id": 5 }}' + - '{"features": { "first_stage": 5, "second_stage": 6}, "available": true, "group": 3}' + - '{"index": {"_id": 6 }}' + - '{"features": { "first_stage": 6, "second_stage": 5}, "available": false, "group": 2}' + - '{"index": {"_id": 7 }}' + - 
'{"features": { "first_stage": 7, "second_stage": 4}, "available": true, "group": 3}' + - '{"index": {"_id": 8 }}' + - '{"features": { "first_stage": 8, "second_stage": 3}, "available": true, "group": 1}' + - '{"index": {"_id": 9 }}' + - '{"features": { "first_stage": 9, "second_stage": 2}, "available": true, "group": 2}' + - '{"index": {"_id": 10 }}' + - '{"features": { "first_stage": 10, "second_stage": 1}, "available": false, "group": 1}' + +--- +"Rescorer retriever basic": + - do: + search: + index: test + body: + retriever: + rescorer: + rescore: + window_size: 10 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: { } + query_weight: 0 + retriever: + standard: + query: + rank_feature: + field: "features.first_stage" + linear: { } + size: 2 + + - match: { hits.total.value: 10 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._score: 10.0 } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1._score: 9.0 } + + - do: + search: + index: test + body: + retriever: + rescorer: + rescore: + window_size: 3 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: {} + query_weight: 0 + retriever: + standard: + query: + rank_feature: + field: "features.first_stage" + linear: {} + size: 2 + + - match: {hits.total.value: 10} + - match: {hits.hits.0._id: "8"} + - match: { hits.hits.0._score: 3.0 } + - match: {hits.hits.1._id: "9"} + - match: { hits.hits.1._score: 2.0 } + +--- +"Rescorer retriever with pre-filters": + - do: + search: + index: test + body: + retriever: + rescorer: + filter: + match: + available: true + rescore: + window_size: 10 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: { } + query_weight: 0 + retriever: + standard: + query: + rank_feature: + field: "features.first_stage" + linear: { } + size: 2 + + - match: { hits.total.value: 6 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._score: 10.0 } + - match: { hits.hits.1._id: "4" } + - match: { hits.hits.1._score: 7.0 } + + - do: + search: + index: test + body: + retriever: + rescorer: + rescore: + window_size: 4 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: { } + query_weight: 0 + retriever: + standard: + filter: + match: + available: true + query: + rank_feature: + field: "features.first_stage" + linear: { } + size: 2 + + - match: { hits.total.value: 6 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.0._score: 6.0 } + - match: { hits.hits.1._id: "7" } + - match: { hits.hits.1._score: 4.0 } + +--- +"Rescorer retriever and collapsing": + - do: + search: + index: test + body: + retriever: + rescorer: + rescore: + window_size: 10 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: { } + query_weight: 0 + retriever: + standard: + query: + rank_feature: + field: "features.first_stage" + linear: { } + collapse: + field: group + size: 3 + + - match: { hits.total.value: 10 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._score: 10.0 } + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.1._score: 8.0 } + - match: { hits.hits.2._id: "6" } + - match: { hits.hits.2._score: 5.0 } + +--- +"Rescorer retriever and invalid window size": + - do: + catch: "/\\[rescorer\\] requires \\[window_size: 5\\] be greater than or equal to \\[size: 10\\]/" + search: + index: test + body: + retriever: + rescorer: + rescore: + window_size: 5 + query: + rescore_query: + rank_feature: + field: "features.second_stage" + linear: { 
} + query_weight: 0 + retriever: + standard: + query: + rank_feature: + field: "features.first_stage" + linear: { } + size: 10 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml index a3d920d903ae8..bc4e262ea53c6 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml @@ -1,8 +1,6 @@ setup: - requires: - cluster_features: "mapper.vectors.bit_vectors" test_runner_features: close_to - reason: 'bit vectors added in 8.15' - do: indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml index 618951711cffd..cffc12a8d24ae 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml @@ -59,7 +59,6 @@ setup: --- "Simple knn query": - - do: search: index: my_index @@ -71,8 +70,9 @@ setup: field: my_vector query_vector: [1, 1, 1, 1] num_candidates: 5 + k: 5 - - match: { hits.total.value: 5 } # collector sees num_candidates docs + - match: { hits.total.value: 5 } - length: {hits.hits: 3} - match: { hits.hits.0._id: "1" } - match: { hits.hits.0.fields.my_name.0: v1 } @@ -93,8 +93,9 @@ setup: field: my_vector query_vector: [1, 1, 1, 1] num_candidates: 5 + k: 5 - - match: { hits.total.value: 5 } # collector sees num_candidates docs + - match: { hits.total.value: 5 } - length: {hits.hits: 3} - match: { hits.hits.0._id: "2" } - match: { hits.hits.0.fields.my_name.0: v2 } @@ -140,6 +141,7 @@ setup: field: my_vector query_vector: [1, 1, 1, 1] num_candidates: 5 + k: 5 - match: { hits.total.value: 5 } - length: { hits.hits: 3 } @@ -184,6 +186,7 @@ setup: field: my_vector query_vector: [1, 1, 1, 1] num_candidates: 100 + k: 100 - match: { hits.total.value: 10 } # 5 docs from each alias - length: {hits.hits: 6} @@ -213,6 +216,7 @@ setup: field: my_vector query_vector: [1, 1, 1, 1] num_candidates: 5 + k: 5 filter: term: my_name: v2 @@ -243,9 +247,10 @@ setup: field: my_vector query_vector: [1, 1, 1, 1] num_candidates: 5 + k: 5 - match: { hits.total.value: 2 } - - length: {hits.hits: 2} # knn query returns top 5 docs, but they are post-filtered to 2 docs + - length: {hits.hits: 2} # knn query returns top 3 docs, but they are post-filtered to 2 docs - match: { hits.hits.0._id: "2" } - match: { hits.hits.0.fields.my_name.0: v2 } - match: { hits.hits.1._id: "4" } @@ -271,4 +276,4 @@ setup: my_name: v1 - match: { hits.total.value: 0} - - length: { hits.hits: 0 } # knn query returns top 5 docs, but they are post-filtered to 0 docs + - length: { hits.hits: 0 } # knn query returns top 3 docs, but they are post-filtered to 0 docs diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/120_knn_query_multiple_shards.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/120_knn_query_multiple_shards.yml index c6f3e187f7953..c68565e6629f5 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/120_knn_query_multiple_shards.yml +++ 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/120_knn_query_multiple_shards.yml @@ -166,55 +166,3 @@ setup: - close_to: { hits.hits.2._score: { value: 120, error: 0.00001 } } - close_to: { hits.hits.2.matched_queries.bm25_query: { value: 100.0, error: 0.00001 } } - close_to: { hits.hits.2.matched_queries.knn_query: { value: 20.0, error: 0.00001 } } - ---- -"Aggregations with collected number of docs depends on num_candidates": - - do: - search: - index: my_index - body: - size: 2 - query: - knn: - field: my_vector - query_vector: [1, 1, 1, 1] - num_candidates: 100 # collect up to 100 candidates from each shard - aggs: - my_agg: - terms: - field: my_name - order: - _key: asc - - - length: {hits.hits: 2} - - match: {hits.total.value: 12} - - match: {aggregations.my_agg.buckets.0.key: 'v1'} - - match: {aggregations.my_agg.buckets.1.key: 'v2'} - - match: {aggregations.my_agg.buckets.0.doc_count: 6} - - match: {aggregations.my_agg.buckets.1.doc_count: 6} - - - do: - search: - index: my_index - body: - size: 2 - query: - knn: - field: my_vector - query_vector: [ 1, 1, 1, 1 ] - num_candidates: 3 # collect 3 candidates from each shard - aggs: - my_agg2: - terms: - field: my_name - order: - _key: asc - my_sum_buckets: - sum_bucket: - buckets_path: "my_agg2>_count" - - - length: { hits.hits: 2 } - - match: { hits.total.value: 6 } - - match: { aggregations.my_agg2.buckets.0.key: 'v1' } - - match: { aggregations.my_agg2.buckets.1.key: 'v2' } - - match: { aggregations.my_sum_buckets.value: 6.0 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml index 79ff3f61742f8..bf07144975650 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml @@ -273,6 +273,7 @@ setup: knn: field: nested.vector query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] + k: 5 num_candidates: 5 inner_hits: { size: 2, "fields": [ "nested.paragraph_id" ], _source: false } @@ -295,6 +296,7 @@ setup: knn: field: nested.vector query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] + k: 5 num_candidates: 5 inner_hits: { size: 2, "fields": [ "nested.paragraph_id" ], _source: false } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/140_knn_query_with_other_queries.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/140_knn_query_with_other_queries.yml index d52a5daf22344..1e54e497f286f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/140_knn_query_with_other_queries.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/140_knn_query_with_other_queries.yml @@ -69,6 +69,7 @@ setup: field: my_vector query_vector: [ 1, 1, 1, 1 ] num_candidates: 5 + k: 5 functions: - filter: { match: { my_name: v1 } } weight: 10 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/160_knn_query_missing_params.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/160_knn_query_missing_params.yml index 02962e049e267..26c52060dfb22 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/160_knn_query_missing_params.yml +++ 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/160_knn_query_missing_params.yml @@ -100,8 +100,9 @@ setup: knn: field: vector query_vector: [1, 1, 1] + k: 2 size: 1 - - match: { hits.total: 2 } # due to num_candidates defined as round(1.5 * size), so we only see 2 results + - match: { hits.total: 2 } # k defaults to size - length: { hits.hits: 1 } # one result is only returned though --- @@ -117,6 +118,7 @@ setup: field: vector query_vector: [-1, -1, -1] num_candidates: 1 + k: 1 size: 10 - match: { hits.total: 1 } @@ -137,9 +139,10 @@ setup: - knn: field: vector query_vector: [ 1, 1, 0] + k: 1 size: 1 - - match: { hits.total: 2 } # due to num_candidates defined as round(1.5 * size), so we only see 2 results from cat:A + - match: { hits.total: 1 } - length: { hits.hits: 1 } --- @@ -154,6 +157,7 @@ setup: - knn: field: vector query_vector: [1, 1, 0] + k: 2 - match: category: B tie_breaker: 0.8 @@ -175,6 +179,7 @@ setup: knn: field: nested.vector query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] + k: 2 inner_hits: { size: 1, "fields": [ "nested.paragraph_id" ], _source: false } size: 1 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/180_update_dense_vector_type.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/180_update_dense_vector_type.yml index 855daeaa7f163..99943ef2671bb 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/180_update_dense_vector_type.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/180_update_dense_vector_type.yml @@ -109,6 +109,7 @@ setup: field: embedding query_vector: [1, 1, 1, 1] num_candidates: 10 + k: 10 - match: { hits.total.value: 10 } - length: {hits.hits: 3} @@ -215,6 +216,7 @@ setup: field: embedding query_vector: [ 1, 1, 1, 1 ] num_candidates: 20 + k: 20 - match: { hits.total.value: 20 } - length: { hits.hits: 3 } @@ -322,6 +324,7 @@ setup: field: embedding query_vector: [ 1, 1, 1, 1 ] num_candidates: 30 + k: 30 - match: { hits.total.value: 30 } - length: { hits.hits: 4 } @@ -430,6 +433,7 @@ setup: field: embedding query_vector: [ 1, 1, 1, 1 ] num_candidates: 40 + k: 40 - match: { hits.total.value: 40 } - length: { hits.hits: 5 } @@ -499,6 +503,7 @@ setup: field: embedding query_vector: [1, 1, 1, 1] num_candidates: 10 + k: 10 - match: { hits.total.value: 10 } - length: {hits.hits: 3} @@ -559,6 +564,7 @@ setup: field: embedding query_vector: [ 1, 1, 1, 1 ] num_candidates: 20 + k: 20 - match: { hits.total.value: 20 } - length: { hits.hits: 3 } @@ -620,6 +626,7 @@ setup: field: embedding query_vector: [ 1, 1, 1, 1 ] num_candidates: 30 + k: 30 - match: { hits.total.value: 30 } - length: { hits.hits: 4 } @@ -682,6 +689,7 @@ setup: field: embedding query_vector: [ 1, 1, 1, 1 ] num_candidates: 40 + k: 40 - match: { hits.total.value: 40 } - length: { hits.hits: 5 } @@ -751,6 +759,7 @@ setup: field: embedding query_vector: [ 1, 1, 1, 1 ] num_candidates: 10 + k: 10 - match: { hits.total.value: 10 } - length: { hits.hits: 3 } @@ -791,6 +800,7 @@ setup: field: embedding query_vector: [ 1, 1, 1, 1 ] num_candidates: 10 + k: 10 - match: { hits.total.value: 10 } - length: { hits.hits: 3 } @@ -833,6 +843,7 @@ setup: field: embedding query_vector: [ 1, 1, 1, 1 ] num_candidates: 20 + k: 20 - match: { hits.total.value: 20 } - length: { hits.hits: 3 } @@ -869,6 +880,7 @@ setup: field: embedding query_vector: [ 1, 1, 1, 1 ] num_candidates: 20 + k: 20 - match: { hits.total.value: 20 } - length: { hits.hits: 3 } 
@@ -911,6 +923,7 @@ setup: field: embedding query_vector: [ 1, 1, 1, 1 ] num_candidates: 30 + k: 30 - match: { hits.total.value: 30 } - length: { hits.hits: 4 } @@ -933,6 +946,7 @@ setup: knn: field: embedding query_vector: [ 1, 1, 1, 1 ] + k: 30 num_candidates: 30 - match: { hits.total.value: 30 } @@ -1769,6 +1783,7 @@ setup: field: embedding query_vector: [1, 1, 1, 1] num_candidates: 10 + k: 10 - match: { hits.total.value: 10 } - length: {hits.hits: 3} @@ -1875,6 +1890,7 @@ setup: field: embedding query_vector: [ 1, 1, 1, 1 ] num_candidates: 20 + k: 20 - match: { hits.total.value: 20 } - length: { hits.hits: 3 } @@ -1982,6 +1998,7 @@ setup: field: embedding query_vector: [ 1, 1, 1, 1 ] num_candidates: 30 + k: 30 - match: { hits.total.value: 30 } - length: { hits.hits: 4 } @@ -2090,6 +2107,7 @@ setup: field: embedding query_vector: [ 1, 1, 1, 1 ] num_candidates: 40 + k: 40 - match: { hits.total.value: 40 } - length: { hits.hits: 5 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml index f6538b573809a..c92c88df91641 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml @@ -1,8 +1,6 @@ # test how knn query interact with other queries setup: - requires: - cluster_features: "search.vectors.k_param_supported" - reason: 'k param for knn as query is required' test_runner_features: close_to - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/210_knn_search_profile.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/210_knn_search_profile.yml index d4bf5e7e9807f..be35dcde2eff3 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/210_knn_search_profile.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/210_knn_search_profile.yml @@ -106,9 +106,9 @@ setup: k: 3 num_candidates: 3 "rescore_vector": - "num_candidates_factor": 2.0 + "oversample": 2.0 - # We expect the knn search ops + rescoring num_cnaidates (for rescoring) per shard + # We expect the knn search ops + rescoring k * oversample (for rescoring) per shard - match: { profile.shards.0.dfs.knn.0.vector_operations_count: 6 } # Search with similarity to check number of operations are propagated correctly @@ -131,7 +131,7 @@ setup: num_candidates: 3 similarity: 100000 "rescore_vector": - "num_candidates_factor": 2.0 + "oversample": 2.0 - # We expect the knn search ops + rescoring num_cnaidates (for rescoring) per shard + # We expect the knn search ops + rescoring k * oversample (for rescoring) per shard - match: { profile.shards.0.dfs.knn.0.vector_operations_count: 6 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml index 7d4690204acc7..8f846dd76721d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml @@ -558,7 +558,7 @@ setup: capabilities: - method: GET path: /_search - capabilities: [knn_quantized_vector_rescore] + capabilities: 
[knn_quantized_vector_rescore_oversample] - skip: features: "headers" @@ -598,7 +598,7 @@ setup: k: 3 num_candidates: 3 rescore_vector: - num_candidates_factor: 1.5 + oversample: 1.5 # Compare scores as hit IDs may change depending on how things are distributed - match: { hits.total: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml index 2567a4ac597d9..abde3e86dd05b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml @@ -1,7 +1,4 @@ setup: - - requires: - cluster_features: "mapper.vectors.bbq" - reason: 'kNN float to better-binary quantization is required' - do: indices.create: index: bbq_hnsw @@ -115,7 +112,7 @@ setup: capabilities: - method: GET path: /_search - capabilities: [knn_quantized_vector_rescore] + capabilities: [knn_quantized_vector_rescore_oversample] - skip: features: "headers" @@ -140,7 +137,7 @@ setup: k: 3 num_candidates: 3 rescore_vector: - num_candidates_factor: 1.5 + oversample: 1.5 # Get rescoring scores - hit ordering may change depending on how things are distributed - match: { hits.total: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_byte_quantized.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_byte_quantized.yml index b1e35789e8737..229d705bc317c 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_byte_quantized.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_byte_quantized.yml @@ -378,7 +378,7 @@ setup: capabilities: - method: GET path: /_search - capabilities: [knn_quantized_vector_rescore] + capabilities: [knn_quantized_vector_rescore_oversample] - skip: features: "headers" @@ -398,7 +398,7 @@ setup: field: vector query_vector: [0.5, 111.3, -13.0, 14.8, -156.0] rescore_vector: - num_candidates_factor: 1.5 + oversample: 1.5 # Get rescoring scores - hit ordering may change depending on how things are distributed - match: { hits.total: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml index 54e9eadf42e0b..9b27aea4b1db7 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml @@ -1,7 +1,4 @@ setup: - - requires: - cluster_features: "mapper.vectors.int4_quantization" - reason: 'kNN float to half-byte quantization is required' - do: indices.create: index: hnsw_byte_quantized @@ -556,7 +553,7 @@ setup: capabilities: - method: GET path: /_search - capabilities: [knn_quantized_vector_rescore] + capabilities: [knn_quantized_vector_rescore_oversample] - skip: features: "headers" @@ -575,7 +572,7 @@ setup: k: 3 num_candidates: 3 rescore_vector: - num_candidates_factor: 1.5 + oversample: 1.5 # Get rescoring scores - hit ordering may change depending on how things are distributed - match: { hits.total: 3 } diff --git 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml index a3cd624ef0ab8..2541de7023bf0 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml @@ -1,7 +1,4 @@ setup: - - requires: - cluster_features: "mapper.vectors.bbq" - reason: 'kNN float to better-binary quantization is required' - do: indices.create: index: bbq_flat @@ -114,7 +111,7 @@ setup: capabilities: - method: GET path: /_search - capabilities: [knn_quantized_vector_rescore] + capabilities: [knn_quantized_vector_rescore_oversample] - skip: features: "headers" @@ -139,7 +136,7 @@ setup: k: 3 num_candidates: 3 rescore_vector: - num_candidates_factor: 1.5 + oversample: 1.5 # Get rescoring scores - hit ordering may change depending on how things are distributed - match: { hits.total: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_flat.yml index a59aedceff3d3..358ff547036e6 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_flat.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_flat.yml @@ -264,7 +264,7 @@ setup: capabilities: - method: GET path: /_search - capabilities: [knn_quantized_vector_rescore] + capabilities: [knn_quantized_vector_rescore_oversample] - skip: features: "headers" @@ -304,7 +304,7 @@ setup: k: 3 num_candidates: 3 rescore_vector: - num_candidates_factor: 1.5 + oversample: 1.5 # Compare scores as hit IDs may change depending on how things are distributed - match: { hits.total: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml index 6796a92122f9a..f9f8d56e1d9c9 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml @@ -1,7 +1,4 @@ setup: - - requires: - cluster_features: "mapper.vectors.int4_quantization" - reason: 'kNN float to half-byte quantization is required' - do: indices.create: index: int4_flat @@ -352,7 +349,7 @@ setup: capabilities: - method: GET path: /_search - capabilities: [knn_quantized_vector_rescore] + capabilities: [knn_quantized_vector_rescore_oversample] - skip: features: "headers" @@ -371,7 +368,7 @@ setup: k: 3 num_candidates: 3 rescore_vector: - num_candidates_factor: 1.5 + oversample: 1.5 # Get rescoring scores - hit ordering may change depending on how things are distributed - match: { hits.total: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int8_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int8_flat.yml index d1d312449cb70..6b59b8f641ee9 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int8_flat.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int8_flat.yml @@ 
-269,7 +269,7 @@ setup: capabilities: - method: GET path: /_search - capabilities: [knn_quantized_vector_rescore] + capabilities: [knn_quantized_vector_rescore_oversample] - skip: features: "headers" @@ -288,7 +288,7 @@ setup: k: 3 num_candidates: 3 rescore_vector: - num_candidates_factor: 1.5 + oversample: 1.5 # Get rescoring scores - hit ordering may change depending on how things are distributed - match: { hits.total: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml index effa3fff61525..ef2ae3ba7ee0a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml @@ -1,8 +1,4 @@ setup: - - requires: - cluster_features: "mapper.vectors.bit_vectors" - reason: 'mapper.vectors.bit_vectors' - - do: indices.create: index: test @@ -414,7 +410,7 @@ setup: capabilities: - method: GET path: /_search - capabilities: [knn_quantized_vector_rescore] + capabilities: [knn_quantized_vector_rescore_oversample] - skip: features: "headers" @@ -454,7 +450,7 @@ setup: k: 3 num_candidates: 3 rescore_vector: - num_candidates_factor: 1.5 + oversample: 1.5 # Compare scores as hit IDs may change depending on how things are distributed - match: { hits.total: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml index cdc1d9c64763e..07261e6a30c77 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml @@ -1,8 +1,4 @@ setup: - - requires: - cluster_features: "mapper.vectors.bit_vectors" - reason: 'mapper.vectors.bit_vectors' - - do: indices.create: index: test @@ -230,7 +226,7 @@ setup: capabilities: - method: GET path: /_search - capabilities: [knn_quantized_vector_rescore] + capabilities: [knn_quantized_vector_rescore_oversample] - skip: features: "headers" @@ -270,7 +266,7 @@ setup: k: 3 num_candidates: 3 rescore_vector: - num_candidates_factor: 1.5 + oversample: 1.5 # Compare scores as hit IDs may change depending on how things are distributed - match: { hits.total: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml index 213b571a0b4be..6559b8d969cb9 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml @@ -263,7 +263,7 @@ setup: capabilities: - method: GET path: /_search - capabilities: [knn_quantized_vector_rescore] + capabilities: [knn_quantized_vector_rescore_oversample] - skip: features: "headers" @@ -303,7 +303,7 @@ setup: k: 3 num_candidates: 3 rescore_vector: - num_candidates_factor: 1.5 + oversample: 1.5 # Compare scores as hit IDs may change depending on how things are distributed - match: { hits.total: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml index 44d966b76f34e..8915325c3a67b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml @@ -1128,10 +1128,6 @@ fetch geo_point: --- "Test with subobjects: auto": - - requires: - cluster_features: "mapper.subobjects_auto" - reason: requires support for subobjects auto setting - - do: indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/530_ignore_above_stored_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/530_ignore_above_stored_source.yml index 1730a49f743d9..7e00cbb01c589 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/530_ignore_above_stored_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/530_ignore_above_stored_source.yml @@ -1,8 +1,5 @@ --- ignore_above mapping level setting: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test @@ -42,9 +39,6 @@ ignore_above mapping level setting: --- ignore_above mapping level setting on arrays: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test @@ -84,9 +78,6 @@ ignore_above mapping level setting on arrays: --- ignore_above mapping overrides setting: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test @@ -128,9 +119,6 @@ ignore_above mapping overrides setting: --- ignore_above mapping overrides setting on arrays: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test @@ -172,9 +160,6 @@ ignore_above mapping overrides setting on arrays: --- date ignore_above index level setting: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml index 772c3c24170cd..045f757b08302 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml @@ -5,9 +5,6 @@ setup: --- ignore_above mapping level setting: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test @@ -47,9 +44,6 @@ ignore_above mapping level setting: --- ignore_above mapping level setting on arrays: - - requires: - cluster_features: [ "mapper.flattened.ignore_above_with_arrays_support" ] - reason: requires support of ignore_above with arrays for flattened fields - do: indices.create: index: test @@ -90,9 +84,6 @@ ignore_above mapping level setting on arrays: --- ignore_above mapping overrides setting: - - requires: - cluster_features: [ 
"mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test @@ -135,9 +126,6 @@ ignore_above mapping overrides setting: --- ignore_above mapping overrides setting on arrays: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/550_ignore_above_invalid.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/550_ignore_above_invalid.yml index 3c29845871fe7..6e711ee143b06 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/550_ignore_above_invalid.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/550_ignore_above_invalid.yml @@ -16,9 +16,6 @@ ignore_above index setting negative value: --- keyword ignore_above mapping setting negative value: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: catch: bad_request indices.create: @@ -32,9 +29,6 @@ keyword ignore_above mapping setting negative value: --- flattened ignore_above mapping setting negative value: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: catch: bad_request indices.create: @@ -48,9 +42,6 @@ flattened ignore_above mapping setting negative value: --- wildcard ignore_above mapping setting negative value: - - requires: - cluster_features: [ "mapper.ignore_above_index_level_setting" ] - reason: introduce ignore_above index level setting - do: catch: bad_request indices.create: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/600_flattened_ignore_above.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/600_flattened_ignore_above.yml index a4a9b1aaecb22..71e0c2d147c1e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/600_flattened_ignore_above.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/600_flattened_ignore_above.yml @@ -1,8 +1,5 @@ --- flattened ignore_above single-value field: - - requires: - cluster_features: [ "flattened.ignore_above_support" ] - reason: introduce ignore_above support in flattened fields - do: indices.create: index: test @@ -65,9 +62,6 @@ flattened ignore_above single-value field: --- flattened ignore_above multi-value field: - - requires: - cluster_features: [ "flattened.ignore_above_support" ] - reason: introduce ignore_above support in flattened fields - do: indices.create: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/80_indices_options.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/80_indices_options.yml index 2022389a69660..1af6816d902ae 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/80_indices_options.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/80_indices_options.yml @@ -37,8 +37,6 @@ - do: indices.close: index: index_closed - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - do: catch: /index_closed_exception/ diff --git 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/90_search_after.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/90_search_after.yml index 1fefc8bffffa1..d3b2b5a412717 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/90_search_after.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/90_search_after.yml @@ -218,31 +218,6 @@ - match: {hits.hits.0._source.timestamp: "2019-10-21 00:30:04.828740" } - match: {hits.hits.0.sort: [1571617804828740000] } - ---- -"_shard_doc sort": - - requires: - cluster_features: ["gte_v7.12.0"] - reason: _shard_doc sort was added in 7.12 - - - do: - indices.create: - index: test - - do: - index: - index: test - id: "1" - body: { id: 1, foo: bar, age: 18 } - - - do: - catch: /\[_shard_doc\] sort field cannot be used without \[point in time\]/ - search: - index: test - body: - size: 1 - sort: ["_shard_doc"] - search_after: [ 0L ] - --- "Format sort values": - requires: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml index da0f00d960534..70a3b0253c78f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml @@ -119,10 +119,6 @@ setup: - skip: features: headers - - requires: - cluster_features: ["simulate.mapping.validation"] - reason: "ingest simulate index mapping validation added in 8.16" - - do: headers: Content-Type: application/json @@ -265,10 +261,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.mapping.validation.templates"] - reason: "ingest simulate index mapping validation added in 8.16" - - do: indices.put_template: name: v1_template @@ -401,10 +393,6 @@ setup: - headers - allowed_warnings - - requires: - cluster_features: ["simulate.mapping.validation.templates"] - reason: "ingest simulate index mapping validation added in 8.16" - - do: allowed_warnings: - "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/snapshot.restore/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/snapshot.restore/10_basic.yml index ecc6330a73123..133b556ce4c91 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/snapshot.restore/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/snapshot.restore/10_basic.yml @@ -46,8 +46,6 @@ setup: - do: indices.close: index : test_index - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - do: snapshot.restore: @@ -92,8 +90,6 @@ setup: - do: indices.close: index : test_index - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - do: 
snapshot.restore: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml index d6c98673253fb..4e6bd83f07955 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml @@ -1,8 +1,8 @@ --- -"Reload analyzers for specific synonym set": +setup: - requires: cluster_features: ["gte_v8.10.0"] reason: Reloading analyzers for specific synonym set is introduced in 8.10.0 + # Create synonyms_set1 - do: synonyms.put_synonym: @@ -100,7 +100,12 @@ - '{"index": {"_index": "my_index2", "_id": "2"}}' - '{"my_field": "goodbye"}' - # An update of synonyms_set1 must trigger auto-reloading of analyzers only for synonyms_set1 +--- +"Reload analyzers for specific synonym set": +# These specific tests can't succeed in BwC, as the synonyms index has auto-expand replicas set to 0-all. Replicas can't be assigned to +# upgraded nodes, and thus we cannot guarantee that the shards are not failed. +# This test is skipped for BwC until the synonyms index has auto-expand replicas set to 0-1. + - do: synonyms.put_synonym: id: synonyms_set1 @@ -108,13 +113,12 @@ synonyms_set: - synonyms: "hello, salute" - synonyms: "ciao => goodbye" + - match: { result: "updated" } - gt: { reload_analyzers_details._shards.total: 0 } - gt: { reload_analyzers_details._shards.successful: 0 } - match: { reload_analyzers_details._shards.failed: 0 } - - length: { reload_analyzers_details.reload_details: 1 } # reload details contain only a single index - - match: { reload_analyzers_details.reload_details.0.index: "my_index1" } - - match: { reload_analyzers_details.reload_details.0.reloaded_analyzers.0: "my_analyzer1" } + # Confirm that the index analyzers are reloaded for my_index1 - do: @@ -127,6 +131,23 @@ query: salute - match: { hits.total.value: 1 } +--- +"Check analyzer reloaded and non-failed shards for bwc tests": + + - do: + synonyms.put_synonym: + id: synonyms_set1 + body: + synonyms_set: + - synonyms: "hello, salute" + - synonyms: "ciao => goodbye" + - match: { result: "updated" } + - gt: { reload_analyzers_details._shards.total: 0 } + - gt: { reload_analyzers_details._shards.successful: 0 } + - length: { reload_analyzers_details.reload_details: 1 } # reload details contain only a single index + - match: { reload_analyzers_details.reload_details.0.index: "my_index1" } + - match: { reload_analyzers_details.reload_details.0.reloaded_analyzers.0: "my_analyzer1" } + # Confirm that the index analyzers are still the same for my_index2 - do: search: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/140_routing_path.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/140_routing_path.yml index 616afd3cf67ad..1e841c8893fc6 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/140_routing_path.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/140_routing_path.yml @@ -122,8 +122,6 @@ missing dimension on routing path field: multi-value routing path field succeeds: - requires: test_runner_features: close_to - cluster_features: ["routing.multi_value_routing_path"] - reason: support for multi-value dimensions - do: indices.create:
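The BwC comment added to the synonyms reload test above turns on how index.auto_expand_replicas behaves during a rolling upgrade. A minimal sketch of the two values it contrasts, assuming the synonyms system index applies a setting of this shape (hypothetical class, not part of this PR):

import org.elasticsearch.common.settings.Settings;

class AutoExpandReplicasSketch {
    // "0-all" expands a replica onto every node, including not-yet-upgraded ones,
    // so failed shards cannot be ruled out mid-upgrade; "0-1" caps expansion at a
    // single replica, which would let the skipped assertions run under BwC again.
    static final Settings CURRENT = Settings.builder().put("index.auto_expand_replicas", "0-all").build();
    static final Settings PROPOSED = Settings.builder().put("index.auto_expand_replicas", "0-1").build();
}

diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml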
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml index 9fe3f5e0b7272..f25601fc2e228 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml @@ -127,7 +127,7 @@ exact match object type: reason: routing_path error message updated in 8.14.0 - do: - catch: '/All fields that match routing_path must be configured with \[time_series_dimension: true\] or flattened fields with a list of dimensions in \[time_series_dimensions\] and without the \[script\] parameter. \[dim\] was \[object\]./' + catch: '/All fields that match routing_path must be .*flattened fields.* \[dim\] was \[object\]./' indices.create: index: tsdb_index body: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml index 4faa0424adb43..5a5ae03ab938f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml @@ -65,9 +65,6 @@ setup: --- generates a consistent id: - - requires: - cluster_features: "tsdb.ts_routing_hash_doc_value_parse_byte_ref" - reason: _tsid routing hash doc value parsing has been fixed - do: bulk: refresh: true @@ -427,7 +424,7 @@ delete over _bulk: - match: {items.0.delete.result: deleted} - match: {items.1.delete.result: deleted} - match: {items.2.delete.status: 404} - - match: {items.2.delete.error.reason: "invalid id [not found ++ not found] for index [id_generation_test] in time series mode"} + - match: {items.2.delete.error.reason: '/invalid\ id\ \[not\ found\ \+\+\ not\ found\]\ for\ index\ \[id_generation_test\]\ in\ time.series\ mode/'} --- routing_path matches deep object: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_snapshot.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_snapshot.yml index b0e1595f3d0e3..2f177d275c099 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_snapshot.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_snapshot.yml @@ -120,8 +120,6 @@ teardown: - do: indices.close: index : test_index - allowed_warnings: - - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" # Restore index - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml index dae50704dd0d0..a8d256bbc097e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml @@ -340,9 +340,6 @@ sort by tsid: --- aggs by index_mode: - - requires: - cluster_features: ["mapper.query_index_mode"] - reason: require _index_mode metadata field - do: search: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml index c32d3c50b0784..c71555dd073d6 100644 --- 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml @@ -95,7 +95,7 @@ split: reason: tsdb indexing changed in 8.2.0 - do: - catch: /index-split is not supported because the destination index \[test\] is in time series mode/ + catch: /index-split is not supported because the destination index \[test\] is in time.series mode/ indices.split: index: test target: test_split diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml index 54b2bf59c8ddc..142d1281ad12b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml @@ -75,7 +75,7 @@ index with routing: reason: tsdb indexing changed in 8.2.0 - do: - catch: /specifying routing is not supported because the destination index \[test\] is in time series mode/ + catch: /specifying routing is not supported because the destination index \[test\] is in time.series mode/ index: index: test routing: foo @@ -104,7 +104,7 @@ index with routing over _bulk: body: - '{"index": {"routing": "foo"}}' - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' - - match: {items.0.index.error.reason: "specifying routing is not supported because the destination index [test] is in time series mode"} + - match: {items.0.index.error.reason: '/specifying\ routing\ is\ not\ supported\ because\ the\ destination\ index\ \[test\]\ is\ in\ time.series\ mode/'} --- noop update: @@ -120,7 +120,7 @@ noop update: - length: {hits.hits: 1} - do: - catch: /update is not supported because the destination index \[test\] is in time series mode/ + catch: /update is not supported because the destination index \[test\] is in time.series mode/ update: index: test id: "1" @@ -136,7 +136,7 @@ regular update: # We fail even though the document isn't found. - do: - catch: /update is not supported because the destination index \[test\] is in time series mode/ + catch: /update is not supported because the destination index \[test\] is in time.series mode/ update: index: test id: "1" @@ -165,7 +165,7 @@ update over _bulk: body: - '{"update": {"_id": 1}}' - '{"doc":{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}}' - - match: {items.0.update.error.reason: "update is not supported because the destination index [test] is in time series mode"} + - match: {items.0.update.error.reason: '/update\ is\ not\ supported\ because\ the\ destination\ index\ \[test\]\ is\ in\ time.series\ mode/'} --- search with routing: @@ -175,7 +175,7 @@ search with routing: # We fail even though the document isn't found. 
- do: - catch: /searching with a specified routing is not supported because the destination index \[test\] is in time series mode/ + catch: /searching with a specified routing is not supported because the destination index \[test\] is in time.series mode/ search: index: test routing: rrrr diff --git a/server/build.gradle b/server/build.gradle index 0bd807751ecbb..b186999c36ca3 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -15,7 +15,7 @@ apply plugin: 'elasticsearch.internal-test-artifact' publishing { publications { elastic { - artifactId 'elasticsearch' + artifactId = 'elasticsearch' } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java index f5860cedcd989..68e65b16aa3a2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.indices.cache.clear.TransportClearIndicesCacheAction; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.elasticsearch.action.admin.indices.flush.FlushRequest; @@ -157,9 +158,11 @@ public void setup() { for (int i = 0; i < numIndices; i++) { indices.add("test" + i); } - for (String index : indices) { - assertAcked(prepareCreate(index).addAlias(new Alias(index + "-alias"))); - } + assertAcked( + indices.stream() + .map(index -> prepareCreate(index).addAlias(new Alias(index + "-alias"))) + .toArray(CreateIndexRequestBuilder[]::new) + ); ensureGreen(); } @@ -516,7 +519,7 @@ public void testDeleteIndex() { public void testGetMappings() { interceptTransportActions(GetMappingsAction.NAME); - GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices(randomIndicesOrAliases()); + GetMappingsRequest getMappingsRequest = new GetMappingsRequest(TEST_REQUEST_TIMEOUT).indices(randomIndicesOrAliases()); internalCluster().coordOnlyNodeClient().admin().indices().getMappings(getMappingsRequest).actionGet(); clearInterceptedActions(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index a954325c65a97..22bba47b0c5f9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -252,11 +252,13 @@ public void testDoNotWaitForCompletion() throws Exception { if (waitForCompletion) { assertFalse(cancelFuture.isDone()); } else { - assertBusy(() -> assertTrue(cancelFuture.isDone())); + cancelFuture.get(); } allowEntireRequest(rootRequest); waitForRootTask(mainTaskFuture, false); - cancelFuture.actionGet(); + if (waitForCompletion) { + cancelFuture.actionGet(); + } ensureBansAndCancellationsConsistency(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 1e16357a24412..8afdbc5906491 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -779,17 +779,13 @@ public void testTaskStoringSuccessfulResult() throws Exception { assertNoFailures(indicesAdmin().prepareRefresh(TaskResultsService.TASK_INDEX).get()); assertHitCount( + 1L, prepareSearch(TaskResultsService.TASK_INDEX).setSource( SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.action", taskInfo.action())) ), - 1L - ); - - assertHitCount( prepareSearch(TaskResultsService.TASK_INDEX).setSource( SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("task.node", taskInfo.taskId().getNodeId())) - ), - 1L + ) ); GetTaskResponse getResponse = expectFinishedTask(taskId); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java index 5a49322666768..b742661a32c1a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java @@ -81,7 +81,10 @@ public void testAllowed() { final IndicesAliasesRequest request = new IndicesAliasesRequest().origin("allowed"); request.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("index").alias("alias")); assertAcked(client().admin().indices().aliases(request).actionGet()); - final GetAliasesResponse response = client().admin().indices().getAliases(new GetAliasesRequest("alias")).actionGet(); + final GetAliasesResponse response = client().admin() + .indices() + .getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT, "alias")) + .actionGet(); assertThat(response.getAliases().keySet().size(), equalTo(1)); assertThat(response.getAliases().keySet().iterator().next(), equalTo("index")); final List aliasMetadata = response.getAliases().get("index"); @@ -117,6 +120,8 @@ public void testSomeAllowed() { final Exception e = expectThrows(IllegalStateException.class, client().admin().indices().aliases(request)); final String index = "foo_allowed".equals(origin) ? 
"bar" : "foo"; assertThat(e, hasToString(containsString("origin [" + origin + "] not allowed for index [" + index + "]"))); - assertTrue(client().admin().indices().getAliases(new GetAliasesRequest("alias")).actionGet().getAliases().isEmpty()); + assertTrue( + client().admin().indices().getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT, "alias")).actionGet().getAliases().isEmpty() + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java index b10e17b3f4e0f..ea7cec710e31e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java @@ -273,7 +273,7 @@ public void testAutoCreateSystemAliasViaComposableTemplateAllowsTemplates() thro private void assertAliasesHidden(String nonPrimaryIndex, Set aliasNames, int aliasCount) throws InterruptedException, ExecutionException { final GetAliasesResponse getAliasesResponse = indicesAdmin().getAliases( - new GetAliasesRequest().indicesOptions(IndicesOptions.strictExpandHidden()) + new GetAliasesRequest(TEST_REQUEST_TIMEOUT).indicesOptions(IndicesOptions.strictExpandHidden()) ).get(); assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java index 47f96aebacd7d..fa2b053ead348 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CloneIndexIT.java @@ -39,7 +39,7 @@ protected boolean forbidPrivateIndexSettings() { } public void testCreateCloneIndex() { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); int numPrimaryShards = randomIntBetween(1, 5); prepareCreate("source").setSettings( Settings.builder().put(indexSettings()).put("number_of_shards", numPrimaryShards).put("index.version.created", version) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index e8160a311bedb..b14bf38f3cbcc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -120,7 +120,7 @@ public void testNonNestedMappings() throws Exception { ) ); - GetMappingsResponse response = indicesAdmin().prepareGetMappings("test").get(); + GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get(); MappingMetadata mappings = response.mappings().get("test"); assertNotNull(mappings); @@ -130,7 +130,7 @@ public void testNonNestedMappings() throws Exception { public void testEmptyNestedMappings() throws Exception { assertAcked(prepareCreate("test").setMapping(XContentFactory.jsonBuilder().startObject().endObject())); - GetMappingsResponse response = indicesAdmin().prepareGetMappings("test").get(); + 
GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get(); MappingMetadata mappings = response.mappings().get("test"); assertNotNull(mappings); @@ -150,7 +150,7 @@ public void testEmptyMappings() throws Exception { prepareCreate("test").setMapping(XContentFactory.jsonBuilder().startObject().startObject("_doc").endObject().endObject()) ); - GetMappingsResponse response = indicesAdmin().prepareGetMappings("test").get(); + GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get(); MappingMetadata mappings = response.mappings().get("test"); assertNotNull(mappings); @@ -158,8 +158,7 @@ public void testEmptyMappings() throws Exception { } public void testTwoEmptyEqualMappings() throws Exception { - assertAcked(prepareCreate("test1")); - assertAcked(prepareCreate("test2").setMapping(XContentFactory.jsonBuilder().startObject().endObject())); + assertAcked(prepareCreate("test1"), prepareCreate("test2").setMapping(XContentFactory.jsonBuilder().startObject().endObject())); FieldCapabilitiesRequest fieldCapsReq1 = new FieldCapabilitiesRequest(); fieldCapsReq1.indices("test1"); fieldCapsReq1.fields("*"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java index 112ed994385ef..b9dadf86c3345 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java @@ -115,7 +115,7 @@ public void testNonPrimarySystemIndexIsAutoCreatedViaConcreteName() throws Excep // Check that a non-primary system index is not assigned as the write index for the alias final GetAliasesResponse getAliasesResponse = indicesAdmin().getAliases( - new GetAliasesRequest().indicesOptions(IndicesOptions.strictExpandHidden()) + new GetAliasesRequest(TEST_REQUEST_TIMEOUT).indicesOptions(IndicesOptions.strictExpandHidden()) ).actionGet(); assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); @@ -322,7 +322,7 @@ public void onFailure(Exception e) { */ private void assertAliases(String concreteIndex) { final GetAliasesResponse getAliasesResponse = indicesAdmin().getAliases( - new GetAliasesRequest().indicesOptions(IndicesOptions.strictExpandHidden()) + new GetAliasesRequest(TEST_REQUEST_TIMEOUT).indicesOptions(IndicesOptions.strictExpandHidden()) ).actionGet(); assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); @@ -334,7 +334,7 @@ private void assertAliases(String concreteIndex) { private void assertHasAliases(Set aliasNames, String name, String primaryName, int aliasCount) throws InterruptedException, java.util.concurrent.ExecutionException { final GetAliasesResponse getAliasesResponse = indicesAdmin().getAliases( - new GetAliasesRequest().indicesOptions(IndicesOptions.strictExpandHidden()) + new GetAliasesRequest(TEST_REQUEST_TIMEOUT).indicesOptions(IndicesOptions.strictExpandHidden()) ).get(); assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); @@ -357,8 +357,9 @@ private void assertHasAliases(Set aliasNames, String name, String primar * Fetch the mappings and settings for {@link TestSystemIndexDescriptor#INDEX_NAME} and verify that they match the expected values. 
*/ private void assertMappingsAndSettings(String expectedMappings, String concreteIndex) { - final GetMappingsResponse getMappingsResponse = indicesAdmin().getMappings(new GetMappingsRequest().indices(INDEX_NAME)) - .actionGet(); + final GetMappingsResponse getMappingsResponse = indicesAdmin().getMappings( + new GetMappingsRequest(TEST_REQUEST_TIMEOUT).indices(INDEX_NAME) + ).actionGet(); final Map mappings = getMappingsResponse.getMappings(); assertThat( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index 4f6d24b419595..4e9304a7979d6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -113,8 +113,11 @@ public void testCreateShrinkIndexToN() { .get(); } flushAndRefresh(); - assertHitCount(prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), 20); - assertHitCount(prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), 20); + assertHitCount( + 20, + prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), + prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")) + ); // relocate all shards to one node such that we can merge it. updateIndexSettings( @@ -145,9 +148,12 @@ public void testCreateShrinkIndexToN() { .get(); } flushAndRefresh(); - assertHitCount(prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), 20); - assertHitCount(prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), 20); - assertHitCount(prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), 20); + assertHitCount( + 20, + prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), + prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), + prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")) + ); assertNoResizeSourceIndexSettings("first_shrink"); assertNoResizeSourceIndexSettings("second_shrink"); @@ -236,7 +242,7 @@ private static IndexMetadata indexMetadata(final Client client, final String ind public void testCreateShrinkIndex() { internalCluster().ensureAtLeastNumDataNodes(2); - IndexVersion version = IndexVersionUtils.randomVersion(random()); + IndexVersion version = IndexVersionUtils.randomWriteVersion(); prepareCreate("source").setSettings( Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7)).put("index.version.created", version) ).get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 8391ab270b1d1..980e94fd5dfce 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -236,9 +236,12 @@ private void splitToN(int sourceShards, int firstSplitShards, int secondSplitSha GetResponse getResponse = client().prepareGet("second_split", Integer.toString(i)).setRouting(routingValue[i]).get(); 
assertTrue(getResponse.isExists()); } - assertHitCount(prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), numDocs); - assertHitCount(prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), numDocs); - assertHitCount(prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), numDocs); + assertHitCount( + numDocs, + prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), + prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")), + prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")) + ); if (useNested) { assertNested("source", numDocs); assertNested("first_split", numDocs); @@ -341,8 +344,8 @@ private static IndexMetadata indexMetadata(final Client client, final String ind return clusterStateResponse.getState().metadata().index(index); } - public void testCreateSplitIndex() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + public void testCreateSplitIndex() { + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); prepareCreate("source").setSettings( Settings.builder().put(indexSettings()).put("number_of_shards", 1).put("index.version.created", version) ).get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index 46b4675adc8a1..746f35992e721 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -649,9 +649,11 @@ public void testRolloverWithClosedIndexInAlias() { final String openNonwriteIndex = "open-index-nonwrite"; final String closedIndex = "closed-index-nonwrite"; final String writeIndexPrefix = "write-index-"; - assertAcked(prepareCreate(openNonwriteIndex).addAlias(new Alias(aliasName)).get()); - assertAcked(prepareCreate(closedIndex).addAlias(new Alias(aliasName)).get()); - assertAcked(prepareCreate(writeIndexPrefix + "000001").addAlias(new Alias(aliasName).writeIndex(true)).get()); + assertAcked( + prepareCreate(openNonwriteIndex).addAlias(new Alias(aliasName)), + prepareCreate(closedIndex).addAlias(new Alias(aliasName)), + prepareCreate(writeIndexPrefix + "000001").addAlias(new Alias(aliasName).writeIndex(true)) + ); ensureGreen(); index(closedIndex, null, "{\"foo\": \"bar\"}"); @@ -674,17 +676,18 @@ public void testRolloverWithClosedWriteIndex() throws Exception { final String openNonwriteIndex = "open-index-nonwrite"; final String closedIndex = "closed-index-nonwrite"; final String writeIndexPrefix = "write-index-"; - assertAcked(prepareCreate(openNonwriteIndex).addAlias(new Alias(aliasName)).get()); - assertAcked(prepareCreate(closedIndex).addAlias(new Alias(aliasName)).get()); - assertAcked(prepareCreate(writeIndexPrefix + "000001").addAlias(new Alias(aliasName).writeIndex(true)).get()); + assertAcked( + prepareCreate(openNonwriteIndex).addAlias(new Alias(aliasName)), + prepareCreate(closedIndex).addAlias(new Alias(aliasName)), + prepareCreate(writeIndexPrefix + "000001").addAlias(new Alias(aliasName).writeIndex(true)) + ); ensureGreen(openNonwriteIndex, closedIndex, writeIndexPrefix + "000001"); index(closedIndex, null, "{\"foo\": \"bar\"}"); index(aliasName, null, "{\"foo\": 
\"bar\"}"); index(aliasName, null, "{\"foo\": \"bar\"}"); refresh(aliasName); - assertAcked(indicesAdmin().prepareClose(closedIndex).get()); - assertAcked(indicesAdmin().prepareClose(writeIndexPrefix + "000001").get()); + assertAcked(indicesAdmin().prepareClose(closedIndex, writeIndexPrefix + "000001").get()); ensureGreen(aliasName); RolloverResponse rolloverResponse = indicesAdmin().prepareRolloverIndex(aliasName) @@ -863,7 +866,7 @@ public void testRolloverConcurrently() throws Exception { for (int i = 0; i < numOfThreads; i++) { var aliasName = "test-" + i; - var response = indicesAdmin().getAliases(new GetAliasesRequest(aliasName)).get(); + var response = indicesAdmin().getAliases(new GetAliasesRequest(TEST_REQUEST_TIMEOUT, aliasName)).get(); List>> actual = response.getAliases().entrySet().stream().toList(); List>> expected = new ArrayList<>(numberOfRolloversPerThread); int numOfIndices = numberOfRolloversPerThread + 1; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java index e45555b1dec19..2cd319d148321 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java @@ -55,7 +55,7 @@ public void testBulkIndexCreatesMapping() throws Exception { bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON); bulkBuilder.get(); assertBusy(() -> { - GetMappingsResponse mappingsResponse = indicesAdmin().prepareGetMappings().get(); + GetMappingsResponse mappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT).get(); assertTrue(mappingsResponse.getMappings().containsKey("logstash-2014.03.30")); }); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java index 67576059de1e0..5db2651c703d2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java @@ -307,11 +307,15 @@ public void testFinalReduce() throws ExecutionException, InterruptedException { public void testWaitForRefreshIndexValidation() throws Exception { int numberOfShards = randomIntBetween(3, 10); - assertAcked(prepareCreate("test1").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards))); - assertAcked(prepareCreate("test2").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards))); - assertAcked(prepareCreate("test3").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards))); - indicesAdmin().prepareAliases().addAlias("test1", "testAlias").get(); - indicesAdmin().prepareAliases().addAlias(new String[] { "test2", "test3" }, "testFailedAlias").get(); + assertAcked( + prepareCreate("test1").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)), + prepareCreate("test2").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)), + prepareCreate("test3").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)) + ); + assertAcked( + indicesAdmin().prepareAliases().addAlias("test1", "testAlias"), + 
indicesAdmin().prepareAliases().addAlias(new String[] { "test2", "test3" }, "testFailedAlias") + ); long[] validCheckpoints = new long[numberOfShards]; Arrays.fill(validCheckpoints, SequenceNumbers.UNASSIGNED_SEQ_NO); @@ -376,8 +380,10 @@ public void testShardCountLimit() throws Exception { try { final int numPrimaries1 = randomIntBetween(2, 10); final int numPrimaries2 = randomIntBetween(1, 10); - assertAcked(prepareCreate("test1").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numPrimaries1))); - assertAcked(prepareCreate("test2").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numPrimaries2))); + assertAcked( + prepareCreate("test1").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numPrimaries1)), + prepareCreate("test2").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numPrimaries2)) + ); // no exception prepareSearch("test1").get().decRef(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java index cba1d9362c91e..d16fbf9298552 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java @@ -531,8 +531,10 @@ private void checkBrownFoxTermVector(Fields fields, String fieldName, boolean wi public void testDuelWithAndWithoutTermVectors() throws IOException, ExecutionException, InterruptedException { // setup indices String[] indexNames = new String[] { "with_tv", "without_tv" }; - assertAcked(prepareCreate(indexNames[0]).setMapping("field1", "type=text,term_vector=with_positions_offsets,analyzer=keyword")); - assertAcked(prepareCreate(indexNames[1]).setMapping("field1", "type=text,term_vector=no,analyzer=keyword")); + assertAcked( + prepareCreate(indexNames[1]).setMapping("field1", "type=text,term_vector=no,analyzer=keyword"), + prepareCreate(indexNames[0]).setMapping("field1", "type=text,term_vector=with_positions_offsets,analyzer=keyword") + ); ensureGreen(); // index documents with and without term vectors @@ -1074,9 +1076,7 @@ public void testWithKeywordAndNormalizer() throws IOException, ExecutionExceptio "type=text,term_vector=with_positions_offsets,analyzer=my_analyzer", "field2", "type=text,term_vector=with_positions_offsets,analyzer=keyword" - ) - ); - assertAcked( + ), prepareCreate(indexNames[1]).setSettings(builder.build()) .setMapping("field1", "type=keyword,normalizer=my_normalizer", "field2", "type=keyword") ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java index 309bf69f00be0..ad17d8dbaa4ad 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -323,10 +323,8 @@ public void testSearchingFilteringAliasesSingleIndex() throws Exception { } public void testSearchingFilteringAliasesTwoIndices() throws Exception { - logger.info("--> creating index [test1]"); - assertAcked(prepareCreate("test1").setMapping("name", "type=text")); - logger.info("--> creating index [test2]"); - assertAcked(prepareCreate("test2").setMapping("name", "type=text")); + logger.info("--> creating indices [test1, test2]"); + 
assertAcked(prepareCreate("test1").setMapping("name", "type=text"), prepareCreate("test2").setMapping("name", "type=text")); ensureGreen(); logger.info("--> adding filtering aliases to index [test1]"); @@ -525,8 +523,7 @@ public void testSearchingFilteringAliasesMultipleIndices() throws Exception { public void testDeletingByQueryFilteringAliases() throws Exception { logger.info("--> creating index [test1] and [test2"); - assertAcked(prepareCreate("test1").setMapping("name", "type=text")); - assertAcked(prepareCreate("test2").setMapping("name", "type=text")); + assertAcked(prepareCreate("test1").setMapping("name", "type=text"), prepareCreate("test2").setMapping("name", "type=text")); ensureGreen(); logger.info("--> adding filtering aliases to index [test1]"); @@ -580,8 +577,7 @@ public void testDeletingByQueryFilteringAliases() throws Exception { public void testDeleteAliases() throws Exception { logger.info("--> creating index [test1] and [test2]"); - assertAcked(prepareCreate("test1").setMapping("name", "type=text")); - assertAcked(prepareCreate("test2").setMapping("name", "type=text")); + assertAcked(prepareCreate("test1").setMapping("name", "type=text"), prepareCreate("test2").setMapping("name", "type=text")); ensureGreen(); logger.info("--> adding filtering aliases to index [test1]"); @@ -615,12 +611,11 @@ public void testDeleteAliases() throws Exception { assertAliasesVersionIncreases(indices, () -> indicesAdmin().prepareAliases().removeAlias(indices, aliases).get()); for (String alias : aliases) { - assertTrue(indicesAdmin().prepareGetAliases(alias).get().getAliases().isEmpty()); + assertTrue(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, alias).get().getAliases().isEmpty()); } logger.info("--> creating index [foo_foo] and [bar_bar]"); - assertAcked(prepareCreate("foo_foo")); - assertAcked(prepareCreate("bar_bar")); + assertAcked(prepareCreate("foo_foo"), prepareCreate("bar_bar")); ensureGreen(); logger.info("--> adding [foo] alias to [foo_foo] and [bar_bar]"); @@ -632,9 +627,9 @@ public void testDeleteAliases() throws Exception { () -> assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.remove().index("foo*").alias("foo"))) ); - assertFalse(indicesAdmin().prepareGetAliases("foo").get().getAliases().isEmpty()); - assertTrue(indicesAdmin().prepareGetAliases("foo").setIndices("foo_foo").get().getAliases().isEmpty()); - assertFalse(indicesAdmin().prepareGetAliases("foo").setIndices("bar_bar").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foo").get().getAliases().isEmpty()); + assertTrue(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foo").setIndices("foo_foo").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foo").setIndices("bar_bar").get().getAliases().isEmpty()); IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, indicesAdmin().prepareAliases().addAliasAction(AliasActions.remove().index("foo").alias("foo")) @@ -788,7 +783,7 @@ public void testIndicesGetAliases() throws Exception { ); logger.info("--> getting alias1"); - GetAliasesResponse getResponse = indicesAdmin().prepareGetAliases("alias1").get(); + GetAliasesResponse getResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias1").get(); assertThat(getResponse, notNullValue()); assertThat(getResponse.getAliases().size(), equalTo(1)); assertThat(getResponse.getAliases().get("foobar").size(), equalTo(1)); @@ -797,10 +792,10 @@ public void 
testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); - assertFalse(indicesAdmin().prepareGetAliases("alias1").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias1").get().getAliases().isEmpty()); logger.info("--> getting all aliases that start with alias*"); - getResponse = indicesAdmin().prepareGetAliases("alias*").get(); + getResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias*").get(); assertThat(getResponse, notNullValue()); assertThat(getResponse.getAliases().size(), equalTo(1)); assertThat(getResponse.getAliases().get("foobar").size(), equalTo(2)); @@ -814,7 +809,7 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("foobar").get(1).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(1).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(1).getSearchRouting(), nullValue()); - assertFalse(indicesAdmin().prepareGetAliases("alias*").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias*").get().getAliases().isEmpty()); logger.info("--> creating aliases [bar, baz, foo]"); assertAliasesVersionIncreases( @@ -835,7 +830,7 @@ public void testIndicesGetAliases() throws Exception { ); logger.info("--> getting bar and baz for index bazbar"); - getResponse = indicesAdmin().prepareGetAliases("bar", "bac").setIndices("bazbar").get(); + getResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "bar", "bac").setIndices("bazbar").get(); assertThat(getResponse, notNullValue()); assertThat(getResponse.getAliases().size(), equalTo(1)); assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2)); @@ -851,13 +846,13 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("bazbar").get(1).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("bazbar").get(1).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("bazbar").get(1).getSearchRouting(), nullValue()); - assertFalse(indicesAdmin().prepareGetAliases("bar").get().getAliases().isEmpty()); - assertFalse(indicesAdmin().prepareGetAliases("bac").get().getAliases().isEmpty()); - assertFalse(indicesAdmin().prepareGetAliases("bar").setIndices("bazbar").get().getAliases().isEmpty()); - assertFalse(indicesAdmin().prepareGetAliases("bac").setIndices("bazbar").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "bar").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "bac").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "bar").setIndices("bazbar").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "bac").setIndices("bazbar").get().getAliases().isEmpty()); logger.info("--> getting *b* for index baz*"); - getResponse = indicesAdmin().prepareGetAliases("*b*").setIndices("baz*").get(); + getResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "*b*").setIndices("baz*").get(); assertThat(getResponse, notNullValue()); assertThat(getResponse.getAliases().size(), equalTo(1)); 
assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2)); @@ -873,10 +868,10 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("bazbar").get(1).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("bazbar").get(1).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("bazbar").get(1).getSearchRouting(), nullValue()); - assertFalse(indicesAdmin().prepareGetAliases("*b*").setIndices("baz*").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "*b*").setIndices("baz*").get().getAliases().isEmpty()); logger.info("--> getting *b* for index *bar"); - getResponse = indicesAdmin().prepareGetAliases("b*").setIndices("*bar").get(); + getResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "b*").setIndices("*bar").get(); assertThat(getResponse, notNullValue()); assertThat(getResponse.getAliases().size(), equalTo(2)); assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2)); @@ -897,10 +892,10 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), equalTo("bla")); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), equalTo("bla")); - assertFalse(indicesAdmin().prepareGetAliases("b*").setIndices("*bar").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "b*").setIndices("*bar").get().getAliases().isEmpty()); logger.info("--> getting f* for index *bar"); - getResponse = indicesAdmin().prepareGetAliases("f*").setIndices("*bar").get(); + getResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "f*").setIndices("*bar").get(); assertThat(getResponse, notNullValue()); assertThat(getResponse.getAliases().size(), equalTo(1)); assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue()); @@ -908,11 +903,11 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); - assertFalse(indicesAdmin().prepareGetAliases("f*").setIndices("*bar").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "f*").setIndices("*bar").get().getAliases().isEmpty()); // alias at work logger.info("--> getting f* for index *bac"); - getResponse = indicesAdmin().prepareGetAliases("foo").setIndices("*bac").get(); + getResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foo").setIndices("*bac").get(); assertThat(getResponse, notNullValue()); assertThat(getResponse.getAliases().size(), equalTo(1)); assertThat(getResponse.getAliases().get("foobar").size(), equalTo(1)); @@ -921,10 +916,10 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); - assertFalse(indicesAdmin().prepareGetAliases("foo").setIndices("*bac").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, 
"foo").setIndices("*bac").get().getAliases().isEmpty()); logger.info("--> getting foo for index foobar"); - getResponse = indicesAdmin().prepareGetAliases("foo").setIndices("foobar").get(); + getResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foo").setIndices("foobar").get(); assertThat(getResponse, notNullValue()); assertThat(getResponse.getAliases().size(), equalTo(1)); assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue()); @@ -932,13 +927,13 @@ public void testIndicesGetAliases() throws Exception { assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue()); assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue()); - assertFalse(indicesAdmin().prepareGetAliases("foo").setIndices("foobar").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foo").setIndices("foobar").get().getAliases().isEmpty()); for (String aliasName : new String[] { null, "_all", "*" }) { logger.info("--> getting {} alias for index foobar", aliasName); getResponse = aliasName != null - ? indicesAdmin().prepareGetAliases(aliasName).setIndices("foobar").get() - : indicesAdmin().prepareGetAliases().setIndices("foobar").get(); + ? indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, aliasName).setIndices("foobar").get() + : indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT).setIndices("foobar").get(); assertThat(getResponse, notNullValue()); assertThat(getResponse.getAliases().size(), equalTo(1)); assertThat(getResponse.getAliases().get("foobar").size(), equalTo(4)); @@ -950,20 +945,20 @@ public void testIndicesGetAliases() throws Exception { // alias at work again logger.info("--> getting * for index *bac"); - getResponse = indicesAdmin().prepareGetAliases("*").setIndices("*bac").get(); + getResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "*").setIndices("*bac").get(); assertThat(getResponse, notNullValue()); assertThat(getResponse.getAliases().size(), equalTo(2)); assertThat(getResponse.getAliases().get("foobar").size(), equalTo(4)); assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2)); - assertFalse(indicesAdmin().prepareGetAliases("*").setIndices("*bac").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "*").setIndices("*bac").get().getAliases().isEmpty()); assertAcked(indicesAdmin().prepareAliases().removeAlias("foobar", "foo")); - getResponse = indicesAdmin().prepareGetAliases("foo").setIndices("foobar").get(); + getResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foo").setIndices("foobar").get(); for (final Map.Entry> entry : getResponse.getAliases().entrySet()) { assertTrue(entry.getValue().isEmpty()); } - assertTrue(indicesAdmin().prepareGetAliases("foo").setIndices("foobar").get().getAliases().isEmpty()); + assertTrue(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foo").setIndices("foobar").get().getAliases().isEmpty()); } public void testGetAllAliasesWorks() { @@ -975,7 +970,7 @@ public void testGetAllAliasesWorks() { () -> assertAcked(indicesAdmin().prepareAliases().addAlias("index1", "alias1").addAlias("index2", "alias2")) ); - GetAliasesResponse response = indicesAdmin().prepareGetAliases().get(); + GetAliasesResponse response = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT).get(); assertThat(response.getAliases(), hasKey("index1")); 
assertThat(response.getAliases(), hasKey("index1")); } @@ -1119,8 +1114,11 @@ public void testAliasesWithBlocks() { () -> assertAcked(indicesAdmin().prepareAliases().addAlias("test", "alias1").addAlias("test", "alias2")) ); assertAliasesVersionIncreases("test", () -> assertAcked(indicesAdmin().prepareAliases().removeAlias("test", "alias1"))); - assertThat(indicesAdmin().prepareGetAliases("alias2").get().getAliases().get("test").size(), equalTo(1)); - assertFalse(indicesAdmin().prepareGetAliases("alias2").get().getAliases().isEmpty()); + assertThat( + indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias2").get().getAliases().get("test").size(), + equalTo(1) + ); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias2").get().getAliases().isEmpty()); } finally { disableIndexBlock("test", block); } @@ -1137,8 +1135,8 @@ public void testAliasesWithBlocks() { "test", () -> assertBlocked(indicesAdmin().prepareAliases().removeAlias("test", "alias2"), INDEX_READ_ONLY_BLOCK) ); - assertThat(indicesAdmin().prepareGetAliases("alias2").get().getAliases().get("test").size(), equalTo(1)); - assertFalse(indicesAdmin().prepareGetAliases("alias2").get().getAliases().isEmpty()); + assertThat(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias2").get().getAliases().get("test").size(), equalTo(1)); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias2").get().getAliases().isEmpty()); } finally { disableIndexBlock("test", SETTING_READ_ONLY); @@ -1155,21 +1153,23 @@ public void testAliasesWithBlocks() { "test", () -> assertBlocked(indicesAdmin().prepareAliases().removeAlias("test", "alias2"), INDEX_METADATA_BLOCK) ); - assertBlocked(indicesAdmin().prepareGetAliases("alias2"), INDEX_METADATA_BLOCK); - assertBlocked(indicesAdmin().prepareGetAliases("alias2"), INDEX_METADATA_BLOCK); + assertBlocked(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias2"), INDEX_METADATA_BLOCK); + assertBlocked(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias2"), INDEX_METADATA_BLOCK); } finally { disableIndexBlock("test", SETTING_BLOCKS_METADATA); } } - public void testAliasActionRemoveIndex() throws InterruptedException, ExecutionException { - assertAcked(prepareCreate("foo_foo")); - assertAcked(prepareCreate("bar_bar")); - assertAliasesVersionIncreases(new String[] { "foo_foo", "bar_bar" }, () -> { - assertAcked(indicesAdmin().prepareAliases().addAlias("foo_foo", "foo")); - assertAcked(indicesAdmin().prepareAliases().addAlias("bar_bar", "foo")); - }); + public void testAliasActionRemoveIndex() { + assertAcked(prepareCreate("foo_foo"), prepareCreate("bar_bar")); + assertAliasesVersionIncreases( + new String[] { "foo_foo", "bar_bar" }, + () -> assertAcked( + indicesAdmin().prepareAliases().addAlias("bar_bar", "foo"), + indicesAdmin().prepareAliases().addAlias("foo_foo", "foo") + ) + ); IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, indicesAdmin().prepareAliases().removeIndex("foo")); assertEquals( @@ -1179,12 +1179,12 @@ public void testAliasActionRemoveIndex() throws InterruptedException, ExecutionE assertAcked(indicesAdmin().prepareAliases().removeIndex("foo*")); assertFalse(indexExists("foo_foo")); - assertFalse(indicesAdmin().prepareGetAliases("foo").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foo").get().getAliases().isEmpty()); assertTrue(indexExists("bar_bar")); - 
assertFalse(indicesAdmin().prepareGetAliases("foo").setIndices("bar_bar").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foo").setIndices("bar_bar").get().getAliases().isEmpty()); assertAcked(indicesAdmin().prepareAliases().removeIndex("bar_bar")); - assertTrue(indicesAdmin().prepareGetAliases("foo").get().getAliases().isEmpty()); + assertTrue(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foo").get().getAliases().isEmpty()); assertFalse(indexExists("bar_bar")); } @@ -1323,7 +1323,7 @@ public void testGetAliasAndAliasExistsForHiddenAliases() { } private void checkAliases() { - GetAliasesResponse getAliasesResponse = indicesAdmin().prepareGetAliases("alias1").get(); + GetAliasesResponse getAliasesResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias1").get(); assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(1)); AliasMetadata aliasMetadata = getAliasesResponse.getAliases().get("test").get(0); assertThat(aliasMetadata.alias(), equalTo("alias1")); @@ -1332,7 +1332,7 @@ private void checkAliases() { assertThat(aliasMetadata.searchRouting(), nullValue()); assertThat(aliasMetadata.isHidden(), nullValue()); - getAliasesResponse = indicesAdmin().prepareGetAliases("alias2").get(); + getAliasesResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias2").get(); assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(1)); aliasMetadata = getAliasesResponse.getAliases().get("test").get(0); assertThat(aliasMetadata.alias(), equalTo("alias2")); @@ -1341,7 +1341,7 @@ private void checkAliases() { assertThat(aliasMetadata.searchRouting(), nullValue()); assertThat(aliasMetadata.isHidden(), nullValue()); - getAliasesResponse = indicesAdmin().prepareGetAliases("alias3").get(); + getAliasesResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias3").get(); assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(1)); aliasMetadata = getAliasesResponse.getAliases().get("test").get(0); assertThat(aliasMetadata.alias(), equalTo("alias3")); @@ -1350,7 +1350,7 @@ private void checkAliases() { assertThat(aliasMetadata.searchRouting(), equalTo("search")); assertThat(aliasMetadata.isHidden(), nullValue()); - getAliasesResponse = indicesAdmin().prepareGetAliases("alias4").get(); + getAliasesResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias4").get(); assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(1)); aliasMetadata = getAliasesResponse.getAliases().get("test").get(0); assertThat(aliasMetadata.alias(), equalTo("alias4")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/aliases/NetNewSystemIndexAliasIT.java b/server/src/internalClusterTest/java/org/elasticsearch/aliases/NetNewSystemIndexAliasIT.java index cf54bc50398c4..f0ee06336b493 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/aliases/NetNewSystemIndexAliasIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/aliases/NetNewSystemIndexAliasIT.java @@ -49,7 +49,7 @@ public void testGetAliasWithNetNewSystemIndices() throws Exception { } ensureGreen(); - GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); + GetAliasesRequest getAliasesRequest = new GetAliasesRequest(TEST_REQUEST_TIMEOUT); GetAliasesResponse aliasResponse = indicesAdmin().getAliases(getAliasesRequest).get(); assertThat(aliasResponse.getAliases().size(), is(0)); } diff --git 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/aliases/SystemIndexAliasIT.java b/server/src/internalClusterTest/java/org/elasticsearch/aliases/SystemIndexAliasIT.java
index fbade0ac2c871..38696d75b93da 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/aliases/SystemIndexAliasIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/aliases/SystemIndexAliasIT.java
@@ -41,7 +41,7 @@ public void testCreateAliasForSystemIndex() throws Exception {
         assertAcked(indicesAdmin().prepareAliases().addAlias(PRIMARY_INDEX_NAME, INDEX_NAME + "-system-alias"));

         final GetAliasesResponse getAliasesResponse = indicesAdmin().getAliases(
-            new GetAliasesRequest().indicesOptions(IndicesOptions.strictExpandHidden())
+            new GetAliasesRequest(TEST_REQUEST_TIMEOUT).indicesOptions(IndicesOptions.strictExpandHidden())
         ).get();

         assertThat(getAliasesResponse.getAliases().size(), equalTo(1));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java
index 58b9af7724aaa..15b4a557b2b8b 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java
@@ -9,7 +9,6 @@
 package org.elasticsearch.cluster;

-import org.elasticsearch.TransportVersion;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.block.ClusterBlocks;
@@ -71,6 +70,7 @@
 import static org.elasticsearch.test.XContentTestUtils.convertToMap;
 import static org.elasticsearch.test.XContentTestUtils.differenceBetweenMapsIgnoringArrayOrder;
 import static org.elasticsearch.test.index.IndexVersionUtils.randomVersion;
+import static org.elasticsearch.test.index.IndexVersionUtils.randomWriteVersion;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;

@@ -218,7 +218,7 @@ private ClusterState.Builder randomCoordinationMetadata(ClusterState clusterStat
     private DiscoveryNode randomNode(String nodeId) {
         Version nodeVersion = VersionUtils.randomVersion(random());
-        IndexVersion indexVersion = randomVersion(random());
+        IndexVersion indexVersion = randomVersion();
         return DiscoveryNodeUtils.builder(nodeId)
             .roles(emptySet())
             .version(nodeVersion, IndexVersion.fromId(indexVersion.id() - 1_000_000), indexVersion)
@@ -561,10 +561,10 @@ public IndexMetadata randomCreate(String name) {
             IndexMetadata.Builder builder = IndexMetadata.builder(name);
             Settings.Builder settingsBuilder = Settings.builder();
             setRandomIndexSettings(random(), settingsBuilder);
-            settingsBuilder.put(randomSettings(Settings.EMPTY)).put(IndexMetadata.SETTING_VERSION_CREATED, randomVersion(random()));
+            settingsBuilder.put(randomSettings(Settings.EMPTY)).put(IndexMetadata.SETTING_VERSION_CREATED, randomWriteVersion());
             builder.settings(settingsBuilder);
             builder.numberOfShards(randomIntBetween(1, 10)).numberOfReplicas(randomInt(10));
-            builder.eventIngestedRange(IndexLongFieldRange.UNKNOWN, TransportVersion.current());
+            builder.eventIngestedRange(IndexLongFieldRange.UNKNOWN);
             int aliasCount = randomInt(10);
             for (int i = 0; i < aliasCount; i++) {
                 builder.putAlias(randomAlias());
@@ -736,7 +736,7 @@ public ClusterState.Custom randomCreate(String name) {
                     ImmutableOpenMap.of(),
                     null,
                     SnapshotInfoTestUtils.randomUserMetadata(),
-                    randomVersion(random())
+                    randomVersion()
                 )
             );
             case 1 -> new RestoreInProgress.Builder().add(
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java
index def125e5c99c3..9eed1f757b5b1 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java
@@ -183,7 +183,8 @@ public void testIndexTemplates() throws Exception {
         ClusterStateResponse clusterStateResponseUnfiltered = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get();
         assertThat(clusterStateResponseUnfiltered.getState().metadata().templates().size(), is(greaterThanOrEqualTo(2)));

-        GetIndexTemplatesResponse getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates("foo_template").get();
+        GetIndexTemplatesResponse getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT, "foo_template")
+            .get();
         assertIndexTemplateExists(getIndexTemplatesResponse, "foo_template");
     }

@@ -257,11 +258,14 @@ public void testLargeClusterStatePublishing() throws Exception {
                 .setTimeout(TimeValue.timeValueMinutes(1))
         );
         ensureGreen(); // wait for green state, so its both green, and there are no more pending events
-        MappingMetadata masterMappingMetadata = indicesAdmin().prepareGetMappings("test").get().getMappings().get("test");
+        MappingMetadata masterMappingMetadata = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test")
+            .get()
+            .getMappings()
+            .get("test");
         for (Client client : clients()) {
             MappingMetadata mappingMetadata = client.admin()
                 .indices()
-                .prepareGetMappings("test")
+                .prepareGetMappings(TEST_REQUEST_TIMEOUT, "test")
                 .setLocal(true)
                 .get()
                 .getMappings()
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java
index 5e51c83d54795..b5c3861633350 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java
@@ -95,7 +95,9 @@ public void testTemplateUpdate() throws Exception {
         // the updates only happen on cluster state updates, so we need to make sure that the cluster state updates are happening
         // so we need to simulate updates to make sure the template upgrade kicks in
         updateClusterSettings(Settings.builder().put(TestPlugin.UPDATE_TEMPLATE_DUMMY_SETTING.getKey(), updateCount.incrementAndGet()));
-        List<IndexTemplateMetadata> templates = indicesAdmin().prepareGetTemplates("test_*").get().getIndexTemplates();
+        List<IndexTemplateMetadata> templates = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT, "test_*")
+            .get()
+            .getIndexTemplates();
         assertThat(templates, hasSize(3));
         boolean addedFound = false;
         boolean changedFound = false;
@@ -139,7 +141,9 @@ private void assertTemplates() throws Exception {
         // so we need to simulate updates to make sure the template upgrade kicks in
         updateClusterSettings(Settings.builder().put(TestPlugin.UPDATE_TEMPLATE_DUMMY_SETTING.getKey(), updateCount.incrementAndGet()));

-        List<IndexTemplateMetadata> templates = indicesAdmin().prepareGetTemplates("test_*").get().getIndexTemplates();
+        List<IndexTemplateMetadata> templates = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT, "test_*")
+            .get()
+            .getIndexTemplates();
         assertThat(templates, hasSize(2));
         boolean addedFound = false;
         boolean changedFound = false;
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/UpdateShardAllocationSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/UpdateShardAllocationSettingsIT.java
index 198e27a70c0bc..7fb763b5b91a3 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/UpdateShardAllocationSettingsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/UpdateShardAllocationSettingsIT.java
@@ -39,8 +39,10 @@ public void testEnableRebalance() {
         // we test with 2 shards since otherwise it's pretty fragile if there are difference in the num or shards such that
         // all shards are relocated to the second node which is not what we want here. It's solely a test for the settings to take effect
         final int numShards = 2;
-        assertAcked(prepareCreate("test").setSettings(indexSettings(numShards, 0)));
-        assertAcked(prepareCreate("test_1").setSettings(indexSettings(numShards, 0)));
+        assertAcked(
+            prepareCreate("test").setSettings(indexSettings(numShards, 0)),
+            prepareCreate("test_1").setSettings(indexSettings(numShards, 0))
+        );
         ensureGreen();
         assertAllShardsOnNodes("test", firstNode);
         assertAllShardsOnNodes("test_1", firstNode);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java
index 74fd945ed3779..b756b4ca770c4 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java
@@ -30,7 +30,6 @@ public void testClusterHasFeatures() {
         FeatureService service = internalCluster().getCurrentMasterNodeInstance(FeatureService.class);

-        assertThat(service.getNodeFeatures(), hasKey(FeatureService.FEATURES_SUPPORTED.id()));
         assertThat(service.getNodeFeatures(), hasKey(FeatureService.TEST_FEATURES_ENABLED.id()));

         // check the nodes all have a feature in their cluster state (there should always be features_supported)
@@ -38,7 +37,7 @@ public void testClusterHasFeatures() {
         var features = response.getState().clusterFeatures().nodeFeatures();
         Set<String> missing = features.entrySet()
             .stream()
-            .filter(e -> e.getValue().contains(FeatureService.FEATURES_SUPPORTED.id()) == false)
+            .filter(e -> e.getValue().contains(FeatureService.TEST_FEATURES_ENABLED.id()) == false)
             .map(Map.Entry::getKey)
             .collect(Collectors.toSet());
         assertThat(missing + " out of " + features.keySet() + " does not have the required feature", missing, empty());
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java
index d6ccdf3dc0399..256566045c59a 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java
@@ -115,7 +115,7 @@ public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception {
             )
             .get();

-        GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(index).get();
+        GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, index).get();
         assertNotNull(
             ((Map<String, ?>) (getMappingsResponse.getMappings().get(index).getSourceAsMap().get("properties"))).get("integer_field")
         );
@@ -146,7 +146,7 @@ public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception {
             )
             .get();

-        getMappingsResponse = indicesAdmin().prepareGetMappings(index).get();
+        getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, index).get();
         assertNotNull(
             ((Map<String, ?>) (getMappingsResponse.getMappings().get(index).getSourceAsMap().get("properties"))).get("float_field")
         );
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java
index d116c091fbe4a..24af560f608d3 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java
@@ -97,7 +97,7 @@ public void testGlobalTemplatesDoNotApply() {
         assertAcked(indicesAdmin().prepareCreate("a_hidden_index").setSettings(Settings.builder().put("index.hidden", true).build()));

-        GetMappingsResponse mappingsResponse = indicesAdmin().prepareGetMappings("a_hidden_index").get();
+        GetMappingsResponse mappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "a_hidden_index").get();
         assertThat(mappingsResponse.mappings().size(), is(1));
         MappingMetadata mappingMetadata = mappingsResponse.mappings().get("a_hidden_index");
         assertNotNull(mappingMetadata);
@@ -150,13 +150,13 @@ public void testAliasesForHiddenIndices() {
         );

         // The index should be returned here when queried by name or by wildcard because the alias is visible
-        final GetAliasesRequestBuilder req = indicesAdmin().prepareGetAliases(visibleAlias);
+        final GetAliasesRequestBuilder req = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, visibleAlias);
         GetAliasesResponse response = req.get();
         assertThat(response.getAliases().get(hiddenIndex), hasSize(1));
         assertThat(response.getAliases().get(hiddenIndex).get(0).alias(), equalTo(visibleAlias));
         assertThat(response.getAliases().get(hiddenIndex).get(0).isHidden(), nullValue());

-        response = indicesAdmin().prepareGetAliases("alias*").get();
+        response = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias*").get();
         assertThat(response.getAliases().get(hiddenIndex), hasSize(1));
         assertThat(response.getAliases().get(hiddenIndex).get(0).alias(), equalTo(visibleAlias));
         assertThat(response.getAliases().get(hiddenIndex).get(0).isHidden(), nullValue());
@@ -169,19 +169,21 @@ public void testAliasesForHiddenIndices() {
         );

         // Querying by name directly should get the right result
-        response = indicesAdmin().prepareGetAliases(hiddenAlias).get();
+        response = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, hiddenAlias).get();
         assertThat(response.getAliases().get(hiddenIndex), hasSize(1));
         assertThat(response.getAliases().get(hiddenIndex).get(0).alias(), equalTo(hiddenAlias));
         assertThat(response.getAliases().get(hiddenIndex).get(0).isHidden(), equalTo(true));

         // querying by wildcard should get the right result because the indices options include hidden by default
-        response = indicesAdmin().prepareGetAliases("alias*").get();
+        response = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias*").get();
         assertThat(response.getAliases().get(hiddenIndex), hasSize(1));
         assertThat(response.getAliases().get(hiddenIndex).get(0).alias(), equalTo(hiddenAlias));
         assertThat(response.getAliases().get(hiddenIndex).get(0).isHidden(), equalTo(true));

         // But we should get no results if we specify indices options that don't include hidden
indicesAdmin().prepareGetAliases("alias*").setIndicesOptions(IndicesOptions.strictExpandOpen()).get(); + response = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias*") + .setIndicesOptions(IndicesOptions.strictExpandOpen()) + .get(); assertThat(response.getAliases().get(hiddenIndex), nullValue()); // Now try with a hidden alias that starts with a dot @@ -192,7 +194,7 @@ public void testAliasesForHiddenIndices() { ); // Check that querying by dot-prefixed pattern returns the alias - response = indicesAdmin().prepareGetAliases(".alias*").get(); + response = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, ".alias*").get(); assertThat(response.getAliases().get(hiddenIndex), hasSize(1)); assertThat(response.getAliases().get(hiddenIndex).get(0).alias(), equalTo(dotHiddenAlias)); assertThat(response.getAliases().get(hiddenIndex).get(0).isHidden(), equalTo(true)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java index 960ee2fd7ca60..6bca87ebd6e3d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java @@ -49,7 +49,7 @@ public void testBasic() { assertAcked(client().admin().indices().execute(TransportCreateIndexAction.TYPE, createRequest)); Settings settings = client().admin().indices().prepareGetSettings("hosts").get().getIndexToSettings().get("hosts"); assertThat(settings.get("index.mode"), equalTo("lookup")); - assertThat(settings.get("index.auto_expand_replicas"), equalTo("0-all")); + assertNull(settings.get("index.auto_expand_replicas")); Map allHosts = Map.of( "192.168.1.2", "Windows", @@ -141,7 +141,6 @@ public void testResizeLookupIndex() { Settings settings = client().admin().indices().prepareGetSettings("lookup-2").get().getIndexToSettings().get("lookup-2"); assertThat(settings.get("index.mode"), equalTo("lookup")); assertThat(settings.get("index.number_of_shards"), equalTo("1")); - assertThat(settings.get("index.auto_expand_replicas"), equalTo("0-all")); ResizeRequest split = new ResizeRequest("lookup-3", "lookup-1"); split.setResizeType(ResizeType.SPLIT); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index f7bf775bc4f8b..06561bc6d4c97 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -96,7 +96,11 @@ public void testSimpleDynamicMappingsSuccessful() { client().prepareIndex("index").setId("1").setSource("a.x", 1).get(); client().prepareIndex("index").setId("2").setSource("a.y", 2).get(); - Map mappings = indicesAdmin().prepareGetMappings("index").get().mappings().get("index").sourceAsMap(); + Map mappings = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "index") + .get() + .mappings() + .get("index") + .sourceAsMap(); assertTrue(new WriteField("properties.a", () -> mappings).exists()); assertTrue(new WriteField("properties.a.properties.x", () -> mappings).exists()); } @@ -183,7 +187,7 @@ private Map indexConcurrently(int numberOfFieldsToCreate, Settin for (int i = 0; i < numberOfFieldsToCreate; ++i) { assertTrue(client().prepareGet("index", Integer.toString(i)).get().isExists()); } - GetMappingsResponse mappings 
= indicesAdmin().prepareGetMappings("index").get(); + GetMappingsResponse mappings = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "index").get(); MappingMetadata indexMappings = mappings.getMappings().get("index"); assertNotNull(indexMappings); Map typeMappingsMap = indexMappings.getSourceAsMap(); @@ -214,7 +218,11 @@ public void testConcurrentDynamicMappingsWithConflictingType() throws Throwable for (int i = 0; i < numberOfDocsToCreate; ++i) { assertTrue(client().prepareGet("index", Integer.toString(i)).get().isExists()); } - Map index = indicesAdmin().prepareGetMappings("index").get().getMappings().get("index").getSourceAsMap(); + Map index = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "index") + .get() + .getMappings() + .get("index") + .getSourceAsMap(); for (int i = 0, j = 1; i < numberOfDocsToCreate; i++, j++) { assertThat(new WriteField("properties.field" + i + ".type", () -> index).get(null), is(oneOf("long", "float"))); assertThat(new WriteField("properties.field" + j + ".type", () -> index).get(null), is(oneOf("long", "float"))); @@ -683,9 +691,12 @@ public void testDynamicRuntimeNoConflicts() { BulkResponse bulkItemResponses = client().bulk(bulkRequest).actionGet(); assertFalse(bulkItemResponses.buildFailureMessage(), bulkItemResponses.hasFailures()); - assertHitCount(prepareSearch("test").setQuery(new MatchQueryBuilder("one", "one")), 1); - assertHitCount(prepareSearch("test").setQuery(new MatchQueryBuilder("one.two", 3.5)), 1); - assertHitCount(prepareSearch("test").setQuery(new MatchQueryBuilder("one.two.three", "1")), 1); + assertHitCount( + 1, + prepareSearch("test").setQuery(new MatchQueryBuilder("one", "one")), + prepareSearch("test").setQuery(new MatchQueryBuilder("one.two", 3.5)), + prepareSearch("test").setQuery(new MatchQueryBuilder("one.two.three", "1")) + ); } public void testDynamicRuntimeObjectFields() { @@ -722,10 +733,13 @@ public void testDynamicRuntimeObjectFields() { BulkResponse bulkItemResponses = client().bulk(bulkRequest).actionGet(); assertFalse(bulkItemResponses.buildFailureMessage(), bulkItemResponses.hasFailures()); - assertHitCount(prepareSearch("test").setQuery(new MatchQueryBuilder("obj.one", 1)), 1); - assertHitCount(prepareSearch("test").setQuery(new MatchQueryBuilder("anything", "anything")), 1); - assertHitCount(prepareSearch("test").setQuery(new MatchQueryBuilder("obj.runtime.one", "one")), 1); - assertHitCount(prepareSearch("test").setQuery(new MatchQueryBuilder("obj.runtime.one.two", "1")), 1); + assertHitCount( + 1, + prepareSearch("test").setQuery(new MatchQueryBuilder("obj.one", 1)), + prepareSearch("test").setQuery(new MatchQueryBuilder("anything", "anything")), + prepareSearch("test").setQuery(new MatchQueryBuilder("obj.runtime.one", "one")), + prepareSearch("test").setQuery(new MatchQueryBuilder("obj.runtime.one.two", "1")) + ); Exception exception = expectThrows(DocumentParsingException.class, prepareIndex("test").setSource("obj.runtime", "value")); assertThat( @@ -800,7 +814,11 @@ public void testSubobjectsFalseAtRoot() throws Exception { assertEquals(RestStatus.CREATED, indexResponse.status()); assertBusy(() -> { - Map mappings = indicesAdmin().prepareGetMappings("test").get().mappings().get("test").sourceAsMap(); + Map mappings = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test") + .get() + .mappings() + .get("test") + .sourceAsMap(); @SuppressWarnings("unchecked") Map properties = (Map) mappings.get("properties"); assertEquals(4, properties.size()); @@ -845,7 +863,11 @@ public void 
@@ -845,7 +863,11 @@ public void testSubobjectsFalse() throws Exception {
         assertEquals(RestStatus.CREATED, indexResponse.status());

         assertBusy(() -> {
-            Map<String, Object> mappings = indicesAdmin().prepareGetMappings("test").get().mappings().get("test").sourceAsMap();
+            Map<String, Object> mappings = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test")
+                .get()
+                .mappings()
+                .get("test")
+                .sourceAsMap();
             Map<String, Object> properties = (Map<String, Object>) mappings.get("properties");
             Map<String, Object> foo = (Map<String, Object>) properties.get("foo");
             properties = (Map<String, Object>) foo.get("properties");
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java
index fea5d256b1993..1a51fc12fed8e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java
@@ -37,7 +37,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase {
     public void testMultiFields() throws Exception {
         assertAcked(indicesAdmin().prepareCreate("my-index").setMapping(createTypeSource()));

-        GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("my-index").get();
+        GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "my-index").get();
         MappingMetadata mappingMetadata = getMappingsResponse.mappings().get("my-index");
         assertThat(mappingMetadata, not(nullValue()));
         Map<String, Object> mappingSource = mappingMetadata.sourceAsMap();
@@ -53,7 +53,7 @@ public void testMultiFields() throws Exception {

         assertAcked(indicesAdmin().preparePutMapping("my-index").setSource(createPutMappingSource()));

-        getMappingsResponse = indicesAdmin().prepareGetMappings("my-index").get();
+        getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "my-index").get();
         mappingMetadata = getMappingsResponse.mappings().get("my-index");
         assertThat(mappingMetadata, not(nullValue()));
         mappingSource = mappingMetadata.sourceAsMap();
@@ -74,7 +74,7 @@ public void testMultiFields() throws Exception {
     public void testGeoPointMultiField() throws Exception {
         assertAcked(indicesAdmin().prepareCreate("my-index").setMapping(createMappingSource("geo_point")));

-        GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("my-index").get();
+        GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "my-index").get();
         MappingMetadata mappingMetadata = getMappingsResponse.mappings().get("my-index");
         assertThat(mappingMetadata, not(nullValue()));
         Map<String, Object> mappingSource = mappingMetadata.sourceAsMap();
@@ -91,18 +91,18 @@ public void testGeoPointMultiField() throws Exception {
         GeoPoint point = new GeoPoint(51, 19);
         prepareIndex("my-index").setId("1").setSource("a", point.toString()).setRefreshPolicy(IMMEDIATE).get();
         assertHitCount(
+            1L,
             prepareSearch("my-index").setSize(0)
                 .setQuery(constantScoreQuery(geoDistanceQuery("a").point(51, 19).distance(50, DistanceUnit.KILOMETERS))),
-            1L
+            prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", point.geohash()))
         );
-        assertHitCount(prepareSearch("my-index").setSize(0).setQuery(matchQuery("a.b", point.geohash())), 1L);
     }

     @SuppressWarnings("unchecked")
     public void testCompletionMultiField() throws Exception {
         assertAcked(indicesAdmin().prepareCreate("my-index").setMapping(createMappingSource("completion")));

-        GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("my-index").get();
GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "my-index").get(); MappingMetadata mappingMetadata = getMappingsResponse.mappings().get("my-index"); assertThat(mappingMetadata, not(nullValue())); Map mappingSource = mappingMetadata.sourceAsMap(); @@ -123,7 +123,7 @@ public void testCompletionMultiField() throws Exception { public void testIpMultiField() throws Exception { assertAcked(indicesAdmin().prepareCreate("my-index").setMapping(createMappingSource("ip"))); - GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("my-index").get(); + GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "my-index").get(); MappingMetadata mappingMetadata = getMappingsResponse.mappings().get("my-index"); assertThat(mappingMetadata, not(nullValue())); Map mappingSource = mappingMetadata.sourceAsMap(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java index 07f9d9ee7b6c3..92e5eb8e046bc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/PeerRecoveryRetentionLeaseCreationIT.java @@ -48,7 +48,7 @@ public void testCanRecoverFromStoreWithoutPeerRecoveryRetentionLease() throws Ex Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomCompatibleVersion(random())) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomCompatibleWriteVersion(random())) ) ); ensureGreen(INDEX_NAME); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index 545ed83bb79c8..c53cf3b56f65a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -497,7 +497,9 @@ public void testDeleteIndexWildcard() throws Exception { public void testPutAlias() throws Exception { createIndex("foobar"); verify(indicesAdmin().prepareAliases().addAlias("foobar", "foobar_alias"), false); - assertFalse(indicesAdmin().prepareGetAliases("foobar_alias").setIndices("foobar").get().getAliases().isEmpty()); + assertFalse( + indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("foobar").get().getAliases().isEmpty() + ); } @@ -505,16 +507,24 @@ public void testPutAliasWildcard() throws Exception { createIndex("foo", "foobar", "bar", "barbaz"); verify(indicesAdmin().prepareAliases().addAlias("foo*", "foobar_alias"), false); - assertFalse(indicesAdmin().prepareGetAliases("foobar_alias").setIndices("foo").get().getAliases().isEmpty()); - assertFalse(indicesAdmin().prepareGetAliases("foobar_alias").setIndices("foobar").get().getAliases().isEmpty()); - assertTrue(indicesAdmin().prepareGetAliases("foobar_alias").setIndices("bar").get().getAliases().isEmpty()); - assertTrue(indicesAdmin().prepareGetAliases("foobar_alias").setIndices("barbaz").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, 
"foobar_alias").setIndices("foo").get().getAliases().isEmpty()); + assertFalse( + indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("foobar").get().getAliases().isEmpty() + ); + assertTrue(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("bar").get().getAliases().isEmpty()); + assertTrue( + indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("barbaz").get().getAliases().isEmpty() + ); verify(indicesAdmin().prepareAliases().addAlias("*", "foobar_alias"), false); - assertFalse(indicesAdmin().prepareGetAliases("foobar_alias").setIndices("foo").get().getAliases().isEmpty()); - assertFalse(indicesAdmin().prepareGetAliases("foobar_alias").setIndices("foobar").get().getAliases().isEmpty()); - assertFalse(indicesAdmin().prepareGetAliases("foobar_alias").setIndices("bar").get().getAliases().isEmpty()); - assertFalse(indicesAdmin().prepareGetAliases("foobar_alias").setIndices("barbaz").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("foo").get().getAliases().isEmpty()); + assertFalse( + indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("foobar").get().getAliases().isEmpty() + ); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("bar").get().getAliases().isEmpty()); + assertFalse( + indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "foobar_alias").setIndices("barbaz").get().getAliases().isEmpty() + ); } @@ -527,26 +537,26 @@ public void testPutMapping() throws Exception { } verify(indicesAdmin().preparePutMapping("foo").setSource("field", "type=text"), false); - assertThat(indicesAdmin().prepareGetMappings("foo").get().mappings().get("foo"), notNullValue()); + assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foo").get().mappings().get("foo"), notNullValue()); verify(indicesAdmin().preparePutMapping("b*").setSource("field", "type=text"), false); - assertThat(indicesAdmin().prepareGetMappings("bar").get().mappings().get("bar"), notNullValue()); - assertThat(indicesAdmin().prepareGetMappings("barbaz").get().mappings().get("barbaz"), notNullValue()); + assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "bar").get().mappings().get("bar"), notNullValue()); + assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "barbaz").get().mappings().get("barbaz"), notNullValue()); verify(indicesAdmin().preparePutMapping("_all").setSource("field", "type=text"), false); - assertThat(indicesAdmin().prepareGetMappings("foo").get().mappings().get("foo"), notNullValue()); - assertThat(indicesAdmin().prepareGetMappings("foobar").get().mappings().get("foobar"), notNullValue()); - assertThat(indicesAdmin().prepareGetMappings("bar").get().mappings().get("bar"), notNullValue()); - assertThat(indicesAdmin().prepareGetMappings("barbaz").get().mappings().get("barbaz"), notNullValue()); + assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foo").get().mappings().get("foo"), notNullValue()); + assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foobar").get().mappings().get("foobar"), notNullValue()); + assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "bar").get().mappings().get("bar"), notNullValue()); + assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "barbaz").get().mappings().get("barbaz"), notNullValue()); 
         verify(indicesAdmin().preparePutMapping().setSource("field", "type=text"), false);
-        assertThat(indicesAdmin().prepareGetMappings("foo").get().mappings().get("foo"), notNullValue());
-        assertThat(indicesAdmin().prepareGetMappings("foobar").get().mappings().get("foobar"), notNullValue());
-        assertThat(indicesAdmin().prepareGetMappings("bar").get().mappings().get("bar"), notNullValue());
-        assertThat(indicesAdmin().prepareGetMappings("barbaz").get().mappings().get("barbaz"), notNullValue());
+        assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foo").get().mappings().get("foo"), notNullValue());
+        assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foobar").get().mappings().get("foobar"), notNullValue());
+        assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "bar").get().mappings().get("bar"), notNullValue());
+        assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "barbaz").get().mappings().get("barbaz"), notNullValue());

         verify(indicesAdmin().preparePutMapping("c*").setSource("field", "type=text"), true);

         assertAcked(indicesAdmin().prepareClose("barbaz").get());
         verify(indicesAdmin().preparePutMapping("barbaz").setSource("field", "type=text"), false);
-        assertThat(indicesAdmin().prepareGetMappings("barbaz").get().mappings().get("barbaz"), notNullValue());
+        assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "barbaz").get().mappings().get("barbaz"), notNullValue());
     }

     public static final class TestPlugin extends Plugin {
@@ -646,7 +656,7 @@ static ValidateQueryRequestBuilder validateQuery(String... indices) {
     }

     static GetAliasesRequestBuilder getAliases(String... indices) {
-        return indicesAdmin().prepareGetAliases("dummy").setIndices(indices);
+        return indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "dummy").setIndices(indices);
     }

     static GetFieldMappingsRequestBuilder getFieldMapping(String... indices) {
@@ -654,7 +664,7 @@ static GetFieldMappingsRequestBuilder getFieldMapping(String... indices) {
     }

     static GetMappingsRequestBuilder getMapping(String... indices) {
-        return indicesAdmin().prepareGetMappings(indices);
+        return indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, indices);
     }

     static GetSettingsRequestBuilder getSettings(String... indices) {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java
index 52492ba7ce657..03fb3d43793cf 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java
@@ -321,9 +321,11 @@ public void testQueryRewriteDates() throws Exception {
     public void testQueryRewriteDatesWithNow() throws Exception {
         Client client = client();
         Settings settings = indexSettings(1, 0).put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true).build();
-        assertAcked(indicesAdmin().prepareCreate("index-1").setMapping("d", "type=date").setSettings(settings).get());
-        assertAcked(indicesAdmin().prepareCreate("index-2").setMapping("d", "type=date").setSettings(settings).get());
-        assertAcked(indicesAdmin().prepareCreate("index-3").setMapping("d", "type=date").setSettings(settings).get());
+        assertAcked(
+            indicesAdmin().prepareCreate("index-1").setMapping("d", "type=date").setSettings(settings),
+            indicesAdmin().prepareCreate("index-2").setMapping("d", "type=date").setSettings(settings),
+            indicesAdmin().prepareCreate("index-3").setMapping("d", "type=date").setSettings(settings)
+        );
         ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC);
         DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time");
         indexRandom(
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java
index de565605ff58a..7264585337fc7 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java
@@ -123,8 +123,9 @@ private void triggerClusterStateUpdates() {
      * Fetch the mappings and settings for {@link TestSystemIndexDescriptor#INDEX_NAME} and verify that they match the expected values.
      */
     private void assertMappingsAndSettings(String expectedMappings) {
-        final GetMappingsResponse getMappingsResponse = indicesAdmin().getMappings(new GetMappingsRequest().indices(INDEX_NAME))
-            .actionGet();
+        final GetMappingsResponse getMappingsResponse = indicesAdmin().getMappings(
+            new GetMappingsRequest(TEST_REQUEST_TIMEOUT).indices(INDEX_NAME)
+        ).actionGet();

         final Map<String, MappingMetadata> mappings = getMappingsResponse.getMappings();
         assertThat(
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java
index 48aef0d348045..891b0319f880d 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationIT.java
@@ -47,7 +47,7 @@ public void testThatPreBuiltAnalyzersAreNotClosedOnIndexClose() throws Exception
             PreBuiltAnalyzers preBuiltAnalyzer = PreBuiltAnalyzers.values()[randomInt];
             String name = preBuiltAnalyzer.name().toLowerCase(Locale.ROOT);

-            IndexVersion randomVersion = IndexVersionUtils.randomVersion(random());
+            IndexVersion randomVersion = IndexVersionUtils.randomWriteVersion();
             if (loadedAnalyzers.containsKey(preBuiltAnalyzer) == false) {
                 loadedAnalyzers.put(preBuiltAnalyzer, new ArrayList<>());
             }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java
deleted file mode 100644
index 9fd657b809f27..0000000000000
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.indices.mapping;
-
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.core.UpdateForV9;
-import org.elasticsearch.index.IndexVersion;
-import org.elasticsearch.index.IndexVersions;
-import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.index.IndexVersionUtils;
-import org.elasticsearch.xcontent.XContentType;
-
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
-import static org.hamcrest.Matchers.containsString;
-
-public class MalformedDynamicTemplateIT extends ESIntegTestCase {
-
-    @Override
-    protected boolean forbidPrivateIndexSettings() {
-        return false;
-    }
-
-    /**
-     * Check that we can index a document into an 7.x index with a matching dynamic template that
-     * contains unknown parameters. We were able to create those templates in 7.x still, so we need
-     * to be able to index new documents into them. Indexing should issue a deprecation warning though.
-     */
-    @UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT)
-    @AwaitsFix(bugUrl = "this is testing 7.x specific compatibility which may be n/a now after 9.0 bump")
-    public void testBWCMalformedDynamicTemplate() {
-        // this parameter is not supported by "keyword" field type
-        String mapping = """
-            { "dynamic_templates": [
-                  {
-                      "my_template": {
-                          "mapping": {
-                              "ignore_malformed": true,
-                              "type": "keyword"
-                          },
-                          "path_match": "*"
-                      }
-                  }
-              ]
-            }
-            }}""";
-        String indexName = "malformed_dynamic_template";
-        assertAcked(
-            prepareCreate(indexName).setSettings(
-                Settings.builder()
-                    .put(indexSettings())
-                    .put("number_of_shards", 1)
-                    .put("index.version.created", IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersions.V_8_0_0))
-            ).setMapping(mapping)
-        );
-        prepareIndex(indexName).setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get();
-        assertNoFailures((indicesAdmin().prepareRefresh(indexName)).get());
-        assertHitCount(prepareSearch(indexName), 1);
-
-        MapperParsingException ex = expectThrows(
-            MapperParsingException.class,
-            prepareCreate("malformed_dynamic_template_8.0").setSettings(
-                Settings.builder().put(indexSettings()).put("number_of_shards", 1).put("index.version.created", IndexVersion.current())
-            ).setMapping(mapping)
-        );
-        assertThat(ex.getMessage(), containsString("dynamic template [my_template] has invalid content"));
-    }
-
-}
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java
index e6927aefbc5cf..e3092bda185fe 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java
@@ -81,9 +81,10 @@ private XContentBuilder getMappingForType() throws IOException {
     }

     public void testGetFieldMappings() throws Exception {
-
-        assertAcked(prepareCreate("indexa").setMapping(getMappingForType()));
-        assertAcked(indicesAdmin().prepareCreate("indexb").setMapping(getMappingForType()));
+        assertAcked(
+            prepareCreate("indexa").setMapping(getMappingForType()),
+            indicesAdmin().prepareCreate("indexb").setMapping(getMappingForType())
+        );

         // Get mappings by full name
         GetFieldMappingsResponse response = indicesAdmin().prepareGetFieldMappings("indexa").setFields("field1", "obj.subfield").get();
@@ -197,7 +198,7 @@ public void testGetFieldMappingsWithBlocks() throws Exception {

         try {
             enableIndexBlock("test", SETTING_BLOCKS_METADATA);
-            assertBlocked(indicesAdmin().prepareGetMappings(), INDEX_METADATA_BLOCK);
+            assertBlocked(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT), INDEX_METADATA_BLOCK);
         } finally {
             disableIndexBlock("test", SETTING_BLOCKS_METADATA);
         }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java
index 20e59fab3bd0c..023aa402b7337 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java
@@ -42,7 +42,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {

     public void testGetMappingsWhereThereAreNone() {
         createIndex("index");
-        GetMappingsResponse response = indicesAdmin().prepareGetMappings().get();
+        GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT).get();
         assertThat(response.mappings().containsKey("index"), equalTo(true));
         assertEquals(MappingMetadata.EMPTY_MAPPINGS, response.mappings().get("index"));
     }
@@ -70,19 +70,19 @@ public void testSimpleGetMappings() throws Exception {
         assertThat(clusterHealth.isTimedOut(), equalTo(false));

         // Get all mappings
-        GetMappingsResponse response = indicesAdmin().prepareGetMappings().get();
+        GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT).get();
         assertThat(response.mappings().size(), equalTo(2));
         assertThat(response.mappings().get("indexa"), notNullValue());
         assertThat(response.mappings().get("indexb"), notNullValue());

         // Get all mappings, via wildcard support
-        response = indicesAdmin().prepareGetMappings("*").get();
+        response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "*").get();
         assertThat(response.mappings().size(), equalTo(2));
         assertThat(response.mappings().get("indexa"), notNullValue());
         assertThat(response.mappings().get("indexb"), notNullValue());

         // Get mappings in indexa
-        response = indicesAdmin().prepareGetMappings("indexa").get();
+        response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "indexa").get();
         assertThat(response.mappings().size(), equalTo(1));
         assertThat(response.mappings().get("indexa"), notNullValue());
     }
@@ -94,7 +94,7 @@ public void testGetMappingsWithBlocks() throws IOException {
         for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
             try {
                 enableIndexBlock("test", block);
-                GetMappingsResponse response = indicesAdmin().prepareGetMappings().get();
+                GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT).get();
                 assertThat(response.mappings().size(), equalTo(1));
                 assertNotNull(response.mappings().get("test"));
             } finally {
@@ -104,7 +104,7 @@ public void testGetMappingsWithBlocks() throws IOException {

         try {
             enableIndexBlock("test", SETTING_BLOCKS_METADATA);
-            assertBlocked(indicesAdmin().prepareGetMappings(), INDEX_METADATA_BLOCK);
+            assertBlocked(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT), INDEX_METADATA_BLOCK);
         } finally {
             disableIndexBlock("test", SETTING_BLOCKS_METADATA);
         }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
index 6f6e488d46b23..fa2598348a1ce 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
@@ -109,7 +109,7 @@ public void testUpdateMappingWithoutType() {

         assertThat(putMappingResponse.isAcknowledged(), equalTo(true));

-        GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("test").get();
+        GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get();
         assertThat(getMappingsResponse.mappings().get("test").source().toString(), equalTo("""
             {"_doc":{"properties":{"body":{"type":"text"},"date":{"type":"integer"}}}}"""));
     }
@@ -123,7 +123,7 @@ public void testUpdateMappingWithoutTypeMultiObjects() {

         assertThat(putMappingResponse.isAcknowledged(), equalTo(true));

-        GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("test").get();
indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get(); assertThat(getMappingsResponse.mappings().get("test").source().toString(), equalTo(""" {"_doc":{"properties":{"date":{"type":"integer"}}}}""")); } @@ -215,7 +215,10 @@ public void testUpdateMappingConcurrently() throws Throwable { .get(); assertThat(response.isAcknowledged(), equalTo(true)); - GetMappingsResponse getMappingResponse = client2.admin().indices().prepareGetMappings(indexName).get(); + GetMappingsResponse getMappingResponse = client2.admin() + .indices() + .prepareGetMappings(TEST_REQUEST_TIMEOUT, indexName) + .get(); MappingMetadata mappings = getMappingResponse.getMappings().get(indexName); @SuppressWarnings("unchecked") Map properties = (Map) mappings.getSourceAsMap().get("properties"); @@ -284,7 +287,7 @@ private void assertConcreteMappingsOnAll(final String index, final String... fie * Waits for the given mapping type to exists on the master node. */ private void assertMappingOnMaster(final String index, final String... fieldNames) { - GetMappingsResponse response = indicesAdmin().prepareGetMappings(index).get(); + GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, index).get(); MappingMetadata mappings = response.getMappings().get(index); assertThat(mappings, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 7d4269550bb88..fa1348c82d71a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -2050,7 +2050,7 @@ public void testPostRecoveryMergeDisabledOnOlderIndices() throws Exception { IndexMetadata.SETTING_VERSION_CREATED, IndexVersionUtils.randomVersionBetween( random(), - IndexVersionUtils.getFirstVersion(), + IndexVersionUtils.getLowestWriteCompatibleVersion(), IndexVersionUtils.getPreviousVersion(IndexVersions.MERGE_ON_RECOVERY_VERSION) ) ) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/IndexTemplateBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/IndexTemplateBlocksIT.java index 2aa834b98987a..a5d3dcaf104ad 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/IndexTemplateBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/IndexTemplateBlocksIT.java @@ -50,7 +50,7 @@ public void testIndexTemplatesWithBlocks() throws IOException { try { setClusterReadOnly(true); - GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates("template_blocks").get(); + GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT, "template_blocks").get(); assertThat(response.getIndexTemplates(), hasSize(1)); assertBlocked( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index 8496180e85d4e..ac3fc28aab307 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -79,7 +79,7 @@ public void testSimpleIndexTemplateTests() throws Exception { 
         indicesAdmin().prepareDeleteTemplate("*").get();

         // check get all templates on an empty index.
-        GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates().get();
+        GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get();
         assertThat(response.getIndexTemplates(), empty());

         indicesAdmin().preparePutTemplate("template_1")
@@ -145,7 +145,7 @@ public void testSimpleIndexTemplateTests() throws Exception {
         );
         expectThrows(IllegalArgumentException.class, builder);

-        response = indicesAdmin().prepareGetTemplates().get();
+        response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get();
         assertThat(response.getIndexTemplates(), hasSize(2));

         // index something into test_index, will match on both templates
@@ -292,7 +292,7 @@ public void testThatGetIndexTemplatesWorks() throws Exception {
             .get();

         logger.info("--> get template template_1");
-        GetIndexTemplatesResponse getTemplate1Response = indicesAdmin().prepareGetTemplates("template_1").get();
+        GetIndexTemplatesResponse getTemplate1Response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT, "template_1").get();
         assertThat(getTemplate1Response.getIndexTemplates(), hasSize(1));
         assertThat(getTemplate1Response.getIndexTemplates().get(0), is(notNullValue()));
         assertThat(getTemplate1Response.getIndexTemplates().get(0).patterns(), is(Collections.singletonList("te*")));
@@ -300,7 +300,8 @@ public void testThatGetIndexTemplatesWorks() throws Exception {
         assertThat(getTemplate1Response.getIndexTemplates().get(0).getVersion(), is(123));

         logger.info("--> get non-existing-template");
-        GetIndexTemplatesResponse getTemplate2Response = indicesAdmin().prepareGetTemplates("non-existing-template").get();
+        GetIndexTemplatesResponse getTemplate2Response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT, "non-existing-template")
+            .get();
         assertThat(getTemplate2Response.getIndexTemplates(), hasSize(0));
     }

@@ -375,7 +376,7 @@ public void testThatGetIndexTemplatesWithSimpleRegexWorks() throws Exception {
             .get();

         logger.info("--> get template template_*");
-        GetIndexTemplatesResponse getTemplate1Response = indicesAdmin().prepareGetTemplates("template_*").get();
+        GetIndexTemplatesResponse getTemplate1Response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT, "template_*").get();
         assertThat(getTemplate1Response.getIndexTemplates(), hasSize(2));

         List<String> templateNames = new ArrayList<>();
@@ -384,7 +385,7 @@ public void testThatGetIndexTemplatesWithSimpleRegexWorks() throws Exception {
         assertThat(templateNames, containsInAnyOrder("template_1", "template_2"));

         logger.info("--> get all templates");
-        getTemplate1Response = indicesAdmin().prepareGetTemplates("template*").get();
+        getTemplate1Response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT, "template*").get();
         assertThat(getTemplate1Response.getIndexTemplates(), hasSize(3));

         templateNames = new ArrayList<>();
@@ -394,7 +395,7 @@ public void testThatGetIndexTemplatesWithSimpleRegexWorks() throws Exception {
         assertThat(templateNames, containsInAnyOrder("template_1", "template_2", "template3"));

         logger.info("--> get templates template_1 and template_2");
-        getTemplate1Response = indicesAdmin().prepareGetTemplates("template_1", "template_2").get();
+        getTemplate1Response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT, "template_1", "template_2").get();
         assertThat(getTemplate1Response.getIndexTemplates(), hasSize(2));

         templateNames = new ArrayList<>();
testThatInvalidGetIndexTemplatesFails() throws Exception { private void testExpectActionRequestValidationException(String... names) { assertRequestBuilderThrows( - indicesAdmin().prepareGetTemplates(names), + indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT, names), ActionRequestValidationException.class, "get template with " + Arrays.toString(names) ); @@ -430,7 +431,7 @@ public void testBrokenMapping() throws Exception { indicesAdmin().prepareDeleteTemplate("*").get(); // check get all templates on an empty index. - GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates().get(); + GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get(); assertThat(response.getIndexTemplates(), empty()); MapperParsingException e = expectThrows( @@ -441,7 +442,7 @@ public void testBrokenMapping() throws Exception { ); assertThat(e.getMessage(), containsString("Failed to parse mapping")); - response = indicesAdmin().prepareGetTemplates().get(); + response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get(); assertThat(response.getIndexTemplates(), hasSize(0)); } @@ -450,7 +451,7 @@ public void testInvalidSettings() throws Exception { indicesAdmin().prepareDeleteTemplate("*").get(); // check get all templates on an empty index. - GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates().get(); + GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get(); assertThat(response.getIndexTemplates(), empty()); IllegalArgumentException e = expectThrows( @@ -465,7 +466,7 @@ public void testInvalidSettings() throws Exception { e.getMessage() ); - response = indicesAdmin().prepareGetTemplates().get(); + response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get(); assertEquals(0, response.getIndexTemplates().size()); createIndex("test"); @@ -494,7 +495,7 @@ public void testIndexTemplateWithAliases() throws Exception { prepareIndex("test_index").setId("4").setSource("type", "typeY", "field", "D value").get(); prepareIndex("test_index").setId("5").setSource("type", "typeZ", "field", "E value").get(); - GetAliasesResponse getAliasesResponse = indicesAdmin().prepareGetAliases().setIndices("test_index").get(); + GetAliasesResponse getAliasesResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT).setIndices("test_index").get(); assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); assertThat(getAliasesResponse.getAliases().get("test_index").size(), equalTo(4)); @@ -538,7 +539,7 @@ public void testIndexTemplateWithAliasesInSource() { assertAcked(prepareCreate("test_index")); ensureGreen(); - GetAliasesResponse getAliasesResponse = indicesAdmin().prepareGetAliases().setIndices("test_index").get(); + GetAliasesResponse getAliasesResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT).setIndices("test_index").get(); assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); assertThat(getAliasesResponse.getAliases().get("test_index").size(), equalTo(1)); @@ -574,7 +575,7 @@ public void testIndexTemplateWithAliasesSource() { assertAcked(prepareCreate("test_index")); ensureGreen(); - GetAliasesResponse getAliasesResponse = indicesAdmin().prepareGetAliases().setIndices("test_index").get(); + GetAliasesResponse getAliasesResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT).setIndices("test_index").get(); assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); assertThat(getAliasesResponse.getAliases().get("test_index").size(), 
equalTo(3)); @@ -597,7 +598,7 @@ public void testDuplicateAlias() throws Exception { .addAlias(new Alias("my_alias").filter(termQuery("field", "value2"))) .get(); - GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates("template_1").get(); + GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT, "template_1").get(); assertThat(response.getIndexTemplates().size(), equalTo(1)); assertThat(response.getIndexTemplates().get(0).getAliases().size(), equalTo(1)); assertThat(response.getIndexTemplates().get(0).getAliases().get("my_alias").filter().string(), containsString("\"value1\"")); @@ -610,7 +611,7 @@ public void testAliasInvalidFilterValidJson() throws Exception { .addAlias(new Alias("invalid_alias").filter("{ \"invalid\": {} }")) .get(); - GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates("template_1").get(); + GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT, "template_1").get(); assertThat(response.getIndexTemplates().size(), equalTo(1)); assertThat(response.getIndexTemplates().get(0).getAliases().size(), equalTo(1)); assertThat(response.getIndexTemplates().get(0).getAliases().get("invalid_alias").filter().string(), equalTo("{\"invalid\":{}}")); @@ -631,7 +632,7 @@ public void testAliasInvalidFilterInvalidJson() throws Exception { ); assertThat(e.getMessage(), equalTo("failed to parse filter for alias [invalid_alias]")); - GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates("template_1").get(); + GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT, "template_1").get(); assertThat(response.getIndexTemplates().size(), equalTo(0)); } @@ -685,7 +686,7 @@ public void testMultipleAliasesPrecedence() throws Exception { ensureGreen(); - GetAliasesResponse getAliasesResponse = indicesAdmin().prepareGetAliases().setIndices("test").get(); + GetAliasesResponse getAliasesResponse = indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT).setIndices("test").get(); assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(4)); for (AliasMetadata aliasMetadata : getAliasesResponse.getAliases().get("test")) { @@ -747,7 +748,7 @@ public void testCombineTemplates() throws Exception { indicesAdmin().prepareDeleteTemplate("*").get(); // check get all templates on an empty index. 
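The pair of testAliasInvalidFilter* cases above pins down an asymmetry worth noting: an alias filter that is well-formed JSON is accepted when the template is stored, even if it names a query type that does not exist, while malformed JSON is rejected immediately with "failed to parse filter for alias [invalid_alias]". A sketch of that eager-syntax, deferred-semantics split; Jackson is used here purely for illustration and is not a claim about the parser Elasticsearch uses internally:

    import com.fasterxml.jackson.core.JsonProcessingException;
    import com.fasterxml.jackson.databind.ObjectMapper;

    // Sketch: check filter *syntax* when a template is stored; whether the named
    // query type exists is only discovered later, when the filter is parsed into
    // a real query for a concrete index.
    final class AliasFilterSyntaxCheck {
        private static final ObjectMapper MAPPER = new ObjectMapper();

        static void validateSyntax(String alias, String filterJson) {
            try {
                MAPPER.readTree(filterJson);
            } catch (JsonProcessingException e) {
                throw new IllegalArgumentException("failed to parse filter for alias [" + alias + "]", e);
            }
        }

        public static void main(String[] args) {
            validateSyntax("invalid_alias", "{ \"invalid\": {} }"); // accepted: well-formed JSON
            try {
                validateSyntax("invalid_alias", "{ \"invalid\": }"); // rejected: not valid JSON
            } catch (IllegalArgumentException expected) {
                System.out.println(expected.getMessage());
            }
        }
    }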
- GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates().get(); + GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get(); assertThat(response.getIndexTemplates(), empty()); // Now, a complete mapping with two separated templates is error @@ -789,7 +790,7 @@ public void testCombineTemplates() throws Exception { ); assertThat(e.getMessage(), containsString("analyzer [custom_1] has not been configured in mappings")); - response = indicesAdmin().prepareGetTemplates().get(); + response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get(); assertThat(response.getIndexTemplates(), hasSize(1)); } @@ -806,7 +807,7 @@ public void testOrderAndVersion() { .setMapping("field", "type=text") ); - GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates("versioned_template").get(); + GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT, "versioned_template").get(); assertThat(response.getIndexTemplates().size(), equalTo(1)); assertThat(response.getIndexTemplates().get(0).getVersion(), equalTo(version)); assertThat(response.getIndexTemplates().get(0).getOrder(), equalTo(order)); @@ -856,7 +857,7 @@ public void testPartitionedTemplate() throws Exception { indicesAdmin().prepareDeleteTemplate("*").get(); // check get all templates on an empty index. - GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates().get(); + GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get(); assertThat(response.getIndexTemplates(), empty()); // provide more partitions than shards @@ -882,7 +883,7 @@ public void testPartitionedTemplate() throws Exception { assertThat(eBadMapping.getMessage(), containsString("must have routing required for partitioned index")); // no templates yet - response = indicesAdmin().prepareGetTemplates().get(); + response = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get(); assertEquals(0, response.getIndexTemplates().size()); // a valid configuration that only provides the partition size diff --git a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/IndicesMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/IndicesMetricsIT.java index a8028e8671450..fee2c0494365e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/IndicesMetricsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/IndicesMetricsIT.java @@ -16,28 +16,23 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexMode; -import org.elasticsearch.index.mapper.OnScriptError; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; -import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.script.LongFieldScript; -import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptEngine; -import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.telemetry.Measurement; import org.elasticsearch.telemetry.TestTelemetryPlugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.FailingFieldPlugin; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.json.JsonXContent; import org.hamcrest.Matcher; import 
java.io.IOException; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; import static org.elasticsearch.index.mapper.DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER; import static org.hamcrest.Matchers.equalTo; @@ -82,6 +77,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { static final String STANDARD_INDEXING_COUNT = "es.indices.standard.indexing.total"; static final String STANDARD_INDEXING_TIME = "es.indices.standard.indexing.time"; static final String STANDARD_INDEXING_FAILURE = "es.indices.standard.indexing.failure.total"; + static final String STANDARD_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT = "es.indices.standard.indexing.failure.version_conflict.total"; static final String TIME_SERIES_INDEX_COUNT = "es.indices.time_series.total"; static final String TIME_SERIES_BYTES_SIZE = "es.indices.time_series.size"; @@ -95,6 +91,8 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { static final String TIME_SERIES_INDEXING_COUNT = "es.indices.time_series.indexing.total"; static final String TIME_SERIES_INDEXING_TIME = "es.indices.time_series.indexing.time"; static final String TIME_SERIES_INDEXING_FAILURE = "es.indices.time_series.indexing.failure.total"; + static final String TIME_SERIES_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT = + "es.indices.time_series.indexing.failure.version_conflict.total"; static final String LOGSDB_INDEX_COUNT = "es.indices.logsdb.total"; static final String LOGSDB_BYTES_SIZE = "es.indices.logsdb.size"; @@ -108,6 +106,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { static final String LOGSDB_INDEXING_COUNT = "es.indices.logsdb.indexing.total"; static final String LOGSDB_INDEXING_TIME = "es.indices.logsdb.indexing.time"; static final String LOGSDB_INDEXING_FAILURE = "es.indices.logsdb.indexing.failure.total"; + static final String LOGSDB_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT = "es.indices.logsdb.indexing.failure.version_conflict.total"; public void testIndicesMetrics() { String indexNode = internalCluster().startNode(); @@ -138,7 +137,9 @@ public void testIndicesMetrics() { STANDARD_INDEXING_TIME, greaterThanOrEqualTo(0L), STANDARD_INDEXING_FAILURE, - equalTo(indexing1.getIndexFailedCount() - indexing0.getIndexCount()) + equalTo(indexing1.getIndexFailedCount() - indexing0.getIndexFailedCount()), + STANDARD_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, + equalTo(indexing1.getIndexFailedDueToVersionConflictCount() - indexing0.getIndexFailedDueToVersionConflictCount()) ) ); @@ -161,7 +162,9 @@ public void testIndicesMetrics() { TIME_SERIES_INDEXING_TIME, greaterThanOrEqualTo(0L), TIME_SERIES_INDEXING_FAILURE, - equalTo(indexing2.getIndexFailedCount() - indexing1.getIndexFailedCount()) + equalTo(indexing2.getIndexFailedCount() - indexing1.getIndexFailedCount()), + TIME_SERIES_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, + equalTo(indexing2.getIndexFailedDueToVersionConflictCount() - indexing1.getIndexFailedDueToVersionConflictCount()) ) ); @@ -183,13 +186,14 @@ public void testIndicesMetrics() { LOGSDB_INDEXING_TIME, greaterThanOrEqualTo(0L), LOGSDB_INDEXING_FAILURE, - equalTo(indexing3.getIndexFailedCount() - indexing2.getIndexFailedCount()) + equalTo(indexing3.getIndexFailedCount() - indexing2.getIndexFailedCount()), + LOGSDB_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, + equalTo(indexing3.getIndexFailedDueToVersionConflictCount() - indexing2.getIndexFailedDueToVersionConflictCount()) ) ); // already collected indexing stats
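All of the indexing assertions in this test are phrased as differences between successive stats snapshots (indexing0, indexing1, indexing2, ...), so each phase is checked against only the failures it produced itself; the time series phase, for instance, compares indexing2 against indexing1. A self-contained illustration of the snapshot-delta pattern:

    // Sketch: asserting on deltas of a cumulative counter isolates one phase
    // of a test from everything that ran before it.
    final class CounterDeltaDemo {
        private static long totalFailed = 0; // cumulative, like indexing stats counters

        static long snapshot() {
            return totalFailed;
        }

        public static void main(String[] args) {
            long indexing0 = snapshot();
            totalFailed += 3; // three failures happen during this phase
            long indexing1 = snapshot();
            long delta = indexing1 - indexing0;
            if (delta != 3) {
                throw new AssertionError("expected 3 new failures in this phase, got " + delta);
            }
            System.out.println("failures in this phase: " + delta);
        }
    }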
- collectThenAssertMetrics( - telemetry, - 4, + Map<String, Matcher<Long>> zeroMatchers = new HashMap<>(); + zeroMatchers.putAll( Map.of( STANDARD_INDEXING_COUNT, equalTo(0L), @@ -197,22 +201,35 @@ public void testIndicesMetrics() { equalTo(0L), STANDARD_INDEXING_FAILURE, equalTo(0L), - + STANDARD_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, + equalTo(0L) + ) + ); + zeroMatchers.putAll( + Map.of( TIME_SERIES_INDEXING_COUNT, equalTo(0L), TIME_SERIES_INDEXING_TIME, equalTo(0L), TIME_SERIES_INDEXING_FAILURE, equalTo(0L), - + TIME_SERIES_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, + equalTo(0L) + ) + ); + zeroMatchers.putAll( + Map.of( LOGSDB_INDEXING_COUNT, equalTo(0L), LOGSDB_INDEXING_TIME, equalTo(0L), LOGSDB_INDEXING_FAILURE, + equalTo(0L), + LOGSDB_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, equalTo(0L) ) ); + collectThenAssertMetrics(telemetry, 4, zeroMatchers); String searchNode = internalCluster().startDataOnlyNode(); indicesService = internalCluster().getInstance(IndicesService.class, searchNode); telemetry = internalCluster().getInstance(PluginsService.class, searchNode) @@ -455,48 +472,4 @@ private Map<String, Object> parseMapping(String mapping) throws IOException { return parser.map(); } } - - public static class FailingFieldPlugin extends Plugin implements ScriptPlugin { - - @Override - public ScriptEngine getScriptEngine(Settings settings, Collection<ScriptContext<?>> contexts) { - return new ScriptEngine() { - @Override - public String getType() { - return "failing_field"; - } - - @Override - @SuppressWarnings("unchecked") - public <FactoryType> FactoryType compile( - String name, - String code, - ScriptContext<FactoryType> context, - Map<String, Object> params - ) { - return (FactoryType) new LongFieldScript.Factory() { - @Override - public LongFieldScript.LeafFactory newFactory( - String fieldName, - Map<String, Object> params, - SearchLookup searchLookup, - OnScriptError onScriptError - ) { - return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) { - @Override - public void execute() { - throw new IllegalStateException("Accessing failing field"); - } - }; - } - }; - } - - @Override - public Set<ScriptContext<?>> getSupportedContexts() { - return Set.of(LongFieldScript.CONTEXT); - } - }; - } - } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java index e4d44212f2854..04130d176b9e5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java
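A plausible motivation for the zeroMatchers rewrite above: java.util.Map.of only provides fixed-arity overloads for up to ten key-value pairs, and the three new *version_conflict* keys push the matcher map to twelve entries, so the literal is rebuilt as a HashMap filled via putAll in related groups. A standalone illustration:

    import java.util.HashMap;
    import java.util.Map;

    final class MapOfLimitDemo {
        public static void main(String[] args) {
            // Map.of(...) stops at 10 pairs; Map.ofEntries or putAll is needed
            // beyond that. Building in chunks keeps related keys grouped.
            Map<String, Long> zeroValues = new HashMap<>();
            zeroValues.putAll(Map.of("standard.indexing.total", 0L, "standard.indexing.failure", 0L));
            zeroValues.putAll(Map.of("time_series.indexing.total", 0L, "time_series.indexing.failure", 0L));
            zeroValues.putAll(Map.of("logsdb.indexing.total", 0L, "logsdb.indexing.failure", 0L));
            System.out.println(zeroValues.size() + " entries collected");
        }
    }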
@@ -9,14 +9,17 @@ package org.elasticsearch.monitor.metrics; +import org.apache.lucene.analysis.TokenStream; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.bulk.IncrementalBulkService; import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -25,6 +28,13 @@ import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; +import org.elasticsearch.index.analysis.TokenFilterFactory; +import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.rest.RestStatus; @@ -43,13 +53,16 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; +import static java.util.Collections.singletonMap; import static org.elasticsearch.index.IndexingPressure.MAX_COORDINATING_BYTES; import static org.elasticsearch.index.IndexingPressure.MAX_PRIMARY_BYTES; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) @@ -66,7 +79,7 @@ public List<Setting<?>> getSettings() { @Override protected Collection<Class<? extends Plugin>> nodePlugins() { - return List.of(TestTelemetryPlugin.class, TestAPMInternalSettings.class); + return List.of(TestTelemetryPlugin.class, TestAPMInternalSettings.class, TestAnalysisPlugin.class); } @Override @@ -77,6 +90,197 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { .build(); } + public void testZeroMetricsForVersionConflictsForNonIndexingOperations() { + final String dataNode = internalCluster().startNode(); + ensureStableCluster(1); + + final TestTelemetryPlugin plugin = internalCluster().getInstance(PluginsService.class, dataNode) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + plugin.resetMeter(); + + assertAcked(prepareCreate("index_no_refresh", Settings.builder().put("index.refresh_interval", "-1"))); + assertAcked(prepareCreate("index_with_default_refresh")); + + for (String indexName : List.of("index_no_refresh", "index_with_default_refresh")) { + String docId = randomUUID(); + client(dataNode).index(new IndexRequest(indexName).id(docId).source(Map.of())).actionGet(); + // test version conflicts are counted when getting from the translog + if (randomBoolean()) { + // this get has the side effect of tracking translog location in the live version map, + // which potentially influences the engine conflict exception path + client(dataNode).get(new GetRequest(indexName, docId).realtime(randomBoolean())).actionGet(); + } + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).get( + new GetRequest(indexName, docId).version(10).versionType(randomFrom(VersionType.EXTERNAL, VersionType.EXTERNAL_GTE)) + ).actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + if (randomBoolean()) { + client(dataNode).get(new GetRequest(indexName, docId).realtime(false)).actionGet(); + } + client(dataNode).admin().indices().prepareRefresh(indexName).get(); + { + var e = expectThrows(
VersionConflictEngineException.class, + () -> client(dataNode).get( + new GetRequest(indexName, docId).version(5) + .versionType(randomFrom(VersionType.EXTERNAL, VersionType.EXTERNAL_GTE)) + .realtime(false) + ).actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + // updates + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).update( + new UpdateRequest(indexName, docId).setIfPrimaryTerm(1) + .setIfSeqNo(randomIntBetween(2, 5)) + .doc(Map.of(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))) + ).actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + // deletes + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).delete( + new DeleteRequest(indexName, docId).setIfPrimaryTerm(randomIntBetween(2, 5)).setIfSeqNo(0) + ).actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + } + + // simulate async apm `polling` call for metrics + plugin.collect(); + + // there are no indexing (version conflict) failures reported because only gets/updates/deletes generated the conflicts + // and those are not "indexing" operations + var indexingFailedTotal = getSingleRecordedMetric(plugin::getLongAsyncCounterMeasurement, "es.indexing.indexing.failed.total"); + assertThat(indexingFailedTotal.getLong(), equalTo(0L)); + var indexingFailedDueToVersionConflictTotal = getSingleRecordedMetric( + plugin::getLongAsyncCounterMeasurement, + "es.indexing.indexing.failed.version_conflict.total" + ); + assertThat(indexingFailedDueToVersionConflictTotal.getLong(), equalTo(0L)); + } + + public void testMetricsForIndexingVersionConflicts() { + final String dataNode = internalCluster().startNode(); + ensureStableCluster(1); + + final TestTelemetryPlugin plugin = internalCluster().getInstance(PluginsService.class, dataNode) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + plugin.resetMeter(); + + assertAcked( + prepareCreate( + "test", + Settings.builder() + .put("index.refresh_interval", "-1") + .put("index.analysis.analyzer.test_analyzer.type", "custom") + .put("index.analysis.analyzer.test_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.test_analyzer.filter", "test_token_filter") + ).setMapping(Map.of("properties", Map.of("test_field", Map.of("type", "text", "analyzer", "test_analyzer")))).get() + ); + + String docId = randomUUID(); + // successful index (with version) + client(dataNode).index( + new IndexRequest("test").id(docId) + .version(10) + .versionType(randomFrom(VersionType.EXTERNAL, VersionType.EXTERNAL_GTE)) + .source(Map.of()) + ).actionGet(); + // if_primary_term conflict + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).index(new IndexRequest("test").id(docId).source(Map.of()).setIfSeqNo(0).setIfPrimaryTerm(2)) + .actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + // if_seq_no conflict + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).index(new IndexRequest("test").id(docId).source(Map.of()).setIfSeqNo(1).setIfPrimaryTerm(1)) + .actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), 
is(RestStatus.CONFLICT)); + } + // version conflict + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).index( + new IndexRequest("test").id(docId) + .source(Map.of()) + .version(3) + .versionType(randomFrom(VersionType.EXTERNAL, VersionType.EXTERNAL_GTE)) + ).actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + // indexing failure that is NOT a version conflict + PluginsService pluginService = internalCluster().getInstance(PluginsService.class, dataNode); + pluginService.filterPlugins(TestAnalysisPlugin.class).forEach(p -> p.throwParsingError.set(true)); + { + var e = expectThrows( + MapperParsingException.class, + () -> client(dataNode).index(new IndexRequest("test").id(docId + "other").source(Map.of("test_field", "this will error"))) + .actionGet() + ); + assertThat(e.status(), is(RestStatus.BAD_REQUEST)); + } + + plugin.collect(); + + var indexingFailedTotal = getSingleRecordedMetric(plugin::getLongAsyncCounterMeasurement, "es.indexing.indexing.failed.total"); + assertThat(indexingFailedTotal.getLong(), equalTo(4L)); + var indexingFailedDueToVersionConflictTotal = getSingleRecordedMetric( + plugin::getLongAsyncCounterMeasurement, + "es.indexing.indexing.failed.version_conflict.total" + ); + assertThat(indexingFailedDueToVersionConflictTotal.getLong(), equalTo(3L)); + } + + public static final class TestAnalysisPlugin extends Plugin implements AnalysisPlugin { + final AtomicBoolean throwParsingError = new AtomicBoolean(false); + + @Override + public Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> getTokenFilters() { + return singletonMap("test_token_filter", (indexSettings, environment, name, settings) -> new AbstractTokenFilterFactory(name) { + @Override + public TokenStream create(TokenStream tokenStream) { + if (throwParsingError.get()) { + throw new MapperParsingException("simulate mapping parsing error"); + } + return tokenStream; + } + }); + } + } + public void testNodeIndexingMetricsArePublishing() { final String dataNode = internalCluster().startNode(); @@ -116,6 +320,11 @@ public void testNodeIndexingMetricsArePublishing() { var indexingFailedTotal = getSingleRecordedMetric(plugin::getLongAsyncCounterMeasurement, "es.indexing.indexing.failed.total"); assertThat(indexingFailedTotal.getLong(), equalTo(0L)); + var indexingFailedDueToVersionConflictTotal = getSingleRecordedMetric( + plugin::getLongAsyncCounterMeasurement, + "es.indexing.indexing.failed.version_conflict.total" + ); + assertThat(indexingFailedDueToVersionConflictTotal.getLong(), equalTo(0L)); var deletionTotal = getSingleRecordedMetric(plugin::getLongAsyncCounterMeasurement, "es.indexing.deletion.docs.total"); assertThat(deletionTotal.getLong(), equalTo((long) deletesCount)); @@ -336,8 +545,10 @@ public void testPrimaryDocumentRejectionMetricsArePublishing() { plugin.resetMeter(); final int numberOfShards = randomIntBetween(1, 5); - assertAcked(prepareCreate("test-one", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)).get()); - assertAcked(prepareCreate("test-two", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)).get()); + assertAcked( + prepareCreate("test-one", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)), + prepareCreate("test-two", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)) + ); final BulkRequest bulkRequestOne = new BulkRequest(); final int batchCountOne = randomIntBetween(50, 100); @@ -397,8 +608,10 @@ public void
testPrimaryDocumentRejectionMetricsFluctuatingOverTime() throws Exce ensureStableCluster(2); // for simplicity do not mix small and big documents in single index/shard - assertAcked(prepareCreate("test-index-one", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)).get()); - assertAcked(prepareCreate("test-index-two", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)).get()); + assertAcked( + prepareCreate("test-index-one", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)), + prepareCreate("test-index-two", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)) + ); final TestTelemetryPlugin plugin = internalCluster().getInstance(PluginsService.class, dataNode) .filterPlugins(TestTelemetryPlugin.class) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java index 77c4f8a26f478..43bca39c02ce5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java @@ -16,8 +16,8 @@ import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.util.CollectionUtils; @@ -365,11 +365,10 @@ private void iterateAssertCount(final int numberOfShards, final int iterations, ); } - ClusterService clusterService = clusterService(); - final ClusterState state = clusterService.state(); + final ClusterState state = clusterService().state(); for (int shard = 0; shard < numberOfShards; shard++) { for (String id : ids) { - ShardId docShard = clusterService.operationRouting().shardId(state, "test", id, null); + ShardId docShard = OperationRouting.shardId(state, "test", id, null); if (docShard.id() == shard) { final IndexShardRoutingTable indexShardRoutingTable = state.routingTable().shardRoutingTable("test", shard); for (int copy = 0; copy < indexShardRoutingTable.size(); copy++) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java index 4460bd2e0c302..c10217b93c868 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java @@ -19,7 +19,6 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.Strings; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -109,8 +108,6 @@ public void testGetShardSnapshotOnEmptyRepositoriesListThrowsAnError() { expectThrows(IllegalArgumentException.class, () -> getLatestSnapshotForShardFuture(Collections.emptyList(), "idx", 0, false)); } - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) - // below we were 
selecting an index version between current and 7.5.0, this has been updated to 8.0.0 now but that might need to change public void testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exception { final String repoName = "repo-name"; final Path repoPath = randomRepoPath(); @@ -118,7 +115,7 @@ public void testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exce final boolean useBwCFormat = randomBoolean(); if (useBwCFormat) { - final IndexVersion version = randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()); + final IndexVersion version = randomVersionBetween(random(), IndexVersions.V_7_5_0, IndexVersion.current()); initWithSnapshotVersion(repoName, repoPath, version); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java index 8e0dee2396411..c4c19dd8748a6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java @@ -456,14 +456,14 @@ private void assertComponentAndIndexTemplateDelete(CountDownLatch savedClusterSt final var response = client().execute( GetComposableIndexTemplateAction.INSTANCE, - new GetComposableIndexTemplateAction.Request("template*") + new GetComposableIndexTemplateAction.Request(TEST_REQUEST_TIMEOUT, "template*") ).get(); assertThat(response.indexTemplates().keySet().stream().collect(Collectors.toSet()), containsInAnyOrder("template_1", "template_2")); final var componentResponse = client().execute( GetComponentTemplateAction.INSTANCE, - new GetComponentTemplateAction.Request("other*") + new GetComponentTemplateAction.Request(TEST_REQUEST_TIMEOUT, "other*") ).get(); assertTrue(componentResponse.getComponentTemplates().isEmpty()); @@ -594,7 +594,7 @@ private void assertClusterStateNotSaved(CountDownLatch savedClusterState, Atomic final var response = client().execute( GetComposableIndexTemplateAction.INSTANCE, - new GetComposableIndexTemplateAction.Request("err*") + new GetComposableIndexTemplateAction.Request(TEST_REQUEST_TIMEOUT, "err*") ).get(); assertTrue(response.indexTemplates().isEmpty()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java index 199c9a9fb4c8c..e14c7c8368917 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasRoutingIT.java @@ -123,19 +123,25 @@ public void testAliasSearchRouting() throws Exception { logger.info("--> search with wrong routing, should not find"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()), 0); - assertHitCount(prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()), 0); - assertHitCount(prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()), 0); - assertHitCount(prepareSearch("alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 0); + assertHitCount( + 0, + prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()), + 
prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with correct routing, should find"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch().setSize(0).setRouting("0").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch("alias0").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 1); + assertHitCount( + 1, + prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch().setSize(0).setRouting("0").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias0").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> indexing with id [2], and routing [1] using alias"); @@ -143,50 +149,71 @@ public void testAliasSearchRouting() throws Exception { logger.info("--> search with no routing, should fine two"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch().setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()), 2); + assertHitCount( + 2, + prepareSearch().setQuery(QueryBuilders.matchAllQuery()), + prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with 0 routing, should find one"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch().setSize(0).setRouting("0").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch("alias0").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 1); + assertHitCount( + 1, + prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch().setSize(0).setRouting("0").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias0").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with 1 routing, should find one"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch("alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 1); + assertHitCount( + 1, + prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with 0,1 indexRoutings , should find two"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch().setSize(0).setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch("alias01").setQuery(QueryBuilders.matchAllQuery()), 2); - 
assertHitCount(prepareSearch("alias01").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 2); + assertHitCount( + 2, + prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch().setSize(0).setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias01").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias01").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with two routing aliases , should find two"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch("alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch("alias0", "alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 2); + assertHitCount( + 2, + prepareSearch("alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias0", "alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with alias0, alias1 and alias01, should find two"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch("alias0", "alias1", "alias01").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch("alias0", "alias1", "alias01").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 2); + assertHitCount( + 2, + prepareSearch("alias0", "alias1", "alias01").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias0", "alias1", "alias01").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with test, alias0 and alias1, should find two"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch("test", "alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch("test", "alias0", "alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 2); + assertHitCount( + 2, + prepareSearch("test", "alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("test", "alias0", "alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } } @@ -236,20 +263,29 @@ public void testAliasSearchRoutingWithTwoIndices() throws Exception { logger.info("--> search with alias-a1,alias-b0, should not find"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch("alias-a1", "alias-b0").setQuery(QueryBuilders.matchAllQuery()), 0); - assertHitCount(prepareSearch("alias-a1", "alias-b0").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 0); + assertHitCount( + 0, + prepareSearch("alias-a1", "alias-b0").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias-a1", "alias-b0").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with alias-ab, should find two"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch("alias-ab").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch("alias-ab").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 2); + assertHitCount( + 2, + prepareSearch("alias-ab").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias-ab").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with alias-a0,alias-b1 should find two"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch("alias-a0", "alias-b1").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch("alias-a0", "alias-b1").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 2); + assertHitCount( + 2, + prepareSearch("alias-a0", "alias-b1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias-a0", 
"alias-b1").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } } @@ -317,8 +353,11 @@ public void testIndexingAliasesOverTime() throws Exception { logger.info("--> verifying get and search with routing, should find"); for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "0").setRouting("3").get().isExists(), equalTo(true)); - assertHitCount(prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 1); + assertHitCount( + 1, + prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> creating alias with routing [4]"); @@ -326,8 +365,11 @@ public void testIndexingAliasesOverTime() throws Exception { logger.info("--> verifying search with wrong routing should not find"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()), 0); - assertHitCount(prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 0); + assertHitCount( + 0, + prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> creating alias with search routing [3,4] and index routing 4"); @@ -344,8 +386,11 @@ public void testIndexingAliasesOverTime() throws Exception { for (int i = 0; i < 5; i++) { assertThat(client().prepareGet("test", "0").setRouting("3").get().isExists(), equalTo(true)); assertThat(client().prepareGet("test", "1").setRouting("4").get().isExists(), equalTo(true)); - assertHitCount(prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()), 2); - assertHitCount(prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()), 2); + assertHitCount( + 2, + prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch("alias").setSize(0).setQuery(QueryBuilders.matchAllQuery()) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java index 246138fa5573d..06d850e0d9abc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -141,14 +141,20 @@ public void testSimpleSearchRouting() { logger.info("--> search with wrong routing, should not find"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()), 0); - assertHitCount(prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()), 0); + assertHitCount( + 0, + prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()), + prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()) + ); } logger.info("--> search with correct routing, should find"); for (int i = 0; i < 5; i++) { - assertHitCount(prepareSearch().setRouting(routingValue).setQuery(QueryBuilders.matchAllQuery()), 1); - assertHitCount(prepareSearch().setSize(0).setRouting(routingValue).setQuery(QueryBuilders.matchAllQuery()), 1); + assertHitCount( + 1, + prepareSearch().setRouting(routingValue).setQuery(QueryBuilders.matchAllQuery()), + prepareSearch().setSize(0).setRouting(routingValue).setQuery(QueryBuilders.matchAllQuery()) + ); } String secondRoutingValue = "1"; diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index 907f943e68422..6c67bd2a98606 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -56,7 +56,7 @@ protected boolean forbidPrivateIndexSettings() { return false; } - private final IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + private final IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); private IndexRequestBuilder indexCity(String idx, String name, String... latLons) throws Exception { XContentBuilder source = jsonBuilder().startObject().field("city", name); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java index 1ad7d1a11bea7..1de51d6df8197 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java @@ -49,7 +49,7 @@ protected boolean forbidPrivateIndexSettings() { return false; } - private final IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + private final IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); static Map<String, Integer> expectedDocCountsForGeoHash = null; static Map<String, Integer> multiValuedExpectedDocCountsForGeoHash = null; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index 80c47d6180db0..affa371d92aa9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -119,10 +119,10 @@ public static String randomExecutionHint() { @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(prepareCreate("idx").setMapping(TERMS_AGGS_FIELD, "type=keyword", "text", "type=text,store=true")); - assertAcked(prepareCreate("field-collapsing").setMapping("group", "type=keyword")); - createIndex("empty"); assertAcked( + prepareCreate("idx").setMapping(TERMS_AGGS_FIELD, "type=keyword", "text", "type=text,store=true"), + prepareCreate("field-collapsing").setMapping("group", "type=keyword"), + prepareCreate("empty"), prepareCreate("articles").setMapping( jsonBuilder().startObject() .startObject("_doc") diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/QueryRewriteContextIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/QueryRewriteContextIT.java index f4aa34c9b47b7..965fd0d8011c0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/QueryRewriteContextIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/QueryRewriteContextIT.java @@ -154,8 +154,10 @@ public void testResolvedIndices_TransportSearchAction() { public void testResolvedIndices_TransportExplainAction() { final String[] indices = { "test1", "test2" }; createIndex(indices); -
assertAcked(indicesAdmin().prepareAliases().addAlias("test1", "alias1")); - assertAcked(indicesAdmin().prepareAliases().addAlias(indices, "alias2")); + assertAcked( + indicesAdmin().prepareAliases().addAlias("test1", "alias1"), + indicesAdmin().prepareAliases().addAlias(indices, "alias2") + ); assertResolvedIndices(client().prepareExplain("test1", "1"), Set.of("test1"), Set.of("test1"), r -> {}); assertResolvedIndices(client().prepareExplain("alias1", "1"), Set.of("alias1"), Set.of("test1"), r -> {}); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java index 9c1daccd2cc9e..ab79fd7ba1813 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java @@ -40,18 +40,12 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.SkipUnavailableRule; +import org.elasticsearch.test.SkipUnavailableRule.NotSkipped; import org.elasticsearch.usage.UsageService; import org.junit.Assert; import org.junit.Rule; -import org.junit.rules.TestRule; -import org.junit.runner.Description; -import org.junit.runners.model.Statement; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.util.Arrays; + import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -59,8 +53,6 @@ import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import java.util.function.Function; -import java.util.stream.Collectors; import static org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry.ASYNC_FEATURE; import static org.elasticsearch.action.admin.cluster.stats.CCSUsageTelemetry.MRT_FEATURE; @@ -498,7 +490,7 @@ public void testRemoteOnlyTimesOut() throws Exception { assertThat(perCluster.get(REMOTE2), equalTo(null)); } - @SkipOverride(aliases = { REMOTE1 }) + @NotSkipped(aliases = { REMOTE1 }) public void testRemoteTimesOutFailure() throws Exception { Map testClusterInfo = setupClusters(); String remoteIndex = (String) testClusterInfo.get("remote.index"); @@ -528,7 +520,7 @@ public void testRemoteTimesOutFailure() throws Exception { /** * Search when all the remotes failed and not skipped */ - @SkipOverride(aliases = { REMOTE1, REMOTE2 }) + @NotSkipped(aliases = { REMOTE1, REMOTE2 }) public void testFailedAllRemotesSearch() throws Exception { Map testClusterInfo = setupClusters(); String localIndex = (String) testClusterInfo.get("local.index"); @@ -577,7 +569,7 @@ public void testRemoteHasNoIndex() throws Exception { /** * Test that we're still counting remote search even if remote cluster has no such index */ - @SkipOverride(aliases = { REMOTE1 }) + @NotSkipped(aliases = { REMOTE1 }) public void testRemoteHasNoIndexFailure() throws Exception { SearchRequest searchRequest = makeSearchRequest(REMOTE1 + ":no_such_index"); CCSTelemetrySnapshot telemetry = getTelemetryFromFailedSearch(searchRequest); @@ -695,40 +687,4 @@ private void indexDocs(Client client, String index, ActionListener listene bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).execute(listener.safeMap(r -> null)); } - /** - * Annotation to mark 
specific cluster in a test as not to be skipped when unavailable - */ - @Retention(RetentionPolicy.RUNTIME) - @Target(ElementType.METHOD) - @interface SkipOverride { - String[] aliases(); - } - - /** - * Test rule to process skip annotations - */ - static class SkipUnavailableRule implements TestRule { - private final Map<String, Boolean> skipMap; - - SkipUnavailableRule(String... clusterAliases) { - this.skipMap = Arrays.stream(clusterAliases).collect(Collectors.toMap(Function.identity(), alias -> true)); - } - - public Map<String, Boolean> getMap() { - return skipMap; - } - - @Override - public Statement apply(Statement base, Description description) { - // Check for annotation named "SkipOverride" and set the overrides accordingly - var aliases = description.getAnnotation(SkipOverride.class); - if (aliases != null) { - for (String alias : aliases.aliases()) { - skipMap.put(alias, false); - } - } - return base; - } - - } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java index 57a9f8131ac2d..21dca6a35659a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java @@ -183,7 +183,11 @@ public void testProxyConnectionDisconnect() throws Exception { } } }); - assertBusy(() -> assertTrue(future.isDone())); + try { + future.get(); + } catch (ExecutionException e) { + // ignored + } configureAndConnectsToRemoteClusters(); } finally { SearchListenerPlugin.allowQueryPhase(); @@ -298,20 +302,21 @@ public void testCancel() throws Exception { } SearchListenerPlugin.allowQueryPhase(); - assertBusy(() -> assertTrue(queryFuture.isDone())); - assertBusy(() -> assertTrue(cancelFuture.isDone())); + try { + queryFuture.get(); + fail("query should have failed"); + } catch (ExecutionException e) { + assertNotNull(e.getCause()); + Throwable t = ExceptionsHelper.unwrap(e, TaskCancelledException.class); + assertNotNull(t); + } + cancelFuture.get(); assertBusy(() -> { final Iterable<TransportService> transportServices = cluster("cluster_a").getInstances(TransportService.class); for (TransportService transportService : transportServices) { assertThat(transportService.getTaskManager().getBannedTaskIds(), Matchers.empty()); } }); - - ExecutionException e = expectThrows(ExecutionException.class, () -> queryFuture.result()); - assertNotNull(e); - assertNotNull(e.getCause()); - Throwable t = ExceptionsHelper.unwrap(e, TaskCancelledException.class); - assertNotNull(t); } /** diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index 823d3198bc7a2..d4f60a868dcd4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -379,11 +379,9 @@ public void testClusterDetailsAfterCCSWithFailuresOnRemoteClusterOnly() throws E r.incRef(); l.onResponse(r); })); - assertBusy(() -> assertTrue(queryFuture.isDone())); - // dfs=true overrides the minimize_roundtrips=true setting and does not minimize roundtrips if (skipUnavailable == false && minimizeRoundtrips && dfs == false) { - ExecutionException ee = expectThrows(ExecutionException.class, () -> queryFuture.get()); + ExecutionException ee = expectThrows(ExecutionException.class, queryFuture::get); assertNotNull(ee.getCause()); assertThat(ee.getCause(), instanceOf(RemoteTransportException.class)); Throwable rootCause = ExceptionsHelper.unwrap(ee.getCause(), IllegalStateException.class); @@ -622,10 +620,8 @@ public void testRemoteClusterOnlyCCSWithFailuresOnAllShards() throws Exception { r.incRef(); l.onResponse(r); })); - assertBusy(() -> assertTrue(queryFuture.isDone())); - if (skipUnavailable == false || minimizeRoundtrips == false) { - ExecutionException ee = expectThrows(ExecutionException.class, () -> queryFuture.get()); + ExecutionException ee = expectThrows(ExecutionException.class, queryFuture::get); assertNotNull(ee.getCause()); Throwable rootCause = ExceptionsHelper.unwrap(ee, IllegalStateException.class); assertThat(rootCause.getMessage(), containsString("index corrupted"));
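The pattern change in these CrossCluster tests is worth spelling out: assertBusy(() -> assertTrue(future.isDone())) polls for completion and then inspects the result in a second step, while a plain blocking get() (or expectThrows(ExecutionException.class, future::get)) waits and surfaces the failure in one atomic step, removing the poll loop and its timeout. A minimal sketch with CompletableFuture standing in for the tests' action futures (an assumption; the real tests use Elasticsearch's own future types):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;

    final class BlockingGetDemo {
        public static void main(String[] args) {
            CompletableFuture<String> future = CompletableFuture.supplyAsync(() -> {
                throw new IllegalStateException("index corrupted");
            });
            try {
                future.get(); // waits for completion AND rethrows the failure in one step
                System.out.println("query should have failed");
            } catch (ExecutionException e) {
                // The original failure travels as the cause, which is exactly
                // what the rewritten assertions unwrap and inspect.
                System.out.println("failed with: " + e.getCause().getMessage());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    }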
queryFuture::get); assertNotNull(ee.getCause()); assertThat(ee.getCause(), instanceOf(RemoteTransportException.class)); Throwable rootCause = ExceptionsHelper.unwrap(ee.getCause(), IllegalStateException.class); @@ -622,10 +620,8 @@ public void testRemoteClusterOnlyCCSWithFailuresOnAllShards() throws Exception { r.incRef(); l.onResponse(r); })); - assertBusy(() -> assertTrue(queryFuture.isDone())); - if (skipUnavailable == false || minimizeRoundtrips == false) { - ExecutionException ee = expectThrows(ExecutionException.class, () -> queryFuture.get()); + ExecutionException ee = expectThrows(ExecutionException.class, queryFuture::get); assertNotNull(ee.getCause()); Throwable rootCause = ExceptionsHelper.unwrap(ee, IllegalStateException.class); assertThat(rootCause.getMessage(), containsString("index corrupted")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index fc105d3d4fcd2..ee04ae624b16f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -2632,6 +2632,41 @@ public void testPostingsHighlighterOrderByScore() throws Exception { }); } + public void testMaxQueryOffsetDefault() throws Exception { + assertAcked( + prepareCreate("test").setMapping(type1PostingsffsetsMapping()) + .setSettings(Settings.builder().put("index.highlight.max_analyzed_offset", "10").build()) + ); + ensureGreen(); + + prepareIndex("test").setSource( + "field1", + new String[] { + "This sentence contains one match, not that short. This sentence contains zero sentence matches. " + + "This one contains no matches.", + "This is the second value's first sentence. This one contains no matches. " + + "This sentence contains three sentence occurrences (sentence).", + "One sentence match here and scored lower since the text is quite long, not that appealing. " + + "This one contains no matches." } + ).get(); + refresh(); + + // Specific for this test: by passing "-1" as "maxAnalyzedOffset", the index highlight setting above will be used. + SearchSourceBuilder source = searchSource().query(termQuery("field1", "sentence")) + .highlighter(highlight().field("field1").order("score").maxAnalyzedOffset(-1)); + + assertResponse(client().search(new SearchRequest("test").source(source)), response -> { + Map highlightFieldMap = response.getHits().getAt(0).getHighlightFields(); + assertThat(highlightFieldMap.size(), equalTo(1)); + HighlightField field1 = highlightFieldMap.get("field1"); + assertThat(field1.fragments().length, equalTo(1)); + assertThat( + field1.fragments()[0].string(), + equalTo("This sentence contains one match, not that short. 
This sentence contains zero sentence matches.") ); }); } + public void testPostingsHighlighterEscapeHtml() throws Exception { assertAcked(prepareCreate("test").setMapping("title", "type=text," + randomStoreField() + "index_options=offsets")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index 6cca07bfacc9e..fe49ce57d0400 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -15,6 +15,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; @@ -380,8 +381,10 @@ public void testTwoIndicesOneClosedIgnoreUnavailable() { } public void testWithIndexFilter() throws InterruptedException { - assertAcked(prepareCreate("index-1").setMapping("timestamp", "type=date", "field1", "type=keyword")); - assertAcked(prepareCreate("index-2").setMapping("timestamp", "type=date", "field1", "type=long")); + assertAcked( + prepareCreate("index-1").setMapping("timestamp", "type=date", "field1", "type=keyword"), + prepareCreate("index-2").setMapping("timestamp", "type=date", "field1", "type=long") + ); List<IndexRequestBuilder> reqs = new ArrayList<>(); reqs.add(prepareIndex("index-1").setSource("timestamp", "2015-07-08")); @@ -474,8 +477,7 @@ public void testFieldMetricsAndDimensions() { public void testFailures() throws InterruptedException { // in addition to the existing "old_index" and "new_index", create two where the test query throws an error on rewrite - assertAcked(prepareCreate("index1-error")); - assertAcked(prepareCreate("index2-error")); + assertAcked(prepareCreate("index1-error"), prepareCreate("index2-error")); ensureGreen("index1-error", "index2-error"); FieldCapabilitiesResponse response = client().prepareFieldCaps() .setFields("*") @@ -503,9 +505,7 @@ private void populateTimeRangeIndices() throws Exception { internalCluster().ensureAtLeastNumDataNodes(2); assertAcked( prepareCreate("log-index-1").setSettings(indexSettings(between(1, 5), 1)) - .setMapping("timestamp", "type=date", "field1", "type=keyword") - ); - assertAcked( + .setMapping("timestamp", "type=date", "field1", "type=keyword"), prepareCreate("log-index-2").setSettings(indexSettings(between(1, 5), 1)) .setMapping("timestamp", "type=date", "field1", "type=long") ); @@ -666,9 +666,11 @@ public void testManyIndicesWithSameMapping() { } """; String[] indices = IntStream.range(0, between(1, 9)).mapToObj(n -> "test_many_index_" + n).toArray(String[]::new); - for (String index : indices) { - assertAcked(indicesAdmin().prepareCreate(index).setMapping(mapping).get()); - } + assertAcked( + Arrays.stream(indices) + .map(index -> indicesAdmin().prepareCreate(index).setMapping(mapping)) + .toArray(CreateIndexRequestBuilder[]::new) + ); FieldCapabilitiesRequest request = new FieldCapabilitiesRequest(); request.indices("test_many_index_*"); request.fields("*"); @@ -787,9 +789,11 @@ public void testIndexMode() throws Exception { 
Settings settings = Settings.builder().put("mode", "time_series").putList("routing_path", List.of("hostname")).build(); int numIndices = between(1, 5); for (int i = 0; i < numIndices; i++) { - assertAcked(indicesAdmin().prepareCreate("test_metrics_" + i).setSettings(settings).setMapping(metricsMapping).get()); + assertAcked( + indicesAdmin().prepareCreate("test_metrics_" + i).setSettings(settings).setMapping(metricsMapping), + indicesAdmin().prepareCreate("test_old_metrics_" + i).setMapping(metricsMapping) + ); indexModes.put("test_metrics_" + i, IndexMode.TIME_SERIES); - assertAcked(indicesAdmin().prepareCreate("test_old_metrics_" + i).setMapping(metricsMapping).get()); indexModes.put("test_old_metrics_" + i, IndexMode.STANDARD); } } @@ -808,9 +812,11 @@ public void testIndexMode() throws Exception { Settings settings = Settings.builder().put("mode", "logsdb").build(); int numIndices = between(1, 5); for (int i = 0; i < numIndices; i++) { - assertAcked(indicesAdmin().prepareCreate("test_logs_" + i).setSettings(settings).setMapping(logsMapping).get()); + assertAcked( + indicesAdmin().prepareCreate("test_logs_" + i).setSettings(settings).setMapping(logsMapping), + indicesAdmin().prepareCreate("test_old_logs_" + i).setMapping(logsMapping) + ); indexModes.put("test_logs_" + i, IndexMode.LOGSDB); - assertAcked(indicesAdmin().prepareCreate("test_old_logs_" + i).setMapping(logsMapping).get()); indexModes.put("test_old_logs_" + i, IndexMode.STANDARD); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index 9988624f6a677..a55edf3782bcc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -748,7 +748,7 @@ public void testDateWithoutOrigin() throws Exception { } public void testManyDocsLin() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = jsonBuilder().startObject() .startObject("_doc") diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java index a7efb2fe0e68b..fbdcfe26d28ee 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java @@ -38,6 +38,7 @@ import org.elasticsearch.search.collapse.CollapseBuilder; import org.elasticsearch.search.rescore.QueryRescoreMode; import org.elasticsearch.search.rescore.QueryRescorerBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.ParseField; @@ -840,6 +841,20 @@ public void testRescorePhaseWithInvalidSort() throws Exception { } } ); + + assertResponse( + prepareSearch().addSort(SortBuilders.scoreSort()) + .addSort(new FieldSortBuilder(FieldSortBuilder.SHARD_DOC_FIELD_NAME)) + .setTrackScores(true) + .addRescorer(new 
QueryRescorerBuilder(matchAllQuery()).setRescoreQueryWeight(100.0f), 50), + response -> { + assertThat(response.getHits().getTotalHits().value(), equalTo(5L)); + assertThat(response.getHits().getHits().length, equalTo(5)); + for (SearchHit hit : response.getHits().getHits()) { + assertThat(hit.getScore(), equalTo(101f)); + } + } + ); } record GroupDoc(String id, String group, float firstPassScore, float secondPassScore, boolean shouldFilter) {} @@ -879,6 +894,10 @@ public void testRescoreAfterCollapse() throws Exception { .setQuery(fieldValueScoreQuery("firstPassScore")) .addRescorer(new QueryRescorerBuilder(fieldValueScoreQuery("secondPassScore"))) .setCollapse(new CollapseBuilder("group")); + if (randomBoolean()) { + request.addSort(SortBuilders.scoreSort()); + request.addSort(new FieldSortBuilder(FieldSortBuilder.SHARD_DOC_FIELD_NAME)); + } assertResponse(request, resp -> { assertThat(resp.getHits().getTotalHits().value(), equalTo(5L)); assertThat(resp.getHits().getHits().length, equalTo(3)); @@ -958,6 +977,10 @@ public void testRescoreAfterCollapseRandom() throws Exception { .addRescorer(new QueryRescorerBuilder(fieldValueScoreQuery("secondPassScore")).setQueryWeight(0f).windowSize(numGroups)) .setCollapse(new CollapseBuilder("group")) .setSize(Math.min(numGroups, 10)); + if (randomBoolean()) { + request.addSort(SortBuilders.scoreSort()); + request.addSort(new FieldSortBuilder(FieldSortBuilder.SHARD_DOC_FIELD_NAME)); + } long expectedNumHits = numHits; assertResponse(request, resp -> { assertThat(resp.getHits().getTotalHits().value(), equalTo(expectedNumHits)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java index 2489889be19e5..8104e4ed7a825 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java @@ -32,6 +32,6 @@ public XContentBuilder getMapping() throws IOException { @Override public IndexVersion randomSupportedVersion() { - return IndexVersionUtils.randomCompatibleVersion(random()); + return IndexVersionUtils.randomCompatibleWriteVersion(random()); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoDistanceIT.java index 9b4e28055a988..a309fa81f6dc1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoDistanceIT.java @@ -96,7 +96,7 @@ protected boolean forbidPrivateIndexSettings() { @Before public void setupTestIndex() throws IOException { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() .startObject() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPolygonIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPolygonIT.java index 4b8f29f3cc9a5..aadefd9bd8018 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPolygonIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPolygonIT.java @@ -39,7 +39,7 @@ protected boolean forbidPrivateIndexSettings() { @Override protected void setupSuiteScopeCluster() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); assertAcked( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java index f790cf30e1c0e..6d1f2784a739c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -164,22 +164,19 @@ public void testIssue3177() { forceMerge(); refresh(); assertHitCount( + 3L, prepareSearch().setQuery(matchAllQuery()) .setPostFilter( boolQuery().must(matchAllQuery()) .must(boolQuery().mustNot(boolQuery().must(termQuery("field1", "value1")).must(termQuery("field1", "value2")))) ), - 3L - ); - assertHitCount( prepareSearch().setQuery( boolQuery().must( boolQuery().should(termQuery("field1", "value1")) .should(termQuery("field1", "value2")) .should(termQuery("field1", "value3")) ).filter(boolQuery().mustNot(boolQuery().must(termQuery("field1", "value1")).must(termQuery("field1", "value2")))) - ), - 3L + ) ); assertHitCount(prepareSearch().setQuery(matchAllQuery()).setPostFilter(boolQuery().mustNot(termQuery("field1", "value3"))), 2L); } @@ -309,11 +306,14 @@ public void testQueryStringAnalyzedWildcard() throws Exception { prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); - assertHitCount(prepareSearch().setQuery(queryStringQuery("value*")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("*ue*")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("*ue_1")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("val*e_1")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("v?l*e?1")), 1L); + assertHitCount( + 1L, + prepareSearch().setQuery(queryStringQuery("value*")), + prepareSearch().setQuery(queryStringQuery("*ue*")), + prepareSearch().setQuery(queryStringQuery("*ue_1")), + prepareSearch().setQuery(queryStringQuery("val*e_1")), + prepareSearch().setQuery(queryStringQuery("v?l*e?1")) + ); } public void testLowercaseExpandedTerms() { @@ -322,10 +322,13 @@ public void testLowercaseExpandedTerms() { prepareIndex("test").setId("1").setSource("field1", "value_1", "field2", "value_2").get(); refresh(); - assertHitCount(prepareSearch().setQuery(queryStringQuery("VALUE_3~1")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("ValUE_*")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("vAl*E_1")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("[VALUE_1 TO VALUE_3]")), 1L); + assertHitCount( + 1L, + prepareSearch().setQuery(queryStringQuery("VALUE_3~1")), + prepareSearch().setQuery(queryStringQuery("ValUE_*")), + prepareSearch().setQuery(queryStringQuery("vAl*E_1")), + prepareSearch().setQuery(queryStringQuery("[VALUE_1 TO VALUE_3]")) + ); } // Issue #3540 @@ -340,8 +343,11 @@ public void testDateRangeInQueryString() { prepareIndex("test").setId("1").setSource("past", aMonthAgo, "future", aMonthFromNow).get(); refresh(); - 
assertHitCount(prepareSearch().setQuery(queryStringQuery("past:[now-2M/d TO now/d]")), 1L); - assertHitCount(prepareSearch().setQuery(queryStringQuery("future:[now/d TO now+2M/d]")), 1L); + assertHitCount( + 1L, + prepareSearch().setQuery(queryStringQuery("past:[now-2M/d TO now/d]")), + prepareSearch().setQuery(queryStringQuery("future:[now/d TO now+2M/d]")) + ); SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, @@ -377,21 +383,17 @@ public void testDateRangeInQueryStringWithTimeZone_10477() { refresh(); // Timezone set with dates - assertHitCount(prepareSearch().setQuery(queryStringQuery("past:[2015-04-06T00:00:00+0200 TO 2015-04-06T23:00:00+0200]")), 2L); - - // Same timezone set with time_zone assertHitCount( - prepareSearch().setQuery(queryStringQuery("past:[2015-04-06T00:00:00 TO 2015-04-06T23:00:00]").timeZone("+0200")), - 2L + 2L, + prepareSearch().setQuery(queryStringQuery("past:[2015-04-06T00:00:00+0200 TO 2015-04-06T23:00:00+0200]")), + prepareSearch().setQuery(queryStringQuery("past:[2015-04-06T00:00:00 TO 2015-04-06T23:00:00]").timeZone("+0200")) ); // We set a timezone which will give no result - assertHitCount(prepareSearch().setQuery(queryStringQuery("past:[2015-04-06T00:00:00-0200 TO 2015-04-06T23:00:00-0200]")), 0L); - - // Same timezone set with time_zone but another timezone is set directly within dates which has the precedence assertHitCount( - prepareSearch().setQuery(queryStringQuery("past:[2015-04-06T00:00:00-0200 TO 2015-04-06T23:00:00-0200]").timeZone("+0200")), - 0L + 0L, + prepareSearch().setQuery(queryStringQuery("past:[2015-04-06T00:00:00-0200 TO 2015-04-06T23:00:00-0200]")), + prepareSearch().setQuery(queryStringQuery("past:[2015-04-06T00:00:00-0200 TO 2015-04-06T23:00:00-0200]").timeZone("+0200")) ); } @@ -502,13 +504,12 @@ public void testPassQueryOrFilterAsJSONString() throws Exception { prepareIndex("test").setId("1").setSource("field1", "value1_1", "field2", "value2_1").setRefreshPolicy(IMMEDIATE).get(); WrapperQueryBuilder wrapper = new WrapperQueryBuilder("{ \"term\" : { \"field1\" : \"value1_1\" } }"); - assertHitCount(prepareSearch().setQuery(wrapper), 1L); - - BoolQueryBuilder bool = boolQuery().must(wrapper).must(new TermQueryBuilder("field2", "value2_1")); - assertHitCount(prepareSearch().setQuery(bool), 1L); - - WrapperQueryBuilder wrapperFilter = wrapperQuery("{ \"term\" : { \"field1\" : \"value1_1\" } }"); - assertHitCount(prepareSearch().setPostFilter(wrapperFilter), 1L); + assertHitCount( + 1L, + prepareSearch().setQuery(wrapper), + prepareSearch().setQuery(boolQuery().must(wrapper).must(new TermQueryBuilder("field2", "value2_1"))), + prepareSearch().setPostFilter(wrapperQuery("{ \"term\" : { \"field1\" : \"value1_1\" } }")) + ); } public void testFiltersWithCustomCacheKey() throws Exception { @@ -516,10 +517,13 @@ public void testFiltersWithCustomCacheKey() throws Exception { prepareIndex("test").setId("1").setSource("field1", "value1").get(); refresh(); - assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), 1L); - assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), 1L); - assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), 1L); - assertHitCount(prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), 1L); + assertHitCount( + 1L, + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), + 
prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))), + prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))) + ); } public void testMatchQueryNumeric() throws Exception { @@ -994,13 +998,14 @@ public void testTermsLookupFilter() throws Exception { "4" ); - assertHitCount(prepareSearch("test").setQuery(termsLookupQuery("not_exists", new TermsLookup("lookup2", "3", "arr.term"))), 0L); - - // index "lookup" id "missing" document does not exist: ignore the lookup terms - assertHitCount(prepareSearch("test").setQuery(termsLookupQuery("term", new TermsLookup("lookup", "missing", "terms"))), 0L); - - // index "lookup3" has the source disabled: ignore the lookup terms - assertHitCount(prepareSearch("test").setQuery(termsLookupQuery("term", new TermsLookup("lookup3", "1", "terms"))), 0L); + assertHitCount( + 0L, + prepareSearch("test").setQuery(termsLookupQuery("not_exists", new TermsLookup("lookup2", "3", "arr.term"))), + // index "lookup" id "missing" document does not exist: ignore the lookup terms + prepareSearch("test").setQuery(termsLookupQuery("term", new TermsLookup("lookup", "missing", "terms"))), + // index "lookup3" has the source disabled: ignore the lookup terms + prepareSearch("test").setQuery(termsLookupQuery("term", new TermsLookup("lookup3", "1", "terms"))) + ); } public void testBasicQueryById() throws Exception { @@ -1120,18 +1125,14 @@ public void testNumericRangeFilter_2826() throws Exception { refresh(); assertHitCount( + 4L, prepareSearch("test").setPostFilter( boolQuery().should(rangeQuery("num_long").from(1).to(2)).should(rangeQuery("num_long").from(3).to(4)) ), - 4L - ); - - // This made 2826 fail! (only with bit based filters) - assertHitCount( + // This made 2826 fail! (only with bit based filters) prepareSearch("test").setPostFilter( boolQuery().should(rangeQuery("num_long").from(1).to(2)).should(rangeQuery("num_long").from(3).to(4)) - ), - 4L + ) ); // This made #2979 fail! 
@@ -1697,11 +1698,11 @@ public void testWildcardQueryNormalizationOnKeywordField() { refresh(); { - WildcardQueryBuilder wildCardQuery = wildcardQuery("field1", "Bb*"); - assertHitCount(prepareSearch().setQuery(wildCardQuery), 1L); - - wildCardQuery = wildcardQuery("field1", "bb*"); - assertHitCount(prepareSearch().setQuery(wildCardQuery), 1L); + assertHitCount( + 1L, + prepareSearch().setQuery(wildcardQuery("field1", "Bb*")), + prepareSearch().setQuery(wildcardQuery("field1", "bb*")) + ); } } @@ -1725,12 +1726,12 @@ public void testWildcardQueryNormalizationOnTextField() { WildcardQueryBuilder wildCardQuery = wildcardQuery("field1", "Bb*"); assertHitCount(prepareSearch().setQuery(wildCardQuery), 0L); - // the following works not because of normalization but because of the `case_insensitive` parameter - wildCardQuery = wildcardQuery("field1", "Bb*").caseInsensitive(true); - assertHitCount(prepareSearch().setQuery(wildCardQuery), 1L); - - wildCardQuery = wildcardQuery("field1", "bb*"); - assertHitCount(prepareSearch().setQuery(wildCardQuery), 1L); + assertHitCount( + 1L, + // the following works not because of normalization but because of the `case_insensitive` parameter + prepareSearch().setQuery(wildcardQuery("field1", "Bb*").caseInsensitive(true)), + prepareSearch().setQuery(wildcardQuery("field1", "bb*")) + ); } } @@ -1751,11 +1752,11 @@ public void testWildcardQueryNormalizationKeywordSpecialCharacters() { prepareIndex("test").setId("1").setSource("field", "label-1").get(); refresh(); - WildcardQueryBuilder wildCardQuery = wildcardQuery("field", "la*"); - assertHitCount(prepareSearch().setQuery(wildCardQuery), 1L); - - wildCardQuery = wildcardQuery("field", "la*el-?"); - assertHitCount(prepareSearch().setQuery(wildCardQuery), 1L); + assertHitCount( + 1L, + prepareSearch().setQuery(wildcardQuery("field", "la*")), + prepareSearch().setQuery(wildcardQuery("field", "la*el-?")) + ); } public static class MockAnalysisPlugin extends Plugin implements AnalysisPlugin { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java index e079994003751..37e299c816562 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.search.Scroll; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.sort.ShardDocSortField; @@ -97,14 +96,14 @@ public void testSearchSort() throws Exception { int fetchSize = randomIntBetween(10, 100); // test _doc sort SearchRequestBuilder request = prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .setSize(fetchSize) .addSort(SortBuilders.fieldSort("_doc")); assertSearchSlicesWithScroll(request, field, max, numDocs); // test numeric sort request = prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .addSort(SortBuilders.fieldSort("random_int")) .setSize(fetchSize); assertSearchSlicesWithScroll(request, field, max, numDocs); @@ -121,7 +120,7 @@ public void 
testWithPreferenceAndRoutings() throws Exception { int max = randomIntBetween(2, numShards * 3); int fetchSize = randomIntBetween(10, 100); SearchRequestBuilder request = prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .setSize(fetchSize) .setPreference("_shards:1,4") .addSort(SortBuilders.fieldSort("_doc")); @@ -133,7 +132,7 @@ public void testWithPreferenceAndRoutings() throws Exception { int max = randomIntBetween(2, numShards * 3); int fetchSize = randomIntBetween(10, 100); SearchRequestBuilder request = prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .setSize(fetchSize) .setRouting("foo", "bar") .addSort(SortBuilders.fieldSort("_doc")); @@ -151,7 +150,7 @@ public void testWithPreferenceAndRoutings() throws Exception { int max = randomIntBetween(2, numShards * 3); int fetchSize = randomIntBetween(10, 100); SearchRequestBuilder request = prepareSearch("alias1", "alias3").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .setSize(fetchSize) .addSort(SortBuilders.fieldSort("_doc")); assertSearchSlicesWithScroll(request, "_id", max, numDocs); @@ -176,7 +175,7 @@ private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String f searchResponse.decRef(); searchResponse = client().prepareSearchScroll("test") .setScrollId(scrollId) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .get(); scrollId = searchResponse.getScrollId(); totalResults += searchResponse.getHits().getHits().length; @@ -271,7 +270,7 @@ public void testInvalidFields() throws Exception { SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .slice(new SliceBuilder("invalid_random_int", 0, 10)) ); @@ -282,7 +281,7 @@ public void testInvalidFields() throws Exception { exc = expectThrows( SearchPhaseExecutionException.class, prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .slice(new SliceBuilder("invalid_random_kw", 0, 10)) ); rootCause = findRootCause(exc); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java index bf7a315040caa..f407c14c48c52 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java @@ -2003,10 +2003,12 @@ public void testLongSortOptimizationCorrectResults() { } public void testSortMixedFieldTypes() { - assertAcked(prepareCreate("index_long").setMapping("foo", "type=long").get()); - assertAcked(prepareCreate("index_integer").setMapping("foo", "type=integer").get()); - assertAcked(prepareCreate("index_double").setMapping("foo", "type=double").get()); - assertAcked(prepareCreate("index_keyword").setMapping("foo", "type=keyword").get()); + assertAcked( + prepareCreate("index_long").setMapping("foo", "type=long"), + prepareCreate("index_integer").setMapping("foo", "type=integer"), + prepareCreate("index_double").setMapping("foo", 
"type=double"), + prepareCreate("index_keyword").setMapping("foo", "type=keyword") + ); prepareIndex("index_long").setId("1").setSource("foo", "123").get(); prepareIndex("index_integer").setId("1").setSource("foo", "123").get(); @@ -2038,9 +2040,11 @@ public void testSortMixedFieldTypes() { } public void testSortMixedFieldTypesWithNoDocsForOneType() { - assertAcked(prepareCreate("index_long").setMapping("foo", "type=long").get()); - assertAcked(prepareCreate("index_other").setMapping("bar", "type=keyword").get()); - assertAcked(prepareCreate("index_double").setMapping("foo", "type=double").get()); + assertAcked( + prepareCreate("index_long").setMapping("foo", "type=long"), + prepareCreate("index_other").setMapping("bar", "type=keyword"), + prepareCreate("index_double").setMapping("foo", "type=double") + ); prepareIndex("index_long").setId("1").setSource("foo", "123").get(); prepareIndex("index_long").setId("2").setSource("foo", "124").get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java index e80678c4f5fc6..f55d4505f3f58 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceIT.java @@ -45,7 +45,7 @@ protected boolean forbidPrivateIndexSettings() { } public void testDistanceSortingMVFields() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() .startObject() @@ -237,7 +237,7 @@ public void testDistanceSortingMVFields() throws Exception { // Regression bug: // https://github.com/elastic/elasticsearch/issues/2851 public void testDistanceSortingWithMissingGeoPoint() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() .startObject() @@ -299,7 +299,7 @@ public void testDistanceSortingWithMissingGeoPoint() throws Exception { } public void testDistanceSortingNestedFields() throws Exception { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() .startObject() @@ -551,7 +551,7 @@ public void testDistanceSortingNestedFields() throws Exception { * Issue 3073 */ public void testGeoDistanceFilter() throws IOException { - IndexVersion version = IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); double lat = 40.720611; double lon = -73.998776; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java index aabca1b9333f8..d53c90a5d1e28 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java @@ -60,7 +60,7 @@ public void testManyToManyGeoPoints() throws ExecutionException, InterruptedExce * |___________________________ * 1 2 3 4 5 6 7 */ - IndexVersion version = randomBoolean() ? IndexVersion.current() : IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = randomBoolean() ? IndexVersion.current() : IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).setMapping(LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); @@ -152,7 +152,7 @@ public void testSingeToManyAvgMedian() throws ExecutionException, InterruptedExc * d1 = (0, 1), (0, 4), (0, 10); so avg. distance is 5, median distance is 4 * d2 = (0, 1), (0, 5), (0, 6); so avg. distance is 4, median distance is 5 */ - IndexVersion version = randomBoolean() ? IndexVersion.current() : IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = randomBoolean() ? IndexVersion.current() : IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).setMapping(LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); @@ -225,7 +225,7 @@ public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionExcept * |______________________ * 1 2 3 4 5 6 */ - IndexVersion version = randomBoolean() ? IndexVersion.current() : IndexVersionUtils.randomCompatibleVersion(random()); + IndexVersion version = randomBoolean() ? 
IndexVersion.current() : IndexVersionUtils.randomCompatibleWriteVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).setMapping(LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java index 9f40b1928dce6..2530dd35946fe 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java @@ -19,22 +19,17 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.mapper.OnScriptError; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.search.stats.SearchStats.Stats; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.script.LongFieldScript; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; -import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptEngine; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; -import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.lookup.Source; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.FailingFieldPlugin; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; @@ -80,50 +75,6 @@ protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { } } - public static class FailingFieldPlugin extends Plugin implements ScriptPlugin { - - @Override - public ScriptEngine getScriptEngine(Settings settings, Collection<ScriptContext<?>> contexts) { - return new ScriptEngine() { - @Override - public String getType() { - return "failing_field"; - } - - @Override - @SuppressWarnings("unchecked") - public <FactoryType> FactoryType compile( - String name, - String code, - ScriptContext<FactoryType> context, - Map<String, String> params - ) { - return (FactoryType) new LongFieldScript.Factory() { - @Override - public LongFieldScript.LeafFactory newFactory( - String fieldName, - Map<String, Object> params, - SearchLookup searchLookup, - OnScriptError onScriptError - ) { - return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) { - @Override - public void execute() { - throw new IllegalArgumentException("Accessing failing field"); - } - }; - } - }; - } - - @Override - public Set<ScriptContext<?>> getSupportedContexts() { - return Set.of(LongFieldScript.CONTEXT); - } - }; - } - } - @Override protected int numberOfReplicas() { return 0; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java index 209195301a659..caf7f852cc15e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java @@ -565,9 +565,7 @@ public void testSnapshotQueuedAfterCloneFromBrokenSourceSnapshot() throws Except assertSuccessful(sn1); assertSuccessful(sn2); assertSuccessful(sn3); - 
assertAcked(clone1.get()); - assertAcked(clone2.get()); - assertAcked(clone3.get()); + assertAcked(clone1, clone2, clone3); } public void testStartSnapshotWithSuccessfulShardClonePendingFinalization() throws Exception { @@ -624,8 +622,7 @@ public void testStartCloneWithSuccessfulShardClonePendingFinalization() throws E unblockNode(repoName, masterName); awaitNoMoreRunningOperations(masterName); awaitMasterFinishRepoOperations(); - assertAcked(blockedClone.get()); - assertAcked(otherClone.get()); + assertAcked(blockedClone, otherClone); assertEquals(getSnapshot(repoName, cloneName).state(), SnapshotState.SUCCESS); assertEquals(getSnapshot(repoName, otherCloneName).state(), SnapshotState.SUCCESS); } @@ -732,8 +729,7 @@ public void testManyConcurrentClonesStartOutOfOrder() throws Exception { awaitClusterState(state -> SnapshotsInProgress.get(state).forRepo(repoName).stream().anyMatch(entry -> entry.state().completed())); repo.unblock(); - assertAcked(clone1.get()); - assertAcked(clone2.get()); + assertAcked(clone1, clone2); } public void testRemoveFailedCloneFromCSWithoutIO() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index de62c0152817a..e16aff56527c3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -478,8 +478,7 @@ public void testCascadedAborts() throws Exception { assertThat(thirdSnapshotResponse.get().getSnapshotInfo().state(), is(SnapshotState.FAILED)); logger.info("--> verify both deletes have completed"); - assertAcked(deleteSnapshotsResponse.get()); - assertAcked(allDeletedResponse.get()); + assertAcked(deleteSnapshotsResponse, allDeletedResponse); logger.info("--> verify that all snapshots are gone"); assertThat(clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get().getSnapshots(), empty()); @@ -715,8 +714,7 @@ public void testQueuedOperationsOnMasterDisconnect() throws Exception { networkDisruption.stopDisrupting(); logger.info("--> make sure all failing requests get a response"); - assertAcked(firstDeleteFuture.get()); - assertAcked(secondDeleteFuture.get()); + assertAcked(firstDeleteFuture, secondDeleteFuture); expectThrows(SnapshotException.class, createSnapshot); awaitNoMoreRunningOperations(); } @@ -1014,8 +1012,7 @@ public void testBackToBackQueuedDeletes() throws Exception { awaitNDeletionsInProgress(2); unblockNode(repoName, masterName); - assertAcked(deleteSnapshotOne.get()); - assertAcked(deleteSnapshotTwo.get()); + assertAcked(deleteSnapshotOne, deleteSnapshotTwo); final RepositoryData repositoryData = getRepositoryData(repoName); assertThat(repositoryData.getSnapshotIds(), empty()); @@ -1361,9 +1358,12 @@ public void testConcurrentOperationsLimit() throws Exception { if (deleteAndAbortAll) { awaitNumberOfSnapshotsInProgress(0); for (ActionFuture<CreateSnapshotResponse> snapshotFuture : snapshotFutures) { - // just check that the futures resolve, whether or not things worked out with the snapshot actually finalizing or failing - // due to the abort does not matter - assertBusy(() -> assertTrue(snapshotFuture.isDone())); + try { + snapshotFuture.get(); + } catch (ExecutionException e) { + // just check that the futures resolve; whether the snapshot actually finalized or failed + // due to the abort does not matter + } 
assertThat(getRepositoryData(repoName).getSnapshotIds(), empty()); } else { @@ -1890,8 +1890,7 @@ public void testIndexDeletedWhileSnapshotAndCloneQueuedAfterClone() throws Excep assertSuccessful(snapshot3); unblockNode(repository, master); - assertAcked(cloneSnapshot.get()); - assertAcked(cloneSnapshot2.get()); + assertAcked(cloneSnapshot, cloneSnapshot2); assertAcked(startDeleteSnapshot(repository, cloneTarget).get()); assertThat( @@ -2031,8 +2030,7 @@ public void testCloneQueuedAfterMissingShard() throws Exception { awaitNumberOfSnapshotsInProgress(2); unblockNode(repository, master); - assertAcked(deleteFuture.get()); - assertAcked(cloneFuture.get()); + assertAcked(deleteFuture, cloneFuture); awaitNoMoreRunningOperations(); assertThat(snapshot1.get().getSnapshotInfo().state(), is(SnapshotState.PARTIAL)); } @@ -2109,8 +2107,7 @@ public void testSnapshotAndCloneQueuedAfterMissingShard() throws Exception { awaitNumberOfSnapshotsInProgress(3); unblockNode(repository, master); - assertAcked(deleteFuture.get()); - assertAcked(cloneFuture.get()); + assertAcked(deleteFuture, cloneFuture); awaitNoMoreRunningOperations(); assertThat(snapshot1.get().getSnapshotInfo().state(), is(SnapshotState.PARTIAL)); assertThat(snapshot2.get().getSnapshotInfo().state(), is(SnapshotState.PARTIAL)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 588753f570c97..0d359300bbdc1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -664,8 +664,7 @@ public void testRestoreShrinkIndex() throws Exception { createSnapshot(repo, snapshot, Collections.singletonList(shrunkIdx)); logger.info("--> delete index and stop the data node"); - assertAcked(indicesAdmin().prepareDelete(sourceIdx).get()); - assertAcked(indicesAdmin().prepareDelete(shrunkIdx).get()); + assertAcked(indicesAdmin().prepareDelete(sourceIdx), indicesAdmin().prepareDelete(shrunkIdx)); internalCluster().stopRandomDataNode(); clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setTimeout(TimeValue.timeValueSeconds(30)).setWaitForNodes("1"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index 4ba06a34ca3a7..b2bb38c5364a5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -340,14 +340,14 @@ public void testRestoreAliases() throws Exception { .addAlias("test-idx-1", "alias-1") ); - assertFalse(indicesAdmin().prepareGetAliases("alias-123").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias-123").get().getAliases().isEmpty()); createSnapshot("test-repo", "test-snap", Collections.emptyList()); logger.info("--> delete all indices"); cluster().wipeIndices("test-idx-1", "test-idx-2", "test-idx-3"); - assertTrue(indicesAdmin().prepareGetAliases("alias-123").get().getAliases().isEmpty()); - assertTrue(indicesAdmin().prepareGetAliases("alias-1").get().getAliases().isEmpty()); + assertTrue(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias-123").get().getAliases().isEmpty()); + 
assertTrue(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias-1").get().getAliases().isEmpty()); logger.info("--> restore snapshot with aliases"); RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( @@ -362,8 +362,8 @@ public void testRestoreAliases() throws Exception { ); logger.info("--> check that aliases are restored"); - assertFalse(indicesAdmin().prepareGetAliases("alias-123").get().getAliases().isEmpty()); - assertFalse(indicesAdmin().prepareGetAliases("alias-1").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias-123").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias-1").get().getAliases().isEmpty()); logger.info("--> update aliases"); assertAcked(indicesAdmin().prepareAliases().removeAlias("test-idx-3", "alias-123")); @@ -372,8 +372,8 @@ public void testRestoreAliases() throws Exception { logger.info("--> delete and close indices"); cluster().wipeIndices("test-idx-1", "test-idx-2"); assertAcked(indicesAdmin().prepareClose("test-idx-3")); - assertTrue(indicesAdmin().prepareGetAliases("alias-123").get().getAliases().isEmpty()); - assertTrue(indicesAdmin().prepareGetAliases("alias-1").get().getAliases().isEmpty()); + assertTrue(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias-123").get().getAliases().isEmpty()); + assertTrue(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias-1").get().getAliases().isEmpty()); logger.info("--> restore snapshot without aliases"); restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") @@ -388,9 +388,9 @@ public void testRestoreAliases() throws Exception { ); logger.info("--> check that aliases are not restored and existing aliases still exist"); - assertTrue(indicesAdmin().prepareGetAliases("alias-123").get().getAliases().isEmpty()); - assertTrue(indicesAdmin().prepareGetAliases("alias-1").get().getAliases().isEmpty()); - assertFalse(indicesAdmin().prepareGetAliases("alias-3").get().getAliases().isEmpty()); + assertTrue(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias-123").get().getAliases().isEmpty()); + assertTrue(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias-1").get().getAliases().isEmpty()); + assertFalse(indicesAdmin().prepareGetAliases(TEST_REQUEST_TIMEOUT, "alias-3").get().getAliases().isEmpty()); } public void testRestoreTemplates() throws Exception { @@ -424,7 +424,7 @@ public void testRestoreTemplates() throws Exception { logger.info("--> delete test template"); assertThat(indicesAdmin().prepareDeleteTemplate("test-template").get().isAcknowledged(), equalTo(true)); - GetIndexTemplatesResponse getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates().get(); + GetIndexTemplatesResponse getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> restore cluster state"); @@ -437,7 +437,7 @@ public void testRestoreTemplates() throws Exception { assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); logger.info("--> check that template is restored"); - getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates().get(); + getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get(); assertIndexTemplateExists(getIndexTemplatesResponse, "test-template"); } diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java index 8adac063dcc5b..99cfafe48bc4f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java @@ -133,7 +133,7 @@ public void testIncludeGlobalState() throws Exception { if (testTemplate) { logger.info("--> delete test template"); cluster().wipeTemplates("test-template"); - GetIndexTemplatesResponse getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates().get(); + GetIndexTemplatesResponse getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); } @@ -161,7 +161,7 @@ public void testIncludeGlobalState() throws Exception { assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); logger.info("--> check that template wasn't restored"); - GetIndexTemplatesResponse getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates().get(); + GetIndexTemplatesResponse getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> restore cluster state"); @@ -173,7 +173,7 @@ public void testIncludeGlobalState() throws Exception { if (testTemplate) { logger.info("--> check that template is restored"); - getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates().get(); + getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get(); assertIndexTemplateExists(getIndexTemplatesResponse, "test-template"); } @@ -225,7 +225,7 @@ public void testIncludeGlobalState() throws Exception { ); } - getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates().get(); + getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> try restoring index and cluster state from snapshot without global state"); @@ -238,7 +238,7 @@ public void testIncludeGlobalState() throws Exception { assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); logger.info("--> check that global state wasn't restored but index was"); - getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates().get(); + getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates(TEST_REQUEST_TIMEOUT).get(); assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); assertFalse(getPipelines("barbaz").isFound()); assertNull(safeExecute(GetStoredScriptAction.INSTANCE, new GetStoredScriptRequest(TEST_REQUEST_TIMEOUT, "foobar")).getSource()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java index 755ee960be73e..aeac8959df617 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java @@ -753,11 +753,16 @@ private static void putShutdownMetadata( clusterService.submitUnbatchedStateUpdateTask("mark node for removal", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState 
currentState) { - final var nodeId = currentState.nodes().resolveNode(nodeName).getId(); + final var node = currentState.nodes().resolveNode(nodeName); return currentState.copyAndUpdateMetadata( mdb -> mdb.putCustom( NodesShutdownMetadata.TYPE, - new NodesShutdownMetadata(Map.of(nodeId, shutdownMetadataBuilder.setNodeId(nodeId).build())) + new NodesShutdownMetadata( + Map.of( + node.getId(), + shutdownMetadataBuilder.setNodeId(node.getId()).setNodeEphemeralId(node.getEphemeralId()).build() + ) + ) ) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java index 15c5e3379734a..ac322868989bb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java @@ -1235,15 +1235,16 @@ public ClusterState execute(ClusterState currentState) { Strings.toString(currentState), currentState.metadata().nodeShutdowns().getAll().isEmpty() ); - final var nodeId = currentState.nodes().resolveNode(node.nodeName).getId(); + final var discoveryNode = currentState.nodes().resolveNode(node.nodeName); return currentState.copyAndUpdateMetadata( mdb -> mdb.putCustom( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Map.of( - nodeId, + discoveryNode.getId(), SingleNodeShutdownMetadata.builder() - .setNodeId(nodeId) + .setNodeId(discoveryNode.getId()) + .setNodeEphemeralId(discoveryNode.getEphemeralId()) .setType(SingleNodeShutdownMetadata.Type.REMOVE) .setStartedAtMillis(clusterService.threadPool().absoluteTimeInMillis()) .setReason("test") diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 51896fb80a62c..2a68b65bcdccb 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -424,21 +424,14 @@ provides org.elasticsearch.features.FeatureSpecification with - org.elasticsearch.action.admin.indices.stats.IndicesStatsFeatures, org.elasticsearch.action.bulk.BulkFeatures, org.elasticsearch.features.FeatureInfrastructureFeatures, - org.elasticsearch.health.HealthFeatures, - org.elasticsearch.cluster.metadata.MetadataFeatures, - org.elasticsearch.rest.RestFeatures, - org.elasticsearch.repositories.RepositoriesFeatures, - org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures, org.elasticsearch.rest.action.admin.cluster.ClusterRerouteFeatures, org.elasticsearch.index.mapper.MapperFeatures, - org.elasticsearch.ingest.IngestGeoIpFeatures, + org.elasticsearch.index.IndexFeatures, org.elasticsearch.search.SearchFeatures, org.elasticsearch.script.ScriptFeatures, org.elasticsearch.search.retriever.RetrieversFeatures, - org.elasticsearch.reservedstate.service.FileSettingsFeatures, org.elasticsearch.action.admin.cluster.stats.ClusterStatsFeatures; uses org.elasticsearch.plugins.internal.SettingsExtension; @@ -480,4 +473,5 @@ exports org.elasticsearch.inference.configuration; exports org.elasticsearch.monitor.metrics; exports org.elasticsearch.plugins.internal.rewriter to org.elasticsearch.inference; + exports org.elasticsearch.lucene.util.automaton; } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index d6e1d31d9b7f7..b2300e1640104 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ 
b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -52,7 +52,10 @@ static TransportVersion def(int id) { @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // remove the transport versions with which v9 will not need to interact public static final TransportVersion ZERO = def(0); public static final TransportVersion V_7_0_0 = def(7_00_00_99); + public static final TransportVersion V_7_1_0 = def(7_01_00_99); + public static final TransportVersion V_7_2_0 = def(7_02_00_99); public static final TransportVersion V_7_3_0 = def(7_03_00_99); + public static final TransportVersion V_7_3_2 = def(7_03_02_99); public static final TransportVersion V_7_4_0 = def(7_04_00_99); public static final TransportVersion V_7_6_0 = def(7_06_00_99); public static final TransportVersion V_7_8_0 = def(7_08_00_99); @@ -137,6 +140,21 @@ static TransportVersion def(int id) { public static final TransportVersion KNN_QUERY_RESCORE_OVERSAMPLE = def(8_806_00_0); public static final TransportVersion SEMANTIC_QUERY_LENIENT = def(8_807_00_0); public static final TransportVersion ESQL_QUERY_BUILDER_IN_SEARCH_FUNCTIONS = def(8_808_00_0); + public static final TransportVersion EQL_ALLOW_PARTIAL_SEARCH_RESULTS = def(8_809_00_0); + public static final TransportVersion NODE_VERSION_INFORMATION_WITH_MIN_READ_ONLY_INDEX_VERSION = def(8_810_00_0); + public static final TransportVersion ERROR_TRACE_IN_TRANSPORT_HEADER = def(8_811_00_0); + public static final TransportVersion FAILURE_STORE_ENABLED_BY_CLUSTER_SETTING = def(8_812_00_0); + public static final TransportVersion SIMULATE_IGNORED_FIELDS = def(8_813_00_0); + public static final TransportVersion TRANSFORMS_UPGRADE_MODE = def(8_814_00_0); + public static final TransportVersion NODE_SHUTDOWN_EPHEMERAL_ID_ADDED = def(8_815_00_0); + public static final TransportVersion ESQL_CCS_TELEMETRY_STATS = def(8_816_00_0); + public static final TransportVersion TEXT_EMBEDDING_QUERY_VECTOR_BUILDER_INFER_MODEL_ID = def(8_817_00_0); + public static final TransportVersion ESQL_ENABLE_NODE_LEVEL_REDUCTION = def(8_818_00_0); + public static final TransportVersion JINA_AI_INTEGRATION_ADDED = def(8_819_00_0); + public static final TransportVersion TRACK_INDEX_FAILED_DUE_TO_VERSION_CONFLICT_METRIC = def(8_820_00_0); + public static final TransportVersion REPLACE_FAILURE_STORE_OPTIONS_WITH_SELECTOR_SYNTAX = def(8_821_00_0); + public static final TransportVersion ELASTIC_INFERENCE_SERVICE_UNIFIED_CHAT_COMPLETIONS_INTEGRATION = def(8_822_00_0); + public static final TransportVersion KQL_QUERY_TECH_PREVIEW = def(8_823_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 47c43eadcfb03..8873c9b0e281e 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -191,6 +191,7 @@ public class Version implements VersionId<Version>, ToXContentFragment { public static final Version V_8_16_0 = new Version(8_16_00_99); public static final Version V_8_16_1 = new Version(8_16_01_99); public static final Version V_8_16_2 = new Version(8_16_02_99); + public static final Version V_8_16_3 = new Version(8_16_03_99); public static final Version V_8_17_0 = new Version(8_17_00_99); public static final Version V_8_17_1 = new Version(8_17_01_99); public static final Version V_8_18_0 = new Version(8_18_00_99); diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 98d6284fd91d2..ec393b7af5cdf 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -1004,7 +1004,7 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster, Predicate< // Desired nodes registerHandler.accept(new RestGetDesiredNodesAction()); - registerHandler.accept(new RestUpdateDesiredNodesAction(clusterSupportsFeature)); + registerHandler.accept(new RestUpdateDesiredNodesAction()); registerHandler.accept(new RestDeleteDesiredNodesAction()); for (ActionPlugin plugin : actionPlugins) { diff --git a/server/src/main/java/org/elasticsearch/action/ResolvedIndices.java b/server/src/main/java/org/elasticsearch/action/ResolvedIndices.java index f149603f12d8b..16f37c9573a8e 100644 --- a/server/src/main/java/org/elasticsearch/action/ResolvedIndices.java +++ b/server/src/main/java/org/elasticsearch/action/ResolvedIndices.java @@ -150,10 +150,26 @@ public static ResolvedIndices resolveWithIndicesRequest( RemoteClusterService remoteClusterService, long startTimeInMillis ) { - final Map<String, OriginalIndices> remoteClusterIndices = remoteClusterService.groupIndices( + return resolveWithIndexNamesAndOptions( + request.indices(), request.indicesOptions(), - request.indices() + clusterState, + indexNameExpressionResolver, + remoteClusterService, + startTimeInMillis ); + } + + public static ResolvedIndices resolveWithIndexNamesAndOptions( + String[] indexNames, + IndicesOptions indicesOptions, + ClusterState clusterState, + IndexNameExpressionResolver indexNameExpressionResolver, + RemoteClusterService remoteClusterService, + long startTimeInMillis + ) { + final Map<String, OriginalIndices> remoteClusterIndices = remoteClusterService.groupIndices(indicesOptions, indexNames); + final OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); Index[] concreteLocalIndices = localIndices == null diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/AllocationStatsFeatures.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/AllocationStatsFeatures.java deleted file mode 100644 index d754d652c8d88..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/AllocationStatsFeatures.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.action.admin.cluster.allocation; - -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; - -import java.util.Set; - -public class AllocationStatsFeatures implements FeatureSpecification { - public static final NodeFeature INCLUDE_DISK_THRESHOLD_SETTINGS = new NodeFeature("stats.include_disk_thresholds"); - - @Override - public Set getFeatures() { - return Set.of(INCLUDE_DISK_THRESHOLD_SETTINGS); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java index 23f88540c2666..3b6a161c3db8a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java @@ -37,7 +37,7 @@ import java.util.Locale; import static org.elasticsearch.cluster.routing.allocation.AbstractAllocationDecision.discoveryNodeToXContent; -import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.singleChunk; +import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.chunk; /** * A {@code ClusterAllocationExplanation} is an explanation of why a shard is unassigned, @@ -169,7 +169,7 @@ public ShardAllocationDecision getShardAllocationDecision() { } public Iterator toXContentChunked(ToXContent.Params params) { - return Iterators.concat(singleChunk((builder, p) -> { + return Iterators.concat(chunk((builder, p) -> { builder.startObject(); if (isSpecificShard() == false) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponse.java index 2f22b63f78bc6..2a35851f0017b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponse.java @@ -33,8 +33,8 @@ import java.util.Objects; import java.util.Set; +import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.chunk; import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.endObject; -import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.singleChunk; import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.startObject; public class DesiredBalanceResponse extends ActionResponse implements ChunkedToXContentObject { @@ -88,7 +88,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public Iterator toXContentChunked(ToXContent.Params params) { return Iterators.concat( - singleChunk( + chunk( (builder, p) -> builder.startObject() .field("stats", stats) .field("cluster_balance_stats", clusterBalanceStats) @@ -101,16 +101,16 @@ public Iterator toXContentChunked(ToXContent.Params params Iterators.flatMap( indexEntry.getValue().entrySet().iterator(), shardEntry -> Iterators.concat( - singleChunk((builder, p) -> 
builder.field(String.valueOf(shardEntry.getKey()))), + chunk((builder, p) -> builder.field(String.valueOf(shardEntry.getKey()))), shardEntry.getValue().toXContentChunked(params) ) ), endObject() ) ), - singleChunk((builder, p) -> builder.endObject().startObject("cluster_info")), + chunk((builder, p) -> builder.endObject().startObject("cluster_info")), clusterInfo.toXContentChunked(params), - singleChunk((builder, p) -> builder.endObject().endObject()) + chunk((builder, p) -> builder.endObject().endObject()) ); } @@ -173,9 +173,9 @@ public void writeTo(StreamOutput out) throws IOException { @Override public Iterator toXContentChunked(ToXContent.Params params) { return Iterators.concat( - singleChunk((builder, p) -> builder.startObject().startArray("current")), + chunk((builder, p) -> builder.startObject().startArray("current")), current().iterator(), - singleChunk((builder, p) -> builder.endArray().field("desired").value(desired, p).endObject()) + chunk((builder, p) -> builder.endArray().field("desired").value(desired, p).endObject()) ); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java index d929fb457d5d1..23bf22e08985e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -49,7 +48,6 @@ public class TransportGetAllocationStatsAction extends TransportMasterNodeReadAc private final AllocationStatsService allocationStatsService; private final DiskThresholdSettings diskThresholdSettings; - private final FeatureService featureService; @Inject public TransportGetAllocationStatsAction( @@ -58,8 +56,7 @@ public TransportGetAllocationStatsAction( ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - AllocationStatsService allocationStatsService, - FeatureService featureService + AllocationStatsService allocationStatsService ) { super( TYPE.name(), @@ -74,7 +71,6 @@ public TransportGetAllocationStatsAction( ); this.allocationStatsService = allocationStatsService; this.diskThresholdSettings = new DiskThresholdSettings(clusterService.getSettings(), clusterService.getClusterSettings()); - this.featureService = featureService; } @Override @@ -92,10 +88,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A listener.onResponse( new Response( request.metrics().contains(Metric.ALLOCATIONS) ? allocationStatsService.stats() : Map.of(), - request.metrics().contains(Metric.FS) - && featureService.clusterHasFeature(clusterService.state(), AllocationStatsFeatures.INCLUDE_DISK_THRESHOLD_SETTINGS) - ? diskThresholdSettings - : null + request.metrics().contains(Metric.FS) ? 
diskThresholdSettings : null ) ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/TransportNodesCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/TransportNodesCapabilitiesAction.java index 9fede2ebb5be6..beb0e1f927de2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/TransportNodesCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/TransportNodesCapabilitiesAction.java @@ -9,7 +9,6 @@ package org.elasticsearch.action.admin.cluster.node.capabilities; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; @@ -20,11 +19,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.admin.cluster.RestNodesCapabilitiesAction; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequest; @@ -32,7 +29,6 @@ import java.io.IOException; import java.util.List; -import java.util.Optional; import java.util.Set; public class TransportNodesCapabilitiesAction extends TransportNodesAction< @@ -45,7 +41,6 @@ public class TransportNodesCapabilitiesAction extends TransportNodesAction< public static final ActionType TYPE = new ActionType<>("cluster:monitor/nodes/capabilities"); private final RestController restController; - private final FeatureService featureService; @Inject public TransportNodesCapabilitiesAction( @@ -53,8 +48,7 @@ public TransportNodesCapabilitiesAction( ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, - RestController restController, - FeatureService featureService + RestController restController ) { super( TYPE.name(), @@ -65,23 +59,6 @@ public TransportNodesCapabilitiesAction( threadPool.executor(ThreadPool.Names.MANAGEMENT) ); this.restController = restController; - this.featureService = featureService; - } - - @Override - protected void doExecute(Task task, NodesCapabilitiesRequest request, ActionListener listener) { - if (featureService.clusterHasFeature(clusterService.state(), RestNodesCapabilitiesAction.CAPABILITIES_ACTION) == false) { - // not everything in the cluster supports capabilities. 
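With this feature check gone, callers of the capabilities action always receive a real aggregated answer instead of the synthetic Optional.of(false) placeholder being deleted here. A hedged sketch of a consumer, assuming an ElasticsearchClient named client is in scope and that NodesCapabilitiesRequest has a default constructor:

```java
// Hypothetical consumer; the request and listener plumbing is standard client code.
client.execute(TransportNodesCapabilitiesAction.TYPE, new NodesCapabilitiesRequest(), ActionListener.wrap(response -> {
    Optional<Boolean> supported = response.isSupported();
    // Previously Optional.of(false) could also mean "some node predates the
    // capabilities API"; now it reflects the real per-node answers.
    if (supported.orElse(false)) {
        // every queried node supports the requested path and capabilities
    }
}, e -> { /* handle failure */ }));
```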
- // Therefore we don't support whatever it is we're being asked for - listener.onResponse(new NodesCapabilitiesResponse(clusterService.getClusterName(), List.of(), List.of()) { - @Override - public Optional isSupported() { - return Optional.of(false); - } - }); - } else { - super.doExecute(task, request, listener); - } } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java index e194101738489..ae4d6cb92c08f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java @@ -15,9 +15,11 @@ import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.discovery.DiscoveryStats; import org.elasticsearch.http.HttpStats; @@ -38,10 +40,13 @@ import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; +import java.util.Collections; import java.util.Iterator; import java.util.Map; import java.util.Objects; +import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.chunk; + /** * Node statistics (dynamic, changes depending on when created). */ @@ -342,7 +347,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public Iterator toXContentChunked(ToXContent.Params outerParams) { - return ChunkedToXContent.builder(outerParams).append((builder, params) -> { + return Iterators.concat(chunk((builder, params) -> { builder.field("name", getNode().getName()); builder.field("transport_address", getNode().getAddress().toString()); builder.field("host", getNode().getHostName()); @@ -353,7 +358,6 @@ public Iterator toXContentChunked(ToXContent.Params outerP builder.value(role.roleName()); } builder.endArray(); - if (getNode().getAttributes().isEmpty() == false) { builder.startObject("attributes"); for (Map.Entry attrEntry : getNode().getAttributes().entrySet()) { @@ -361,30 +365,38 @@ public Iterator toXContentChunked(ToXContent.Params outerP } builder.endObject(); } + return builder; - }) - - .appendIfPresent(getIndices()) - .append((builder, p) -> builder.value(ifPresent(getOs()), p).value(ifPresent(getProcess()), p).value(ifPresent(getJvm()), p)) - - .appendIfPresent(getThreadPool()) - .appendIfPresent(getFs()) - .appendIfPresent(getTransport()) - .appendIfPresent(getHttp()) - .appendIfPresent(getBreaker()) - .appendIfPresent(getScriptStats()) - .appendIfPresent(getDiscoveryStats()) - .appendIfPresent(getIngestStats()) - .appendIfPresent(getAdaptiveSelectionStats()) - .appendIfPresent(getScriptCacheStats()) - .append( + }), + ifPresent(getIndices()).toXContentChunked(outerParams), + chunk((builder, p) -> builder.value(ifPresent(getOs()), p).value(ifPresent(getProcess()), p).value(ifPresent(getJvm()), p)), + ifPresent(getThreadPool()).toXContentChunked(outerParams), + singleChunkIfPresent(getFs()), + ifPresent(getTransport()).toXContentChunked(outerParams), + 
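The pattern replacing the ChunkedToXContent.builder(...) DSL in these stats classes is a flat Iterators.concat of small chunks. A self-contained sketch of that shape (ExampleResponse is hypothetical; chunk and Iterators are the helpers imported above):

```java
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.xcontent.ChunkedToXContentObject;
import org.elasticsearch.xcontent.ToXContent;

import java.util.Iterator;
import java.util.List;

import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.chunk;

// Hypothetical response type showing the chunked-iterator shape used above.
public record ExampleResponse(List<String> names) implements ChunkedToXContentObject {
    @Override
    public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
        return Iterators.concat(
            chunk((builder, p) -> builder.startObject().startArray("names")),
            // one tiny chunk per element keeps peak memory flat for large lists
            Iterators.map(names.iterator(), name -> (ToXContent) (builder, p) -> builder.value(name)),
            chunk((builder, p) -> builder.endArray().field("count", names.size()).endObject())
        );
    }
}
```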
ifPresent(getHttp()).toXContentChunked(outerParams), + singleChunkIfPresent(getBreaker()), + ifPresent(getScriptStats()).toXContentChunked(outerParams), + singleChunkIfPresent(getDiscoveryStats()), + ifPresent(getIngestStats()).toXContentChunked(outerParams), + singleChunkIfPresent(getAdaptiveSelectionStats()), + singleChunkIfPresent(getScriptCacheStats()), + chunk( (builder, p) -> builder.value(ifPresent(getIndexingPressureStats()), p) .value(ifPresent(getRepositoriesStats()), p) .value(ifPresent(getNodeAllocationStats()), p) - ); + ) + ); + } + + private static ChunkedToXContent ifPresent(@Nullable ChunkedToXContent chunkedToXContent) { + return Objects.requireNonNullElse(chunkedToXContent, ChunkedToXContent.EMPTY); } private static ToXContent ifPresent(@Nullable ToXContent toXContent) { return Objects.requireNonNullElse(toXContent, ToXContent.EMPTY); } + + private static Iterator singleChunkIfPresent(ToXContent toXContent) { + return toXContent == null ? Collections.emptyIterator() : ChunkedToXContentHelper.chunk(toXContent); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java index 1f0e0daf77d7b..25d34957b6958 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java @@ -14,9 +14,10 @@ import org.elasticsearch.action.support.nodes.BaseNodesXContentResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; @@ -41,12 +42,15 @@ protected void writeNodesTo(StreamOutput out, List nodes) throws IOEx @Override protected Iterator xContentChunks(ToXContent.Params outerParams) { - return ChunkedToXContent.builder(outerParams) - .object( - "nodes", - getNodes().iterator(), - (b, ns) -> b.object(ns.getNode().getId(), ob -> ob.field("timestamp", ns.getTimestamp()).append(ns)) - ); + return Iterators.concat( + ChunkedToXContentHelper.startObject("nodes"), + Iterators.flatMap(getNodes().iterator(), nodeStats -> Iterators.concat(Iterators.single((builder, params) -> { + builder.startObject(nodeStats.getNode().getId()); + builder.field("timestamp", nodeStats.getTimestamp()); + return builder; + }), nodeStats.toXContentChunked(outerParams), ChunkedToXContentHelper.endObject())), + ChunkedToXContentHelper.endObject() + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index 45ee00a98c2e2..f04db5eec667b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ -13,11 +13,12 @@ import org.elasticsearch.action.support.master.IsAcknowledgedSupplier; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; +import 
org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.UpdateForV10; @@ -25,6 +26,7 @@ import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; +import java.util.Collections; import java.util.Iterator; import java.util.Objects; @@ -92,14 +94,17 @@ public Iterator toXContentChunked(ToXContent.Params outerP if (emitState(outerParams)) { deprecationLogger.critical(DeprecationCategory.API, "reroute_cluster_state", STATE_FIELD_DEPRECATION_MESSAGE); } - return ChunkedToXContent.builder(outerParams).object(b -> { - b.field(ACKNOWLEDGED_KEY, isAcknowledged()); - if (emitState(outerParams)) { - b.xContentObject("state", state); - } - if (outerParams.paramAsBoolean("explain", false)) { - b.append(explanations); - } - }); + return Iterators.concat( + Iterators.single((builder, params) -> builder.startObject().field(ACKNOWLEDGED_KEY, isAcknowledged())), + emitState(outerParams) + ? ChunkedToXContentHelper.object("state", state.toXContentChunked(outerParams)) + : Collections.emptyIterator(), + Iterators.single((builder, params) -> { + if (params.paramAsBoolean("explain", false)) { + explanations.toXContent(builder, params); + } + return builder.endObject(); + }) + ); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java index 7e3c38c735091..ca02d19749ae7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java @@ -13,13 +13,18 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.action.support.local.LocalClusterStateRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV10; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import java.io.IOException; +import java.util.Map; import java.util.Objects; public class ClusterGetSettingsAction extends ActionType { @@ -34,25 +39,29 @@ public ClusterGetSettingsAction() { /** * Request to retrieve the cluster settings */ - public static class Request extends MasterNodeReadRequest { + public static class Request extends LocalClusterStateRequest { public Request(TimeValue masterNodeTimeout) { super(masterNodeTimeout); } + /** + * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to read these requests until + * we no longer need to support calling this action remotely. 
+ */
+ @UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA)
 public Request(StreamInput in) throws IOException {
 super(in);
 assert in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0);
 }
 @Override
- public void writeTo(StreamOutput out) throws IOException {
- assert out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0);
- super.writeTo(out);
+ public ActionRequestValidationException validate() {
+ return null;
 }
 @Override
- public ActionRequestValidationException validate() {
- return null;
+ public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
+ return new CancellableTask(id, type, action, "", parentTaskId, headers);
+ }
 }
@@ -79,20 +88,17 @@ public int hashCode() { return Objects.hash(persistentSettings, transientSettings, settings); }
- public Response(StreamInput in) throws IOException {
- super(in);
- assert in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0);
- persistentSettings = Settings.readSettingsFromStream(in);
- transientSettings = Settings.readSettingsFromStream(in);
- settings = Settings.readSettingsFromStream(in);
- }
-
 public Response(Settings persistentSettings, Settings transientSettings, Settings settings) {
 this.persistentSettings = Objects.requireNonNullElse(persistentSettings, Settings.EMPTY);
 this.transientSettings = Objects.requireNonNullElse(transientSettings, Settings.EMPTY);
 this.settings = Objects.requireNonNullElse(settings, Settings.EMPTY);
 }
+ /**
+ * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until
+ * we no longer need to support calling this action remotely.
+ */
+ @UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA)
 @Override
 public void writeTo(StreamOutput out) throws IOException {
 assert out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterGetSettingsAction.java
index dce6a38001392..71b976e012aad 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterGetSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterGetSettingsAction.java
@@ -11,57 +11,66 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
+import org.elasticsearch.action.support.ChannelActionListener;
+import org.elasticsearch.action.support.local.TransportLocalClusterStateAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
-import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.settings.SettingsFilter;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.core.UpdateForV10;
 import org.elasticsearch.injection.guice.Inject;
+import org.elasticsearch.tasks.CancellableTask;
 import org.elasticsearch.tasks.Task;
-import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
-public class TransportClusterGetSettingsAction extends TransportMasterNodeReadAction<
+public class TransportClusterGetSettingsAction extends
TransportLocalClusterStateAction< ClusterGetSettingsAction.Request, ClusterGetSettingsAction.Response> { private final SettingsFilter settingsFilter; + /** + * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService until + * we no longer need to support calling this action remotely. + */ + @UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA) + @SuppressWarnings("this-escape") @Inject public TransportClusterGetSettingsAction( TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, SettingsFilter settingsFilter, - ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver + ActionFilters actionFilters ) { super( ClusterGetSettingsAction.NAME, - false, - transportService, - clusterService, - threadPool, actionFilters, - ClusterGetSettingsAction.Request::new, - indexNameExpressionResolver, - ClusterGetSettingsAction.Response::new, + transportService.getTaskManager(), + clusterService, EsExecutors.DIRECT_EXECUTOR_SERVICE ); - this.settingsFilter = settingsFilter; + + transportService.registerRequestHandler( + actionName, + executor, + false, + true, + ClusterGetSettingsAction.Request::new, + (request, channel, task) -> executeDirect(task, request, new ChannelActionListener<>(channel)) + ); } @Override - protected void masterOperation( + protected void localClusterStateOperation( Task task, ClusterGetSettingsAction.Request request, ClusterState state, ActionListener listener ) throws Exception { + ((CancellableTask) task).ensureNotCancelled(); Metadata metadata = state.metadata(); listener.onResponse( new ClusterGetSettingsAction.Response( diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index b6ced06623306..bfdf41e58f6d3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -61,7 +61,7 @@ public class CreateSnapshotRequest extends MasterNodeRequest repositoryFailures; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index b7a513b3cb08e..24c427c32d69a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -44,9 +43,7 @@ public class RestoreSnapshotRequest extends MasterNodeRequest */ public final class CCSTelemetrySnapshot implements Writeable, ToXContentFragment { - public static final String CCS_TELEMETRY_FIELD_NAME = "_search"; private long totalCount; private long successCount; private final Map failureReasons; @@ -66,6 +65,9 @@ public final class CCSTelemetrySnapshot 
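The migration pattern in the two settings files above has three moving parts: a transport handler kept registered for pre-9.0 remote callers, a cancellable task, and a local cluster-state operation. A condensed sketch, assuming it sits inside a TransportLocalClusterStateAction subclass like the one above:

```java
// 1. Keep a transport handler registered so older remote callers still work
//    (this mirrors the constructor above; executeDirect comes from the base class):
transportService.registerRequestHandler(
    actionName,
    executor,
    false,   // not forced execution
    true,    // may trip the circuit breaker
    ClusterGetSettingsAction.Request::new,
    (request, channel, task) -> executeDirect(task, request, new ChannelActionListener<>(channel))
);

// 2. The request opts in to cancellation: createTask(...) returns a CancellableTask
//    (see the Request class above).

// 3. The local operation checks the task before reading the cluster state:
//    ((CancellableTask) task).ensureNotCancelled();
```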
implements Writeable, ToXContentFragment private final Map clientCounts; private final Map byRemoteCluster; + // Whether we should use per-MRT (minimize roundtrips) metrics. + // ES|QL does not have "minimize_roundtrips" option, so we don't collect those metrics for ES|QL usage. + private boolean useMRT = true; /** * Creates a new stats instance with the provided info. @@ -191,6 +193,11 @@ public Map getByRemoteCluster() { return Collections.unmodifiableMap(byRemoteCluster); } + public CCSTelemetrySnapshot setUseMRT(boolean useMRT) { + this.useMRT = useMRT; + return this; + } + public static class PerClusterCCSTelemetry implements Writeable, ToXContentFragment { private long count; private long skippedCount; @@ -270,6 +277,11 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(count, skippedCount, took); } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } } /** @@ -291,8 +303,10 @@ public void add(CCSTelemetrySnapshot stats) { stats.featureCounts.forEach((k, v) -> featureCounts.merge(k, v, Long::sum)); stats.clientCounts.forEach((k, v) -> clientCounts.merge(k, v, Long::sum)); took.add(stats.took); - tookMrtTrue.add(stats.tookMrtTrue); - tookMrtFalse.add(stats.tookMrtFalse); + if (useMRT) { + tookMrtTrue.add(stats.tookMrtTrue); + tookMrtFalse.add(stats.tookMrtFalse); + } remotesPerSearchMax = Math.max(remotesPerSearchMax, stats.remotesPerSearchMax); if (totalCount > 0 && oldCount > 0) { // Weighted average @@ -328,30 +342,28 @@ private static void publishLatency(XContentBuilder builder, String name, LongMet @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(CCS_TELEMETRY_FIELD_NAME); - { - builder.field("total", totalCount); - builder.field("success", successCount); - builder.field("skipped", skippedRemotes); - publishLatency(builder, "took", took); + builder.field("total", totalCount); + builder.field("success", successCount); + builder.field("skipped", skippedRemotes); + publishLatency(builder, "took", took); + if (useMRT) { publishLatency(builder, "took_mrt_true", tookMrtTrue); publishLatency(builder, "took_mrt_false", tookMrtFalse); - builder.field("remotes_per_search_max", remotesPerSearchMax); - builder.field("remotes_per_search_avg", remotesPerSearchAvg); - builder.field("failure_reasons", failureReasons); - builder.field("features", featureCounts); - builder.field("clients", clientCounts); - builder.startObject("clusters"); - { - for (var entry : byRemoteCluster.entrySet()) { - String remoteName = entry.getKey(); - if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(remoteName)) { - remoteName = SearchResponse.LOCAL_CLUSTER_NAME_REPRESENTATION; - } - builder.field(remoteName, entry.getValue()); + } + builder.field("remotes_per_search_max", remotesPerSearchMax); + builder.field("remotes_per_search_avg", remotesPerSearchAvg); + builder.field("failure_reasons", failureReasons); + builder.field("features", featureCounts); + builder.field("clients", clientCounts); + builder.startObject("clusters"); + { + for (var entry : byRemoteCluster.entrySet()) { + String remoteName = entry.getKey(); + if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(remoteName)) { + remoteName = SearchResponse.LOCAL_CLUSTER_NAME_REPRESENTATION; } + builder.field(remoteName, entry.getValue()); } - builder.endObject(); } builder.endObject(); return builder; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java index 9e58d6d8febef..29a7dcb5d07d8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsage.java @@ -10,6 +10,7 @@ package org.elasticsearch.action.admin.cluster.stats; import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ShardOperationFailedException; @@ -20,6 +21,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.query.SearchTimeoutException; +import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskCancelledException; import java.util.Arrays; @@ -84,6 +86,15 @@ public Builder setClient(String client) { return this; } + public Builder setClientFromTask(Task task) { + String client = task.getHeader(Task.X_ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER); + if (client != null) { + return setClient(client); + } else { + return this; + } + } + public Builder skippedRemote(String remote) { this.skippedRemotes.add(remote); return this; @@ -133,6 +144,10 @@ public static Result getFailureType(Exception e) { if (ExceptionsHelper.unwrapCorruption(e) != null) { return Result.CORRUPTION; } + ElasticsearchStatusException se = (ElasticsearchStatusException) ExceptionsHelper.unwrap(e, ElasticsearchStatusException.class); + if (se != null && se.getDetailedMessage().contains("license")) { + return Result.LICENSE; + } // This is kind of last resort check - if we still don't know the reason but all shard failures are remote, // we assume it's remote's fault somehow. if (e instanceof SearchPhaseExecutionException spe) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java index 6c8178282d3c3..3f04eceed7eb5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java @@ -47,6 +47,7 @@ public enum Result { TIMEOUT("timeout"), CORRUPTION("corruption"), SECURITY("security"), + LICENSE("license"), // May be helpful if there's a lot of other reasons, and it may be hard to calculate the unknowns for some clients. UNKNOWN("other"); @@ -106,8 +107,14 @@ public String getName() { private final Map clientCounts; private final Map byRemoteCluster; + // Should we calculate separate metrics per MRT? 
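A hedged sketch of how the CCSUsage.Builder additions above get used when recording one cross-cluster request; build() and setFailure(Result) are assumed from the full class (they are not shown in this hunk), the exact home of the static getFailureType is as shown in the diff, and ccsUsageHolder is a hypothetical CCSUsageTelemetry instance:

```java
// Hypothetical recording path at the end of a cross-cluster request.
CCSUsage.Builder usageBuilder = new CCSUsage.Builder();
usageBuilder.setClientFromTask(task);     // picks up X-Elastic-Product-Origin, if set
usageBuilder.skippedRemote("remote_b");   // a remote that was skipped (made-up name)
if (failure != null) {
    // getFailureType may now classify license errors as Result.LICENSE
    usageBuilder.setFailure(CCSUsage.Builder.getFailureType(failure)); // setFailure assumed
}
ccsUsageHolder.updateUsage(usageBuilder.build());                      // build() assumed
```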
+ private final boolean useMRT; public CCSUsageTelemetry() { + this(true); + } + + public CCSUsageTelemetry(boolean useMRT) { this.byRemoteCluster = new ConcurrentHashMap<>(); totalCount = new LongAdder(); successCount = new LongAdder(); @@ -119,6 +126,7 @@ public CCSUsageTelemetry() { skippedRemotes = new LongAdder(); featureCounts = new ConcurrentHashMap<>(); clientCounts = new ConcurrentHashMap<>(); + this.useMRT = useMRT; } public void updateUsage(CCSUsage ccsUsage) { @@ -134,10 +142,12 @@ private void doUpdate(CCSUsage ccsUsage) { if (isSuccess(ccsUsage)) { successCount.increment(); took.record(searchTook); - if (isMRT(ccsUsage)) { - tookMrtTrue.record(searchTook); - } else { - tookMrtFalse.record(searchTook); + if (useMRT) { + if (isMRT(ccsUsage)) { + tookMrtTrue.record(searchTook); + } else { + tookMrtFalse.record(searchTook); + } } ccsUsage.getPerClusterUsage().forEach((r, u) -> byRemoteCluster.computeIfAbsent(r, PerClusterCCSTelemetry::new).update(u)); } else { @@ -243,6 +253,6 @@ public CCSTelemetrySnapshot getCCSTelemetrySnapshot() { Collections.unmodifiableMap(Maps.transformValues(featureCounts, LongAdder::longValue)), Collections.unmodifiableMap(Maps.transformValues(clientCounts, LongAdder::longValue)), Collections.unmodifiableMap(Maps.transformValues(byRemoteCluster, PerClusterCCSTelemetry::getSnapshot)) - ); + ).setUseMRT(useMRT); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index abeb73e5d8c3e..48b4e967742cd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -31,7 +31,8 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse { private final ClusterHealthStatus clusterStatus; private final SearchUsageStats searchUsageStats; private final RepositoryUsageStats repositoryUsageStats; - private final CCSTelemetrySnapshot ccsMetrics; + private final CCSTelemetrySnapshot searchCcsMetrics; + private final CCSTelemetrySnapshot esqlCcsMetrics; public ClusterStatsNodeResponse(StreamInput in) throws IOException { super(in); @@ -46,10 +47,15 @@ public ClusterStatsNodeResponse(StreamInput in) throws IOException { } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { repositoryUsageStats = RepositoryUsageStats.readFrom(in); - ccsMetrics = new CCSTelemetrySnapshot(in); + searchCcsMetrics = new CCSTelemetrySnapshot(in); } else { repositoryUsageStats = RepositoryUsageStats.EMPTY; - ccsMetrics = new CCSTelemetrySnapshot(); + searchCcsMetrics = new CCSTelemetrySnapshot(); + } + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_TELEMETRY_STATS)) { + esqlCcsMetrics = new CCSTelemetrySnapshot(in); + } else { + esqlCcsMetrics = new CCSTelemetrySnapshot(); } } @@ -61,7 +67,8 @@ public ClusterStatsNodeResponse( ShardStats[] shardsStats, SearchUsageStats searchUsageStats, RepositoryUsageStats repositoryUsageStats, - CCSTelemetrySnapshot ccsTelemetrySnapshot + CCSTelemetrySnapshot ccsTelemetrySnapshot, + CCSTelemetrySnapshot esqlTelemetrySnapshot ) { super(node); this.nodeInfo = nodeInfo; @@ -70,7 +77,8 @@ public ClusterStatsNodeResponse( this.clusterStatus = clusterStatus; this.searchUsageStats = Objects.requireNonNull(searchUsageStats); this.repositoryUsageStats = Objects.requireNonNull(repositoryUsageStats); - this.ccsMetrics = 
ccsTelemetrySnapshot; + this.searchCcsMetrics = ccsTelemetrySnapshot; + this.esqlCcsMetrics = esqlTelemetrySnapshot; } public NodeInfo nodeInfo() { @@ -101,8 +109,12 @@ public RepositoryUsageStats repositoryUsageStats() { return repositoryUsageStats; } - public CCSTelemetrySnapshot getCcsMetrics() { - return ccsMetrics; + public CCSTelemetrySnapshot getSearchCcsMetrics() { + return searchCcsMetrics; + } + + public CCSTelemetrySnapshot getEsqlCcsMetrics() { + return esqlCcsMetrics; } @Override @@ -117,8 +129,11 @@ public void writeTo(StreamOutput out) throws IOException { } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { repositoryUsageStats.writeTo(out); - ccsMetrics.writeTo(out); + searchCcsMetrics.writeTo(out); } // else just drop these stats, ok for bwc + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_TELEMETRY_STATS)) { + esqlCcsMetrics.writeTo(out); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java index 5f7c45c5807a5..ed8ca2f94a78b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -36,10 +36,14 @@ public class ClusterStatsResponse extends BaseNodesResponse remoteClustersStats; + public static final String CCS_TELEMETRY_FIELD_NAME = "_search"; + public static final String ESQL_TELEMETRY_FIELD_NAME = "_esql"; + public ClusterStatsResponse( long timestamp, String clusterUUID, @@ -58,6 +62,7 @@ public ClusterStatsResponse( nodesStats = new ClusterStatsNodes(nodes); indicesStats = new ClusterStatsIndices(nodes, mappingStats, analysisStats, versionStats); ccsMetrics = new CCSTelemetrySnapshot(); + esqlMetrics = new CCSTelemetrySnapshot().setUseMRT(false); ClusterHealthStatus status = null; for (ClusterStatsNodeResponse response : nodes) { // only the master node populates the status @@ -66,7 +71,10 @@ public ClusterStatsResponse( break; } } - nodes.forEach(node -> ccsMetrics.add(node.getCcsMetrics())); + nodes.forEach(node -> { + ccsMetrics.add(node.getSearchCcsMetrics()); + esqlMetrics.add(node.getEsqlCcsMetrics()); + }); this.status = status; this.clusterSnapshotStats = clusterSnapshotStats; @@ -147,9 +155,18 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (remoteClustersStats != null) { builder.field("clusters", remoteClustersStats); } + builder.startObject(CCS_TELEMETRY_FIELD_NAME); ccsMetrics.toXContent(builder, params); builder.endObject(); + if (esqlMetrics.getTotalCount() > 0) { + builder.startObject(ESQL_TELEMETRY_FIELD_NAME); + esqlMetrics.toXContent(builder, params); + builder.endObject(); + } + + builder.endObject(); + return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java index 1bc2e1d13c864..29a124b3d0b20 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java @@ -21,7 +21,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.Nullable; import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.IndexSettings; 
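Distilled from the ClusterStatsResponse constructor above: the per-node snapshots are folded into two cluster-wide totals, and setUseMRT(false) keeps the minimize_roundtrips buckets out of the ES|QL totals. All types and methods here appear in this patch; only the nodes list is assumed:

```java
CCSTelemetrySnapshot searchTotals = new CCSTelemetrySnapshot();
CCSTelemetrySnapshot esqlTotals = new CCSTelemetrySnapshot().setUseMRT(false); // ES|QL has no minimize_roundtrips option
for (ClusterStatsNodeResponse node : nodes) {
    searchTotals.add(node.getSearchCcsMetrics());
    esqlTotals.add(node.getEsqlCcsMetrics());
}
// searchTotals renders under "_search" with the took_mrt_* buckets;
// esqlTotals renders under "_esql" without them, and only when non-empty.
```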
import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; @@ -71,7 +71,7 @@ public static MappingStats of(Metadata metadata, Runnable ensureNotCancelled) { } AnalysisStats.countMapping(mappingCounts, indexMetadata); - var sourceMode = SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(indexMetadata.getSettings()); + var sourceMode = IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.get(indexMetadata.getSettings()); sourceModeUsageCount.merge(sourceMode.toString().toLowerCase(Locale.ENGLISH), 1, Integer::sum); } final AtomicLong totalFieldCount = new AtomicLong(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 2c20daa5d7afb..6f69def7aa4e0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -103,6 +103,7 @@ public class TransportClusterStatsAction extends TransportNodesAction< private final RepositoriesService repositoriesService; private final SearchUsageHolder searchUsageHolder; private final CCSUsageTelemetry ccsUsageHolder; + private final CCSUsageTelemetry esqlUsageHolder; private final Executor clusterStateStatsExecutor; private final MetadataStatsCache mappingStatsCache; @@ -135,6 +136,7 @@ public TransportClusterStatsAction( this.repositoriesService = repositoriesService; this.searchUsageHolder = usageService.getSearchUsageHolder(); this.ccsUsageHolder = usageService.getCcsUsageHolder(); + this.esqlUsageHolder = usageService.getEsqlUsageHolder(); this.clusterStateStatsExecutor = threadPool.executor(ThreadPool.Names.MANAGEMENT); this.mappingStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), MappingStats::of); this.analysisStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), AnalysisStats::of); @@ -293,6 +295,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq final RepositoryUsageStats repositoryUsageStats = repositoriesService.getUsageStats(); final CCSTelemetrySnapshot ccsTelemetry = ccsUsageHolder.getCCSTelemetrySnapshot(); + final CCSTelemetrySnapshot esqlTelemetry = esqlUsageHolder.getCCSTelemetrySnapshot(); return new ClusterStatsNodeResponse( nodeInfo.getNode(), @@ -302,7 +305,8 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq shardsStats.toArray(new ShardStats[shardsStats.size()]), searchUsageStats, repositoryUsageStats, - ccsTelemetry + ccsTelemetry, + esqlTelemetry ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java index d66cab1d2d717..aa861148f022c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -74,7 +74,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest { - public GetAliasesRequestBuilder(ElasticsearchClient client, String... aliases) { - super(client, GetAliasesAction.INSTANCE, new GetAliasesRequest(aliases)); + public GetAliasesRequestBuilder(ElasticsearchClient client, TimeValue masterTimeout, String... 
aliases) { + super(client, GetAliasesAction.INSTANCE, new GetAliasesRequest(masterTimeout, aliases)); } public GetAliasesRequestBuilder setAliases(String... aliases) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index c233ed57b748e..cc96954c8a8e4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -480,13 +480,19 @@ public CreateIndexRequest requireDataStream(boolean requireDataStream) { return this; } + /** + * Returns whether the failure store should be initialized. N.B. If true, failure store index creation will be performed regardless of + * whether the template indicates that the failure store is enabled. + */ public boolean isInitializeFailureStore() { return initializeFailureStore; } /** * Set whether this CreateIndexRequest should initialize the failure store on data stream creation. This can be necessary when, for - * example, a failure occurs while trying to ingest a document into a data stream that has to be auto-created. + * example, a failure occurs while trying to ingest a document into a data stream that has to be auto-created. N.B. If true, failure + * store index creation will be performed regardless of whether the template indicates that the failure store is enabled. It is the + * caller's responsibility to ensure that this is correct. */ public CreateIndexRequest initializeFailureStore(boolean initializeFailureStore) { this.initializeFailureStore = initializeFailureStore; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java index 96fae42b05505..48a2ec171b780 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java @@ -43,7 +43,7 @@ public class DeleteIndexRequest extends AcknowledgedRequest .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index 801dbbdee0858..be7aaeec8f69e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.info.ClusterInfoRequest; -import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ArrayUtils; @@ -95,13 +94,7 @@ public static Feature[] fromRequest(RestRequest request) { private transient boolean includeDefaults = false; public GetIndexRequest() { - super( - DataStream.isFailureStoreFeatureFlagEnabled() - ? 
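The expanded javadoc on CreateIndexRequest above warns that initializeFailureStore bypasses the template's failure-store setting. A small illustrative call site for the auto-create path it describes (the data stream name is hypothetical; both setters are the ones documented above):

```java
// Illustrative auto-create path, e.g. while redirecting a failed ingest document.
CreateIndexRequest request = new CreateIndexRequest("logs-app-default");
request.requireDataStream(true);        // must resolve to a data stream template
request.initializeFailureStore(true);   // caller asserts the failure store is wanted,
                                        // even if the template does not enable it
```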
IndicesOptions.builder(IndicesOptions.strictExpandOpen()) - .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) - .build() - : IndicesOptions.strictExpandOpen() - ); + super(IndicesOptions.strictExpandOpen()); } public GetIndexRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java index dd4114c947174..84789d8a2acfb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.info.ClusterInfoRequest; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -21,7 +22,9 @@ public class GetMappingsRequest extends ClusterInfoRequest { - public GetMappingsRequest() {} + public GetMappingsRequest(TimeValue masterTimeout) { + super(masterTimeout); + } public GetMappingsRequest(StreamInput in) throws IOException { super(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java index 3f54138581398..a12ba4f60c26a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java @@ -11,13 +11,14 @@ import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; +import org.elasticsearch.core.TimeValue; public class GetMappingsRequestBuilder extends ClusterInfoRequestBuilder< GetMappingsRequest, GetMappingsResponse, GetMappingsRequestBuilder> { - public GetMappingsRequestBuilder(ElasticsearchClient client, String... indices) { - super(client, GetMappingsAction.INSTANCE, new GetMappingsRequest().indices(indices)); + public GetMappingsRequestBuilder(ElasticsearchClient client, TimeValue masterTimeout, String... 
indices) { + super(client, GetMappingsAction.INSTANCE, new GetMappingsRequest(masterTimeout).indices(indices)); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 7b782c6da5a84..05cc0d2cf05d8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -82,7 +82,7 @@ public class PutMappingRequest extends AcknowledgedRequest im .allowClosedIndices(true) .allowAliasToMultipleIndices(true) .ignoreThrottled(false) - .allowFailureIndices(false) + .allowSelectors(false) ) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index 749470e181deb..24f8735b6bd7f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -20,6 +20,8 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataMappingService; import org.elasticsearch.cluster.service.ClusterService; @@ -40,6 +42,7 @@ import java.util.List; import java.util.Objects; import java.util.Optional; +import java.util.SortedMap; /** * Put mapping action. @@ -106,7 +109,14 @@ protected void masterOperation( return; } - final String message = checkForSystemIndexViolations(systemIndices, concreteIndices, request); + String message = checkForFailureStoreViolations(clusterService.state(), concreteIndices, request); + if (message != null) { + logger.warn(message); + listener.onFailure(new IllegalStateException(message)); + return; + } + + message = checkForSystemIndexViolations(systemIndices, concreteIndices, request); if (message != null) { logger.warn(message); listener.onFailure(new IllegalStateException(message)); @@ -172,6 +182,33 @@ static void performMappingUpdate( metadataMappingService.putMapping(updateRequest, wrappedListener); } + static String checkForFailureStoreViolations(ClusterState clusterState, Index[] concreteIndices, PutMappingRequest request) { + // Requests that a cluster generates itself are permitted to make changes to mappings + // so that rolling upgrade scenarios still work. We check this via the request's origin. 
+ if (Strings.isNullOrEmpty(request.origin()) == false) { + return null; + } + + List violations = new ArrayList<>(); + SortedMap indicesLookup = clusterState.metadata().getIndicesLookup(); + for (Index index : concreteIndices) { + IndexAbstraction indexAbstraction = indicesLookup.get(index.getName()); + if (indexAbstraction != null) { + DataStream maybeDataStream = indexAbstraction.getParentDataStream(); + if (maybeDataStream != null && maybeDataStream.isFailureStoreIndex(index.getName())) { + violations.add(index.getName()); + } + } + } + + if (violations.isEmpty() == false) { + return "Cannot update mappings in " + + violations + + ": mappings for indices contained in data stream failure stores cannot be updated"; + } + return null; + } + static String checkForSystemIndexViolations(SystemIndices systemIndices, Index[] concreteIndices, PutMappingRequest request) { // Requests that a cluster generates itself are permitted to have a difference in mappings // so that rolling upgrade scenarios still work. We check this via the request's origin. diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionRequest.java index 9c5b6097b11bd..ebc9b0fea1be4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveClusterActionRequest.java @@ -9,12 +9,14 @@ package org.elasticsearch.action.admin.indices.resolve; +import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.tasks.CancellableTask; @@ -30,6 +32,7 @@ public class ResolveClusterActionRequest extends ActionRequest implements IndicesRequest.Replaceable { public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpen(); + public static final String TRANSPORT_VERSION_ERROR_MESSAGE_PREFIX = "ResolveClusterAction requires at least version"; private String[] names; /* @@ -65,12 +68,7 @@ public ResolveClusterActionRequest(String[] names, IndicesOptions indicesOptions public ResolveClusterActionRequest(StreamInput in) throws IOException { super(in); if (in.getTransportVersion().before(TransportVersions.V_8_13_0)) { - throw new UnsupportedOperationException( - "ResolveClusterAction requires at least version " - + TransportVersions.V_8_13_0.toReleaseVersion() - + " but was " - + in.getTransportVersion().toReleaseVersion() - ); + throw new UnsupportedOperationException(createVersionErrorMessage(in.getTransportVersion())); } this.names = in.readStringArray(); this.indicesOptions = IndicesOptions.readIndicesOptions(in); @@ -81,17 +79,21 @@ public ResolveClusterActionRequest(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); if (out.getTransportVersion().before(TransportVersions.V_8_13_0)) { - throw new UnsupportedOperationException( - "ResolveClusterAction requires at least version " - + 
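To make the new guard above concrete: a hypothetical user request that checkForFailureStoreViolations rejects (the failure-store index name is illustrative; the error string is the one built above):

```java
// Hypothetical user request against a failure-store backing index:
PutMappingRequest request = new PutMappingRequest(".fs-logs-app-default-2025.01.01-000001")
    .source("{\"properties\":{\"message\":{\"type\":\"keyword\"}}}", XContentType.JSON);
// With an empty origin, checkForFailureStoreViolations returns
// "Cannot update mappings in [...]: mappings for indices contained in data stream
// failure stores cannot be updated", and the action fails with an IllegalStateException.
```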
TransportVersions.V_8_13_0.toReleaseVersion() - + " but was " - + out.getTransportVersion().toReleaseVersion() - ); + throw new UnsupportedOperationException(createVersionErrorMessage(out.getTransportVersion())); } out.writeStringArray(names); indicesOptions.writeIndicesOptions(out); } + private String createVersionErrorMessage(TransportVersion versionFound) { + return Strings.format( + "%s %s but was %s", + TRANSPORT_VERSION_ERROR_MESSAGE_PREFIX, + TransportVersions.V_8_13_0.toReleaseVersion(), + versionFound.toReleaseVersion() + ); + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java index f5c100b7884bb..4aa022aff1c80 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -59,6 +59,7 @@ import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; +import java.util.stream.Stream; import static org.elasticsearch.action.search.TransportSearchHelper.checkCCSVersionCompatibility; @@ -598,12 +599,13 @@ private static void mergeResults( private static void enrichIndexAbstraction( ClusterState clusterState, - ResolvedExpression indexAbstraction, + ResolvedExpression resolvedExpression, List indices, List aliases, List dataStreams ) { - IndexAbstraction ia = clusterState.metadata().getIndicesLookup().get(indexAbstraction.resource()); + SortedMap indicesLookup = clusterState.metadata().getIndicesLookup(); + IndexAbstraction ia = indicesLookup.get(resolvedExpression.resource()); if (ia != null) { switch (ia.getType()) { case CONCRETE_INDEX -> { @@ -632,13 +634,24 @@ private static void enrichIndexAbstraction( ); } case ALIAS -> { - String[] indexNames = ia.getIndices().stream().map(Index::getName).toArray(String[]::new); + String[] indexNames = getAliasIndexStream(resolvedExpression, ia, indicesLookup).map(Index::getName) + .toArray(String[]::new); Arrays.sort(indexNames); aliases.add(new ResolvedAlias(ia.getName(), indexNames)); } case DATA_STREAM -> { DataStream dataStream = (DataStream) ia; - String[] backingIndices = dataStream.getIndices().stream().map(Index::getName).toArray(String[]::new); + Stream dataStreamIndices = resolvedExpression.selector() == null + ? 
dataStream.getIndices().stream() + : switch (resolvedExpression.selector()) { + case DATA -> dataStream.getBackingIndices().getIndices().stream(); + case FAILURES -> dataStream.getFailureIndices().getIndices().stream(); + case ALL_APPLICABLE -> Stream.concat( + dataStream.getBackingIndices().getIndices().stream(), + dataStream.getFailureIndices().getIndices().stream() + ); + }; + String[] backingIndices = dataStreamIndices.map(Index::getName).toArray(String[]::new); dataStreams.add(new ResolvedDataStream(dataStream.getName(), backingIndices, DataStream.TIMESTAMP_FIELD_NAME)); } default -> throw new IllegalStateException("unknown index abstraction type: " + ia.getType()); @@ -646,6 +659,52 @@ private static void enrichIndexAbstraction( } } + private static Stream getAliasIndexStream( + ResolvedExpression resolvedExpression, + IndexAbstraction ia, + SortedMap indicesLookup + ) { + Stream aliasIndices; + if (resolvedExpression.selector() == null) { + aliasIndices = ia.getIndices().stream(); + } else { + aliasIndices = switch (resolvedExpression.selector()) { + case DATA -> ia.getIndices().stream(); + case FAILURES -> { + assert ia.isDataStreamRelated() : "Illegal selector [failures] used on non data stream alias"; + yield ia.getIndices() + .stream() + .map(Index::getName) + .map(indicesLookup::get) + .map(IndexAbstraction::getParentDataStream) + .filter(Objects::nonNull) + .distinct() + .map(DataStream::getFailureIndices) + .flatMap(failureIndices -> failureIndices.getIndices().stream()); + } + case ALL_APPLICABLE -> { + if (ia.isDataStreamRelated()) { + yield Stream.concat( + ia.getIndices().stream(), + ia.getIndices() + .stream() + .map(Index::getName) + .map(indicesLookup::get) + .map(IndexAbstraction::getParentDataStream) + .filter(Objects::nonNull) + .distinct() + .map(DataStream::getFailureIndices) + .flatMap(failureIndices -> failureIndices.getIndices().stream()) + ); + } else { + yield ia.getIndices().stream(); + } + } + }; + } + return aliasIndices; + } + enum Attribute { OPEN, CLOSED, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java index c30a2a44274a7..50dbaf33d2e4f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterAction.java @@ -51,7 +51,6 @@ public class TransportResolveClusterAction extends HandledTransportAction { private static final Logger logger = LogManager.getLogger(TransportResolveClusterAction.class); - private static final String TRANSPORT_VERSION_ERROR_MESSAGE = "ResolveClusterAction requires at least Transport Version"; public static final String NAME = "indices:admin/resolve/cluster"; public static final ActionType TYPE = new ActionType<>(NAME); @@ -141,7 +140,7 @@ protected void doExecuteForked(Task task, ResolveClusterActionRequest request, A RemoteClusterClient remoteClusterClient = remoteClusterService.getRemoteClusterClient( clusterAlias, searchCoordinationExecutor, - RemoteClusterService.DisconnectedStrategy.RECONNECT_IF_DISCONNECTED + RemoteClusterService.DisconnectedStrategy.FAIL_IF_DISCONNECTED ); var remoteRequest = new ResolveClusterActionRequest(originalIndices.indices(), request.indicesOptions()); // allow cancellation requests to propagate to remote clusters @@ -175,7 +174,13 @@ public void onFailure(Exception failure) { 
failure, ElasticsearchSecurityException.class ) instanceof ElasticsearchSecurityException ese) { - clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(true, skipUnavailable, ese.getMessage())); + /* + * Some ElasticsearchSecurityExceptions come from the local cluster's security interceptor after the + * client.execute call has been issued but before any request has gone to the remote cluster. An + * ElasticsearchSecurityException therefore does not tell us whether the remote cluster is available, + * so mark it as connected=false. + */ + clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(false, skipUnavailable, ese.getMessage())); } else if (ExceptionsHelper.unwrap(failure, IndexNotFoundException.class) instanceof IndexNotFoundException infe) { clusterInfoMap.put(clusterAlias, new ResolveClusterInfo(true, skipUnavailable, infe.getMessage())); } else { @@ -184,7 +189,7 @@ public void onFailure(Exception failure) { // this error at the Transport layer BEFORE it sends the request to the remote cluster, since there // are version guards on the Writeables for this Action, namely ResolveClusterActionRequest.writeTo if (cause instanceof UnsupportedOperationException - && cause.getMessage().contains(TRANSPORT_VERSION_ERROR_MESSAGE)) { + && cause.getMessage().contains(ResolveClusterActionRequest.TRANSPORT_VERSION_ERROR_MESSAGE_PREFIX)) { // Since this cluster does not have _resolve/cluster, we call the _resolve/index // endpoint to fill in the matching_indices field of the response for that cluster ResolveIndexAction.Request resolveIndexRequest = new ResolveIndexAction.Request( diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java index a677897d79633..c845d1a3854c9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java @@ -21,6 +21,8 @@ import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; @@ -31,7 +33,6 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -53,8 +54,6 @@ public final class LazyRolloverAction extends ActionType { private static final Logger logger = LogManager.getLogger(LazyRolloverAction.class); - public static final NodeFeature DATA_STREAM_LAZY_ROLLOVER = new NodeFeature("data_stream.rollover.lazy"); - public static final LazyRolloverAction INSTANCE = new LazyRolloverAction(); public static final String NAME = "indices:admin/data_stream/lazy_rollover"; @@ -119,32 +118,38 @@ protected void masterOperation( : "The auto rollover action does not expect any other parameters in the request apart from the data stream name"; Metadata
metadata = clusterState.metadata(); - DataStream dataStream = metadata.dataStreams().get(rolloverRequest.getRolloverTarget()); + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + boolean isFailureStoreRollover = resolvedRolloverTarget.selector() != null + && resolvedRolloverTarget.selector().shouldIncludeFailures(); + + DataStream dataStream = metadata.dataStreams().get(resolvedRolloverTarget.resource()); // Skip submitting the task if we detect that the lazy rollover has been already executed. - if (isLazyRolloverNeeded(dataStream, rolloverRequest.targetsFailureStore()) == false) { - DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(rolloverRequest.targetsFailureStore()); + if (isLazyRolloverNeeded(dataStream, isFailureStoreRollover) == false) { + DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(isFailureStoreRollover); listener.onResponse(noopLazyRolloverResponse(targetIndices)); return; } // We evaluate the names of the source index as well as what our newly created index would be. final MetadataRolloverService.NameResolution trialRolloverNames = MetadataRolloverService.resolveRolloverNames( clusterState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), - rolloverRequest.targetsFailureStore() + isFailureStoreRollover ); final String trialSourceIndexName = trialRolloverNames.sourceName(); final String trialRolloverIndexName = trialRolloverNames.rolloverName(); MetadataCreateIndexService.validateIndexName(trialRolloverIndexName, clusterState.metadata(), clusterState.routingTable()); - assert metadata.dataStreams().containsKey(rolloverRequest.getRolloverTarget()) : "Auto-rollover applies only to data streams"; + assert metadata.dataStreams().containsKey(resolvedRolloverTarget.resource()) : "Auto-rollover applies only to data streams"; String source = "lazy_rollover source [" + trialSourceIndexName + "] to target [" + trialRolloverIndexName + "]"; // We create a new rollover request to ensure that it doesn't contain any other parameters apart from the data stream name // This will provide a more resilient user experience - var newRolloverRequest = new RolloverRequest(rolloverRequest.getRolloverTarget(), null); - newRolloverRequest.setIndicesOptions(rolloverRequest.indicesOptions()); + var newRolloverRequest = new RolloverRequest(resolvedRolloverTarget.combined(), null); LazyRolloverTask rolloverTask = new LazyRolloverTask(newRolloverRequest, listener); lazyRolloverTaskQueue.submitTask(source, rolloverTask, rolloverRequest.masterNodeTimeout()); } @@ -223,12 +228,19 @@ public ClusterState executeTask( AllocationActionMultiListener allocationActionMultiListener ) throws Exception { + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + boolean isFailureStoreRollover = resolvedRolloverTarget.selector() != null + && resolvedRolloverTarget.selector().shouldIncludeFailures(); + // If the data stream has been rolled over since it was marked for lazy rollover, this operation is a noop - final DataStream dataStream = currentState.metadata().dataStreams().get(rolloverRequest.getRolloverTarget()); + final DataStream dataStream = currentState.metadata().dataStreams().get(resolvedRolloverTarget.resource()); assert dataStream != null; - if 
(isLazyRolloverNeeded(dataStream, rolloverRequest.targetsFailureStore()) == false) { - final DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(rolloverRequest.targetsFailureStore()); + if (isLazyRolloverNeeded(dataStream, isFailureStoreRollover) == false) { + final DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(isFailureStoreRollover); var noopResponse = noopLazyRolloverResponse(targetIndices); notifyAllListeners(rolloverTaskContexts, context -> context.getTask().listener.onResponse(noopResponse)); return currentState; @@ -237,7 +249,7 @@ public ClusterState executeTask( // Perform the actual rollover final var rolloverResult = rolloverService.rolloverClusterState( currentState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), List.of(), @@ -246,7 +258,7 @@ public ClusterState executeTask( false, null, null, - rolloverRequest.targetsFailureStore() + isFailureStoreRollover ); results.add(rolloverResult); logger.trace("lazy rollover result [{}]", rolloverResult); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 552ce727d4249..608d32d50a856 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -16,7 +16,8 @@ import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.mapper.MapperService; @@ -81,7 +82,7 @@ public class RolloverRequest extends AcknowledgedRequest implem private RolloverConditions conditions = new RolloverConditions(); // the index name "_na_" is never read back, what matters are settings, mappings and aliases private CreateIndexRequest createIndexRequest = new CreateIndexRequest("_na_"); - private IndicesOptions indicesOptions = IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + private IndicesOptions indicesOptions = IndicesOptions.strictSingleIndexNoExpandForbidClosedAllowSelectors(); public RolloverRequest(StreamInput in) throws IOException { super(in); @@ -125,12 +126,15 @@ public ActionRequestValidationException validate() { ); } - var selector = indicesOptions.selectorOptions().defaultSelector(); - if (selector == IndexComponentSelector.ALL_APPLICABLE) { - validationException = addValidationError( - "rollover cannot be applied to both regular and failure indices at the same time", - validationException - ); + if (rolloverTarget != null) { + ResolvedExpression resolvedExpression = SelectorResolver.parseExpression(rolloverTarget, indicesOptions); + IndexComponentSelector selector = resolvedExpression.selector(); + if (IndexComponentSelector.ALL_APPLICABLE.equals(selector)) { + validationException = addValidationError( + "rollover cannot be applied to both regular and failure indices at the same time", + 
validationException + ); + } + } return validationException; @@ -162,13 +166,6 @@ public IndicesOptions indicesOptions() { return indicesOptions; } - /** - * @return true of the rollover request targets the failure store, false otherwise. - */ - public boolean targetsFailureStore() { - return DataStream.isFailureStoreFeatureFlagEnabled() && indicesOptions.includeFailureIndices(); - } - public void setIndicesOptions(IndicesOptions indicesOptions) { this.indicesOptions = indicesOptions; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index c5c874f9bcddf..4f0aa9c5bade4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -36,6 +36,8 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexMetadataStats; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; @@ -149,8 +151,7 @@ protected ClusterBlockException checkBlock(RolloverRequest request, ClusterState .matchOpen(request.indicesOptions().expandWildcardsOpen()) .matchClosed(request.indicesOptions().expandWildcardsClosed()) .build(), - IndicesOptions.GatekeeperOptions.DEFAULT, - request.indicesOptions().selectorOptions() + IndicesOptions.GatekeeperOptions.DEFAULT ); return state.blocks() @@ -170,11 +171,18 @@ protected void masterOperation( assert task instanceof CancellableTask; Metadata metadata = clusterState.metadata(); + + // Parse the rollover request's target since the expression may contain a selector on it + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + boolean targetFailureStore = resolvedRolloverTarget.selector() != null && resolvedRolloverTarget.selector().shouldIncludeFailures(); + + // We evaluate the names of the index for which we should evaluate conditions, as well as what our newly created index *would* be.
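To make the selector handling above concrete: a rollover target such as logs-app::failures is split into a resource and an optional data / failures / all-applicable selector, and rollover rejects the all-applicable form. Below is a minimal, self-contained Java sketch of that split. It only mimics what this diff shows of SelectorResolver.parseExpression, ResolvedExpression#combined and shouldIncludeFailures; the class and method names are invented for the sketch, and the "::*" spelling for ALL_APPLICABLE is an assumption, not something this diff states.

public class SelectorParseSketch {

    enum Selector {
        DATA("data", false),
        FAILURES("failures", true),
        ALL_APPLICABLE("*", true); // "::*" spelling is an assumption for this sketch

        final String token;
        final boolean includeFailures;

        Selector(String token, boolean includeFailures) {
            this.token = token;
            this.includeFailures = includeFailures;
        }

        boolean shouldIncludeFailures() {
            return includeFailures;
        }

        static Selector fromToken(String token) {
            for (Selector s : values()) {
                if (s.token.equals(token)) {
                    return s;
                }
            }
            throw new IllegalArgumentException("unknown selector [" + token + "]");
        }
    }

    record Resolved(String resource, Selector selector) {
        // Rebuild the combined form, mirroring ResolvedExpression#combined() in the hunks above.
        String combined() {
            return selector == null ? resource : resource + "::" + selector.token;
        }
    }

    static Resolved parse(String expression) {
        int idx = expression.lastIndexOf("::");
        if (idx < 0) {
            return new Resolved(expression, null); // no selector on the expression
        }
        return new Resolved(expression.substring(0, idx), Selector.fromToken(expression.substring(idx + 2)));
    }

    public static void main(String[] args) {
        Resolved target = parse("logs-app::failures");
        System.out.println(target.resource() + " -> " + target.selector()); // logs-app -> FAILURES
        System.out.println(target.selector().shouldIncludeFailures());      // true
        // Mirrors RolloverRequest#validate() above: targeting both components at once is rejected.
        if (parse("logs-app::*").selector() == Selector.ALL_APPLICABLE) {
            System.out.println("rollover cannot be applied to both regular and failure indices at the same time");
        }
    }
}

Splitting once up front, as these hunks do, keeps the selector out of every downstream lookup: metadata.dataStreams().get(...) and getIndicesLookup().get(...) only ever see the bare resource name, and combined() re-attaches the selector where a follow-up request (such as the stats request further down) needs the full expression again.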
- boolean targetFailureStore = rolloverRequest.targetsFailureStore(); final MetadataRolloverService.NameResolution trialRolloverNames = MetadataRolloverService.resolveRolloverNames( clusterState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), targetFailureStore @@ -183,7 +191,7 @@ protected void masterOperation( final String trialRolloverIndexName = trialRolloverNames.rolloverName(); MetadataCreateIndexService.validateIndexName(trialRolloverIndexName, metadata, clusterState.routingTable()); - boolean isDataStream = metadata.dataStreams().containsKey(rolloverRequest.getRolloverTarget()); + boolean isDataStream = metadata.dataStreams().containsKey(resolvedRolloverTarget.resource()); if (rolloverRequest.isLazy()) { if (isDataStream == false || rolloverRequest.getConditions().hasConditions()) { String message; @@ -201,7 +209,7 @@ protected void masterOperation( } if (rolloverRequest.isDryRun() == false) { metadataDataStreamsService.setRolloverOnWrite( - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), true, targetFailureStore, rolloverRequest.ackTimeout(), @@ -225,7 +233,7 @@ protected void masterOperation( final IndexAbstraction rolloverTargetAbstraction = clusterState.metadata() .getIndicesLookup() - .get(rolloverRequest.getRolloverTarget()); + .get(resolvedRolloverTarget.resource()); if (rolloverTargetAbstraction.getType() == IndexAbstraction.Type.ALIAS && rolloverTargetAbstraction.isDataStreamRelated()) { listener.onFailure( new IllegalStateException("Aliases to data streams cannot be rolled over. Please rollover the data stream itself.") @@ -246,10 +254,10 @@ protected void masterOperation( final var statsIndicesOptions = new IndicesOptions( IndicesOptions.ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS, IndicesOptions.WildcardOptions.builder().matchClosed(true).allowEmptyExpressions(false).build(), - IndicesOptions.GatekeeperOptions.DEFAULT, - rolloverRequest.indicesOptions().selectorOptions() + IndicesOptions.GatekeeperOptions.DEFAULT ); - IndicesStatsRequest statsRequest = new IndicesStatsRequest().indices(rolloverRequest.getRolloverTarget()) + // Make sure to recombine any selectors on the stats request + IndicesStatsRequest statsRequest = new IndicesStatsRequest().indices(resolvedRolloverTarget.combined()) .clear() .indicesOptions(statsIndicesOptions) .docs(true) @@ -266,9 +274,7 @@ protected void masterOperation( listener.delegateFailureAndWrap((delegate, statsResponse) -> { AutoShardingResult rolloverAutoSharding = null; - final IndexAbstraction indexAbstraction = clusterState.metadata() - .getIndicesLookup() - .get(rolloverRequest.getRolloverTarget()); + final IndexAbstraction indexAbstraction = clusterState.metadata().getIndicesLookup().get(resolvedRolloverTarget.resource()); if (indexAbstraction.getType().equals(IndexAbstraction.Type.DATA_STREAM)) { DataStream dataStream = (DataStream) indexAbstraction; final Optional indexStats = Optional.ofNullable(statsResponse) @@ -492,14 +498,20 @@ public ClusterState executeTask( ) throws Exception { final var rolloverTask = rolloverTaskContext.getTask(); final var rolloverRequest = rolloverTask.rolloverRequest(); + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + boolean targetFailureStore = resolvedRolloverTarget.selector() != null + && resolvedRolloverTarget.selector().shouldIncludeFailures(); // 
Regenerate the rollover names, as a rollover could have happened in between the pre-check and the cluster state update final var rolloverNames = MetadataRolloverService.resolveRolloverNames( currentState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), - rolloverRequest.targetsFailureStore() + targetFailureStore ); // Re-evaluate the conditions, now with our final source index name @@ -532,7 +544,7 @@ public ClusterState executeTask( final IndexAbstraction rolloverTargetAbstraction = currentState.metadata() .getIndicesLookup() - .get(rolloverRequest.getRolloverTarget()); + .get(resolvedRolloverTarget.resource()); final IndexMetadataStats sourceIndexStats = rolloverTargetAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM ? IndexMetadataStats.fromStatsResponse(rolloverSourceIndex, rolloverTask.statsResponse()) @@ -541,7 +553,7 @@ public ClusterState executeTask( // Perform the actual rollover final var rolloverResult = rolloverService.rolloverClusterState( currentState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), metConditions, @@ -550,7 +562,7 @@ public ClusterState executeTask( false, sourceIndexStats, rolloverTask.autoShardingResult(), - rolloverRequest.targetsFailureStore() + targetFailureStore ); results.add(rolloverResult); logger.trace("rollover result [{}]", rolloverResult); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java index 45d784d301bf1..d34e71f715a5d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -78,9 +78,7 @@ protected Iterator customXContentChunks(ToXContent.Params params) { getIndices().values().iterator(), indexSegments -> Iterators.concat( - ChunkedToXContentHelper.singleChunk( - (builder, p) -> builder.startObject(indexSegments.getIndex()).startObject(Fields.SHARDS) - ), + ChunkedToXContentHelper.chunk((builder, p) -> builder.startObject(indexSegments.getIndex()).startObject(Fields.SHARDS)), Iterators.flatMap( indexSegments.iterator(), indexSegment -> Iterators.concat( @@ -90,7 +88,7 @@ protected Iterator customXContentChunks(ToXContent.Params params) { indexSegment.iterator(), shardSegments -> Iterators.concat( - ChunkedToXContentHelper.singleChunk((builder, p) -> { + ChunkedToXContentHelper.chunk((builder, p) -> { builder.startObject(); builder.startObject(Fields.ROUTING); @@ -112,7 +110,7 @@ protected Iterator customXContentChunks(ToXContent.Params params) { shardSegments.iterator(), segment -> Iterators.concat( - ChunkedToXContentHelper.singleChunk((builder, p) -> { + ChunkedToXContentHelper.chunk((builder, p) -> { builder.startObject(segment.getName()); builder.field(Fields.GENERATION, segment.getGeneration()); builder.field(Fields.NUM_DOCS, segment.getNumDocs()); @@ -132,7 +130,7 @@ protected Iterator customXContentChunks(ToXContent.Params params) { return builder; }), getSegmentSortChunks(segment.getSegmentSort()), - ChunkedToXContentHelper.singleChunk((builder, p) -> { + ChunkedToXContentHelper.chunk((builder, p) -> { if (segment.attributes != null && segment.attributes.isEmpty() == false) { builder.field("attributes", 
segment.attributes); } @@ -141,13 +139,13 @@ protected Iterator customXContentChunks(ToXContent.Params params) { }) ) ), - ChunkedToXContentHelper.singleChunk((builder, p) -> builder.endObject().endObject()) + ChunkedToXContentHelper.chunk((builder, p) -> builder.endObject().endObject()) ) ), ChunkedToXContentHelper.endArray() ) ), - ChunkedToXContentHelper.singleChunk((builder, p) -> builder.endObject().endObject()) + ChunkedToXContentHelper.chunk((builder, p) -> builder.endObject().endObject()) ) ), ChunkedToXContentHelper.endObject() diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java index 50b46aa5e284f..dfb22a4f0cbae 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java @@ -50,9 +50,9 @@ protected Iterator customXContentChunks(ToXContent.Params params) { return Iterators.flatMap( stats.entrySet().stream().sorted(Map.Entry.comparingByKey()).iterator(), entry -> Iterators.concat( - ChunkedToXContentHelper.singleChunk((builder, p) -> builder.startObject(entry.getKey()).startArray("shards")), + ChunkedToXContentHelper.chunk((builder, p) -> builder.startObject(entry.getKey()).startArray("shards")), entry.getValue().iterator(), - ChunkedToXContentHelper.singleChunk((builder, p) -> builder.endArray().endObject()) + ChunkedToXContentHelper.chunk((builder, p) -> builder.endArray().endObject()) ) ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java index 5bdecd10075e6..6106e620521f7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java @@ -12,7 +12,6 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.core.Nullable; -import org.elasticsearch.features.NodeFeature; import java.util.ArrayList; import java.util.HashMap; @@ -22,9 +21,6 @@ public class IndexStats implements Iterable { - // feature was effectively reverted but we still need to keep this constant around - public static final NodeFeature REVERTED_TIER_CREATION_DATE = new NodeFeature("stats.tier_creation_date"); - private final String index; private final String uuid; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java deleted file mode 100644 index 558343db1023a..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ - -package org.elasticsearch.action.admin.indices.stats; - -import org.elasticsearch.features.FeatureSpecification; -import org.elasticsearch.features.NodeFeature; - -import java.util.Set; - -public class IndicesStatsFeatures implements FeatureSpecification { - - @Override - public Set getFeatures() { - return Set.of(IndexStats.REVERTED_TIER_CREATION_DATE); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java index 91e0e7cbc1dff..d6c9a3e2e544b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java @@ -204,7 +204,7 @@ protected Iterator customXContentChunks(ToXContent.Params params) { if (level == ClusterStatsLevel.INDICES || level == ClusterStatsLevel.SHARDS) { return Iterators.concat( - ChunkedToXContentHelper.singleChunk((builder, p) -> { + ChunkedToXContentHelper.chunk((builder, p) -> { commonStats(builder, p); return builder.startObject(Fields.INDICES); }), @@ -212,7 +212,7 @@ protected Iterator customXContentChunks(ToXContent.Params params) { getIndices().values().iterator(), indexStats -> Iterators.concat( - ChunkedToXContentHelper.singleChunk((builder, p) -> { + ChunkedToXContentHelper.chunk((builder, p) -> { builder.startObject(indexStats.getIndex()); builder.field("uuid", indexStats.getUuid()); if (indexStats.getHealth() != null) { @@ -257,7 +257,7 @@ protected Iterator customXContentChunks(ToXContent.Params params) { ChunkedToXContentHelper.endObject() ); } else { - return ChunkedToXContentHelper.singleChunk((builder, p) -> { + return ChunkedToXContentHelper.chunk((builder, p) -> { commonStats(builder, p); return builder; }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java index f729455edcc24..4f61b89aeaf5e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -14,12 +14,17 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; -import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.action.support.local.LocalClusterStateRequest; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV10; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -43,22 +48,23 @@ private GetComponentTemplateAction() { /** * Request to retrieve one or more component templates */ - public static class Request extends MasterNodeReadRequest { + public static class Request
extends LocalClusterStateRequest { @Nullable private String name; private boolean includeDefaults; - public Request() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); - } - - public Request(String name) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public Request(TimeValue masterTimeout, String name) { + super(masterTimeout); this.name = name; this.includeDefaults = false; } + /** + * NB prior to 9.0 get-component was a TransportMasterNodeReadAction so for BwC we must remain able to read these requests until + * we no longer need to support calling this action remotely. + */ + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) public Request(StreamInput in) throws IOException { super(in); name = in.readOptionalString(); @@ -70,17 +76,13 @@ public Request(StreamInput in) throws IOException { } @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeOptionalString(name); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { - out.writeBoolean(includeDefaults); - } + public ActionRequestValidationException validate() { + return null; } @Override - public ActionRequestValidationException validate() { - return null; + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers); } /** @@ -123,19 +125,6 @@ public static class Response extends ActionResponse implements ToXContentObject @Nullable private final RolloverConfiguration rolloverConfiguration; - public Response(StreamInput in) throws IOException { - super(in); - componentTemplates = in.readMap(ComponentTemplate::new); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { - rolloverConfiguration = in.readOptionalWriteable(RolloverConfiguration::new); - } else { - rolloverConfiguration = null; - } - if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { - in.readOptionalWriteable(DataStreamGlobalRetention::read); - } - } - /** * Please use {@link GetComponentTemplateAction.Response#Response(Map)} */ @@ -183,6 +172,11 @@ public DataStreamGlobalRetention getGlobalRetention() { return null; } + /** + * NB prior to 9.0 get-component was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until + * we no longer need to support calling this action remotely. 
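The javadoc above captures the recurring compromise in these template actions: the action now runs locally, but its wire formats must survive one more major version so that older remote nodes can still call it, which means requests must stay readable and responses must stay writable. Here is a minimal sketch of the read side of that compromise, using plain java.io streams rather than Elasticsearch's StreamInput/StreamOutput; all names are invented for the sketch.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class BwcReadOnlyRequestSketch {

    static final class LegacyRequest {
        final String name;
        final boolean includeDefaults;

        // Kept for BwC: an older remote node may still send these bytes over the wire,
        // even though the new, local code path never serializes this request itself.
        LegacyRequest(DataInputStream in) throws IOException {
            this.name = in.readUTF();
            this.includeDefaults = in.readBoolean();
        }
    }

    public static void main(String[] args) throws IOException {
        // Simulate the bytes an old node would have written.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeUTF("my-template");
            out.writeBoolean(true);
        }
        LegacyRequest request = new LegacyRequest(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(request.name + " includeDefaults=" + request.includeDefaults);
    }
}

The write side is symmetric: writeTo survives for responses so the local node can still answer an old caller, and both halves are tagged for deletion once remote calls no longer need supporting.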
+ */ + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(componentTemplates, StreamOutput::writeWriteable); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index 67f87476ea6a5..5d322bba17c5e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -14,12 +14,17 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; -import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.action.support.local.LocalClusterStateRequest; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV10; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -40,17 +45,18 @@ private GetComposableIndexTemplateAction() { /** * Request to retrieve one or more index templates */ - public static class Request extends MasterNodeReadRequest { + public static class Request extends LocalClusterStateRequest { @Nullable private final String name; private boolean includeDefaults; /** + * @param masterTimeout Timeout for waiting for new cluster state in case it is blocked. * @param name A template name or pattern, or {@code null} to retrieve all templates. */ - public Request(@Nullable String name) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public Request(TimeValue masterTimeout, @Nullable String name) { + super(masterTimeout); if (name != null && name.contains(",")) { throw new IllegalArgumentException("template name may not contain ','"); } @@ -58,6 +64,11 @@ public Request(@Nullable String name) { this.includeDefaults = false; } + /** + * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to read these requests until + * we no longer need to support calling this action remotely.
+ */ + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) public Request(StreamInput in) throws IOException { super(in); name = in.readOptionalString(); @@ -68,15 +79,6 @@ public Request(StreamInput in) throws IOException { } } - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeOptionalString(name); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { - out.writeBoolean(includeDefaults); - } - } - public void includeDefaults(boolean includeDefaults) { this.includeDefaults = includeDefaults; } @@ -90,6 +92,11 @@ public ActionRequestValidationException validate() { return null; } + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers); + } + /** * The name of the index templates. */ @@ -124,19 +131,6 @@ public static class Response extends ActionResponse implements ToXContentObject @Nullable private final RolloverConfiguration rolloverConfiguration; - public Response(StreamInput in) throws IOException { - super(in); - indexTemplates = in.readMap(ComposableIndexTemplate::new); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { - rolloverConfiguration = in.readOptionalWriteable(RolloverConfiguration::new); - } else { - rolloverConfiguration = null; - } - if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { - in.readOptionalWriteable(DataStreamGlobalRetention::read); - } - } - /** * Please use {@link GetComposableIndexTemplateAction.Response#Response(Map)} */ @@ -184,6 +178,11 @@ public RolloverConfiguration getRolloverConfiguration() { return rolloverConfiguration; } + /** + * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until + * we no longer need to support calling this action remotely. 
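These requests also gain a createTask override that returns a CancellableTask. The point is cooperative cancellation: the operation polls the task and bails out cleanly instead of being interrupted, which is what the cancellableTask.ensureNotCancelled() calls in the transport actions further down do just before a response is built. A toy version of that contract follows; all names here are invented stand-ins, not Elasticsearch classes.

import java.util.concurrent.atomic.AtomicBoolean;

public class CancellableTaskSketch {

    static final class Task {
        private final AtomicBoolean cancelled = new AtomicBoolean();

        void cancel() {
            cancelled.set(true);
        }

        // Analogous to CancellableTask#ensureNotCancelled in the actions below:
        // throw rather than waste work on a caller that has gone away.
        void ensureNotCancelled() {
            if (cancelled.get()) {
                throw new IllegalStateException("task cancelled");
            }
        }
    }

    public static void main(String[] args) {
        Task task = new Task();
        task.ensureNotCancelled(); // fine: work proceeds
        task.cancel();             // e.g. the HTTP client disconnected
        try {
            task.ensureNotCancelled(); // the action aborts before building a response
        } catch (IllegalStateException e) {
            System.out.println("aborted: " + e.getMessage());
        }
    }
}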
+ */ + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(indexTemplates, StreamOutput::writeWriteable); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java index e3f765eb8ae88..1c911b4a0a97c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java @@ -9,38 +9,42 @@ package org.elasticsearch.action.admin.indices.template.get; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.action.support.local.LocalClusterStateRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV10; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import java.io.IOException; +import java.util.Map; import static org.elasticsearch.action.ValidateActions.addValidationError; /** * Request that allows to retrieve index templates */ -public class GetIndexTemplatesRequest extends MasterNodeReadRequest { +public class GetIndexTemplatesRequest extends LocalClusterStateRequest { private final String[] names; - public GetIndexTemplatesRequest(String... names) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public GetIndexTemplatesRequest(TimeValue masterTimeout, String... names) { + super(masterTimeout); this.names = names; } + /** + * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to read these requests until + * we no longer need to support calling this action remotely. + */ + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) public GetIndexTemplatesRequest(StreamInput in) throws IOException { super(in); names = in.readStringArray(); } - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(names); - } - @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; @@ -56,6 +60,11 @@ public ActionRequestValidationException validate() { return validationException; } + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers); + } + /** * The names of the index templates. 
*/ diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java index 97aabbd4631b7..ed53154cc5c93 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java @@ -8,15 +8,13 @@ */ package org.elasticsearch.action.admin.indices.template.get; -import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; +import org.elasticsearch.core.TimeValue; -public class GetIndexTemplatesRequestBuilder extends MasterNodeReadOperationRequestBuilder< - GetIndexTemplatesRequest, - GetIndexTemplatesResponse, - GetIndexTemplatesRequestBuilder> { +public class GetIndexTemplatesRequestBuilder extends ActionRequestBuilder { - public GetIndexTemplatesRequestBuilder(ElasticsearchClient client, String... names) { - super(client, GetIndexTemplatesAction.INSTANCE, new GetIndexTemplatesRequest(names)); + public GetIndexTemplatesRequestBuilder(ElasticsearchClient client, TimeValue masterTimeout, String... names) { + super(client, GetIndexTemplatesAction.INSTANCE, new GetIndexTemplatesRequest(masterTimeout, names)); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java index 2d854d2c6fa45..69ae3f1db92ae 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java @@ -10,8 +10,8 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -26,11 +26,6 @@ public class GetIndexTemplatesResponse extends ActionResponse implements ToXCont private final List indexTemplates; - public GetIndexTemplatesResponse(StreamInput in) throws IOException { - super(in); - indexTemplates = in.readCollectionAsList(IndexTemplateMetadata::readFrom); - } - public GetIndexTemplatesResponse(List indexTemplates) { this.indexTemplates = indexTemplates; } @@ -39,6 +34,11 @@ public List getIndexTemplates() { return indexTemplates; } + /** + * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until + * we no longer need to support calling this action remotely. 
+ */ + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) @Override public void writeTo(StreamOutput out) throws IOException { out.writeCollection(indexTemplates); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java index d04ccd39be04b..40e1a988dc2f2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java @@ -12,51 +12,61 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.action.support.ChannelActionListener; +import org.elasticsearch.action.support.local.TransportLocalClusterStateAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.HashMap; import java.util.Map; -public class TransportGetComponentTemplateAction extends TransportMasterNodeReadAction< +public class TransportGetComponentTemplateAction extends TransportLocalClusterStateAction< GetComponentTemplateAction.Request, GetComponentTemplateAction.Response> { private final ClusterSettings clusterSettings; + /** + * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService until + * we no longer need to support calling this action remotely. 
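The constructor that follows shows the other half of the local-action migration: local callers are served directly, while a handler is still registered with the TransportService so that older remote nodes which route the action over the wire keep working. Below is a toy re-creation of that registration pattern; every type is an invented stand-in, the action name string is hypothetical, and the extra flags of the real registerRequestHandler call are omitted.

import java.util.HashMap;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.function.Consumer;

public class LocalActionWithBwcHandlerSketch {

    static final class ToyTransportService {
        private final Map<String, BiConsumer<String, Consumer<String>>> handlers = new HashMap<>();

        void registerRequestHandler(String actionName, BiConsumer<String, Consumer<String>> handler) {
            handlers.put(actionName, handler);
        }

        // What an incoming call from a remote (older) node would invoke.
        void dispatch(String actionName, String request, Consumer<String> channel) {
            handlers.get(actionName).accept(request, channel);
        }
    }

    static final class LocalTemplateAction {
        static final String NAME = "indices:admin/example_template/get"; // hypothetical name

        LocalTemplateAction(ToyTransportService transportService) {
            // Local callers invoke execute() directly; this registration exists purely for BwC.
            transportService.registerRequestHandler(NAME, (request, channel) -> channel.accept(execute(request)));
        }

        String execute(String templateName) {
            return "templates matching [" + templateName + "]";
        }
    }

    public static void main(String[] args) {
        ToyTransportService transport = new ToyTransportService();
        LocalTemplateAction action = new LocalTemplateAction(transport);
        System.out.println(action.execute("logs-*"));                              // new, local path
        transport.dispatch(LocalTemplateAction.NAME, "logs-*", System.out::println); // old, remote path
    }
}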
+ */ + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) + @SuppressWarnings("this-escape") @Inject public TransportGetComponentTemplateAction( TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, - ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver + ActionFilters actionFilters ) { super( GetComponentTemplateAction.NAME, - transportService, - clusterService, - threadPool, actionFilters, - GetComponentTemplateAction.Request::new, - indexNameExpressionResolver, - GetComponentTemplateAction.Response::new, + transportService.getTaskManager(), + clusterService, EsExecutors.DIRECT_EXECUTOR_SERVICE ); clusterSettings = clusterService.getClusterSettings(); + + transportService.registerRequestHandler( + actionName, + executor, + false, + true, + GetComponentTemplateAction.Request::new, + (request, channel, task) -> executeDirect(task, request, new ChannelActionListener<>(channel)) + ); } @Override @@ -65,12 +75,13 @@ protected ClusterBlockException checkBlock(GetComponentTemplateAction.Request re } @Override - protected void masterOperation( + protected void localClusterStateOperation( Task task, GetComponentTemplateAction.Request request, ClusterState state, ActionListener listener ) { + final var cancellableTask = (CancellableTask) task; Map allTemplates = state.metadata().componentTemplates(); Map results; @@ -93,6 +104,7 @@ protected void masterOperation( } } + cancellableTask.ensureNotCancelled(); if (request.includeDefaults()) { listener.onResponse( new GetComponentTemplateAction.Response( diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java index 608e7529d34bf..3d5800de2cb3a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java @@ -12,51 +12,61 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.action.support.ChannelActionListener; +import org.elasticsearch.action.support.local.TransportLocalClusterStateAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.HashMap; import java.util.Map; -public class TransportGetComposableIndexTemplateAction extends 
TransportMasterNodeReadAction< +public class TransportGetComposableIndexTemplateAction extends TransportLocalClusterStateAction< GetComposableIndexTemplateAction.Request, GetComposableIndexTemplateAction.Response> { private final ClusterSettings clusterSettings; + /** + * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService until + * we no longer need to support calling this action remotely. + */ + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) + @SuppressWarnings("this-escape") @Inject public TransportGetComposableIndexTemplateAction( TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, - ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver + ActionFilters actionFilters ) { super( GetComposableIndexTemplateAction.NAME, - transportService, - clusterService, - threadPool, actionFilters, - GetComposableIndexTemplateAction.Request::new, - indexNameExpressionResolver, - GetComposableIndexTemplateAction.Response::new, + transportService.getTaskManager(), + clusterService, EsExecutors.DIRECT_EXECUTOR_SERVICE ); clusterSettings = clusterService.getClusterSettings(); + + transportService.registerRequestHandler( + actionName, + executor, + false, + true, + GetComposableIndexTemplateAction.Request::new, + (request, channel, task) -> executeDirect(task, request, new ChannelActionListener<>(channel)) + ); } @Override @@ -65,12 +75,13 @@ protected ClusterBlockException checkBlock(GetComposableIndexTemplateAction.Requ } @Override - protected void masterOperation( + protected void localClusterStateOperation( Task task, GetComposableIndexTemplateAction.Request request, ClusterState state, ActionListener listener ) { + final var cancellableTask = (CancellableTask) task; Map allTemplates = state.metadata().templatesV2(); Map results; // If we did not ask for a specific name, then we return all templates @@ -91,6 +102,7 @@ protected void masterOperation( throw new ResourceNotFoundException("index template matching [" + request.name() + "] not found"); } } + cancellableTask.ensureNotCancelled(); if (request.includeDefaults()) { listener.onResponse( new GetComposableIndexTemplateAction.Response( diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java index 99763d4d76dc6..c53ce4a6841ab 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java @@ -10,45 +10,53 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.action.support.ChannelActionListener; +import org.elasticsearch.action.support.local.TransportLocalClusterStateAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.regex.Regex; import 
org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.injection.guice.Inject; +import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; import java.util.List; import java.util.Map; -public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadAction { +public class TransportGetIndexTemplatesAction extends TransportLocalClusterStateAction< + GetIndexTemplatesRequest, + GetIndexTemplatesResponse> { + /** + * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService until + * we no longer need to support calling this action remotely. + */ + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) + @SuppressWarnings("this-escape") @Inject - public TransportGetIndexTemplatesAction( - TransportService transportService, - ClusterService clusterService, - ThreadPool threadPool, - ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver - ) { + public TransportGetIndexTemplatesAction(TransportService transportService, ClusterService clusterService, ActionFilters actionFilters) { super( GetIndexTemplatesAction.NAME, - transportService, - clusterService, - threadPool, actionFilters, - GetIndexTemplatesRequest::new, - indexNameExpressionResolver, - GetIndexTemplatesResponse::new, + transportService.getTaskManager(), + clusterService, EsExecutors.DIRECT_EXECUTOR_SERVICE ); + + transportService.registerRequestHandler( + actionName, + executor, + false, + true, + GetIndexTemplatesRequest::new, + (request, channel, task) -> executeDirect(task, request, new ChannelActionListener<>(channel)) + ); } @Override @@ -57,12 +65,13 @@ protected ClusterBlockException checkBlock(GetIndexTemplatesRequest request, Clu } @Override - protected void masterOperation( + protected void localClusterStateOperation( Task task, GetIndexTemplatesRequest request, ClusterState state, ActionListener listener ) { + final var cancellableTask = (CancellableTask) task; List results; // If we did not ask for a specific name, then we return all templates @@ -84,6 +93,7 @@ protected void masterOperation( } } + cancellableTask.ensureNotCancelled(); listener.onResponse(new GetIndexTemplatesResponse(results)); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java index ce29d65ececf9..003be58d19554 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java @@ -12,16 +12,20 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; -import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.action.support.local.LocalClusterStateRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import 
org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import java.io.IOException; +import java.util.Map; import java.util.Objects; -public class SimulateIndexTemplateRequest extends MasterNodeReadRequest { +public class SimulateIndexTemplateRequest extends LocalClusterStateRequest { private String indexName; @@ -30,14 +34,18 @@ public class SimulateIndexTemplateRequest extends MasterNodeReadRequest headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers); + } + public String getIndexName() { return indexName; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java index a521dac60e96a..1a04b6e4d7633 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java @@ -12,13 +12,11 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; -import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; import org.elasticsearch.cluster.metadata.ResettableValue; import org.elasticsearch.cluster.metadata.Template; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -67,27 +65,11 @@ public RolloverConfiguration getRolloverConfiguration() { return rolloverConfiguration; } - public SimulateIndexTemplateResponse(StreamInput in) throws IOException { - super(in); - resolvedTemplate = in.readOptionalWriteable(Template::new); - if (in.readBoolean()) { - int overlappingTemplatesCount = in.readInt(); - overlappingTemplates = Maps.newMapWithExpectedSize(overlappingTemplatesCount); - for (int i = 0; i < overlappingTemplatesCount; i++) { - String templateName = in.readString(); - overlappingTemplates.put(templateName, in.readStringCollectionAsList()); - } - } else { - this.overlappingTemplates = null; - } - rolloverConfiguration = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) - ? in.readOptionalWriteable(RolloverConfiguration::new) - : null; - if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { - in.readOptionalWriteable(DataStreamGlobalRetention::read); - } - } - + /** + * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until + * we no longer need to support calling this action remotely. 
+ */ + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(resolvedTemplate); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java index 75cc72416a854..15015b910767e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java @@ -14,12 +14,16 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; -import org.elasticsearch.action.support.master.MasterNodeReadRequest; +import org.elasticsearch.action.support.local.LocalClusterStateRequest; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import java.io.IOException; +import java.util.Map; import java.util.Objects; /** @@ -35,7 +39,7 @@ private SimulateTemplateAction() { super(NAME); } - public static class Request extends MasterNodeReadRequest { + public static class Request extends LocalClusterStateRequest { @Nullable private String templateName; @@ -44,26 +48,15 @@ public static class Request extends MasterNodeReadRequest { private TransportPutComposableIndexTemplateAction.Request indexTemplateRequest; private boolean includeDefaults = false; - public Request() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); - } - - public Request(String templateName) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); - if (templateName == null) { - throw new IllegalArgumentException("template name cannot be null"); - } + public Request(TimeValue masterTimeout, String templateName) { + super(masterTimeout); this.templateName = templateName; } - public Request(TransportPutComposableIndexTemplateAction.Request indexTemplateRequest) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); - if (indexTemplateRequest == null) { - throw new IllegalArgumentException("index template body must be present"); - } - this.indexTemplateRequest = indexTemplateRequest; - } - + /** + * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to read these requests until + * we no longer need to support calling this action remotely. 
+ */ public Request(StreamInput in) throws IOException { super(in); templateName = in.readOptionalString(); @@ -73,16 +66,6 @@ public Request(StreamInput in) throws IOException { } } - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeOptionalString(templateName); - out.writeOptionalWriteable(indexTemplateRequest); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { - out.writeBoolean(includeDefaults); - } - } - @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; @@ -98,6 +81,11 @@ public ActionRequestValidationException validate() { return validationException; } + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers); + } + @Nullable public String getTemplateName() { return templateName; @@ -112,11 +100,6 @@ public TransportPutComposableIndexTemplateAction.Request getIndexTemplateRequest return indexTemplateRequest; } - public Request templateName(String templateName) { - this.templateName = templateName; - return this; - } - public Request indexTemplateRequest(TransportPutComposableIndexTemplateAction.Request indexTemplateRequest) { this.indexTemplateRequest = indexTemplateRequest; return this; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index 5f98852148ed4..74936128caa25 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -11,7 +11,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.action.support.ChannelActionListener; +import org.elasticsearch.action.support.local.TransportLocalClusterStateAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -20,7 +21,6 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; @@ -31,6 +31,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettingProviders; @@ -42,7 +43,6 @@ import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import 
org.elasticsearch.xcontent.NamedXContentRegistry; @@ -65,7 +65,7 @@ import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.resolveLifecycle; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.resolveSettings; -public class TransportSimulateIndexTemplateAction extends TransportMasterNodeReadAction< +public class TransportSimulateIndexTemplateAction extends TransportLocalClusterStateAction< SimulateIndexTemplateRequest, SimulateIndexTemplateResponse> { @@ -77,14 +77,18 @@ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeRea private final ClusterSettings clusterSettings; private final boolean isDslOnlyMode; + /** + * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService until + * we no longer need to support calling this action remotely. + */ + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) + @SuppressWarnings("this-escape") @Inject public TransportSimulateIndexTemplateAction( TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, MetadataIndexTemplateService indexTemplateService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, NamedXContentRegistry xContentRegistry, IndicesService indicesService, SystemIndices systemIndices, @@ -92,13 +96,9 @@ public TransportSimulateIndexTemplateAction( ) { super( SimulateIndexTemplateAction.NAME, - transportService, - clusterService, - threadPool, actionFilters, - SimulateIndexTemplateRequest::new, - indexNameExpressionResolver, - SimulateIndexTemplateResponse::new, + transportService.getTaskManager(), + clusterService, EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.indexTemplateService = indexTemplateService; @@ -108,10 +108,19 @@ public TransportSimulateIndexTemplateAction( this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); this.clusterSettings = clusterService.getClusterSettings(); this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); + + transportService.registerRequestHandler( + actionName, + executor, + false, + true, + SimulateIndexTemplateRequest::new, + (request, channel, task) -> executeDirect(task, request, new ChannelActionListener<>(channel)) + ); } @Override - protected void masterOperation( + protected void localClusterStateOperation( Task task, SimulateIndexTemplateRequest request, ClusterState state, @@ -214,8 +223,7 @@ public static ClusterState resolveTemporaryState( .build(); final IndexMetadata indexMetadata = IndexMetadata.builder(indexName) - // handle mixed-cluster states by passing in minTransportVersion to reset event.ingested range to UNKNOWN if an older version - .eventIngestedRange(getEventIngestedRange(indexName, simulatedState), simulatedState.getMinTransportVersion()) + .eventIngestedRange(getEventIngestedRange(indexName, simulatedState)) .settings(dummySettings) .build(); return ClusterState.builder(simulatedState) @@ -304,8 +312,7 @@ public static Template resolveTemplate( dummySettings.put(templateSettings); final IndexMetadata indexMetadata = IndexMetadata.builder(indexName) - // handle mixed-cluster states by passing in minTransportVersion to reset event.ingested range to UNKNOWN if an older version - .eventIngestedRange(getEventIngestedRange(indexName, simulatedState), simulatedState.getMinTransportVersion()) + .eventIngestedRange(getEventIngestedRange(indexName, simulatedState)) .settings(dummySettings) .build(); diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java index 30bbad0b57df0..692f027b23f9e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java @@ -11,26 +11,26 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.action.support.ChannelActionListener; +import org.elasticsearch.action.support.local.TransportLocalClusterStateAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettingProviders; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -48,7 +48,7 @@ * Handles simulating an index template either by name (looking it up in the * cluster state), or by a provided template configuration */ -public class TransportSimulateTemplateAction extends TransportMasterNodeReadAction< +public class TransportSimulateTemplateAction extends TransportLocalClusterStateAction< SimulateTemplateAction.Request, SimulateIndexTemplateResponse> { @@ -60,14 +60,18 @@ public class TransportSimulateTemplateAction extends TransportMasterNodeReadActi private final ClusterSettings clusterSettings; private final boolean isDslOnlyMode; + /** + * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService until + * we no longer need to support calling this action remotely. 
+ */ + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) + @SuppressWarnings("this-escape") @Inject public TransportSimulateTemplateAction( TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, MetadataIndexTemplateService indexTemplateService, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, NamedXContentRegistry xContentRegistry, IndicesService indicesService, SystemIndices systemIndices, @@ -75,13 +79,9 @@ public TransportSimulateTemplateAction( ) { super( SimulateTemplateAction.NAME, - transportService, - clusterService, - threadPool, actionFilters, - SimulateTemplateAction.Request::new, - indexNameExpressionResolver, - SimulateIndexTemplateResponse::new, + transportService.getTaskManager(), + clusterService, EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.indexTemplateService = indexTemplateService; @@ -91,10 +91,19 @@ public TransportSimulateTemplateAction( this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders(); this.clusterSettings = clusterService.getClusterSettings(); this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings()); + + transportService.registerRequestHandler( + actionName, + executor, + false, + true, + SimulateTemplateAction.Request::new, + (request, channel, task) -> executeDirect(task, request, new ChannelActionListener<>(channel)) + ); } @Override - protected void masterOperation( + protected void localClusterStateOperation( Task task, SimulateTemplateAction.Request request, ClusterState state, diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java index 62a9b88cb6a57..5851549977eab 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java @@ -14,22 +14,10 @@ import java.util.Set; -import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_COMPONENT_TEMPLATE_SUBSTITUTIONS; -import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS; -import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_ADDITION; -import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_VALIDATION; -import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_VALIDATION_TEMPLATES; -import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING; +import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_IGNORED_FIELDS; public class BulkFeatures implements FeatureSpecification { public Set getFeatures() { - return Set.of( - SIMULATE_MAPPING_VALIDATION, - SIMULATE_MAPPING_VALIDATION_TEMPLATES, - SIMULATE_COMPONENT_TEMPLATE_SUBSTITUTIONS, - SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS, - SIMULATE_MAPPING_ADDITION, - SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING - ); + return Set.of(SIMULATE_IGNORED_FIELDS); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index 4df228240add5..dd473869fb2d9 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import 
org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.client.internal.node.NodeClient; @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -98,6 +99,7 @@ final class BulkOperation extends ActionRunnable { private final Set failedRolloverRequests = ConcurrentCollections.newConcurrentSet(); private final Map shortCircuitShardFailures = ConcurrentCollections.newConcurrentMap(); private final FailureStoreMetrics failureStoreMetrics; + private final DataStreamFailureStoreSettings dataStreamFailureStoreSettings; BulkOperation( Task task, @@ -111,7 +113,8 @@ final class BulkOperation extends ActionRunnable { LongSupplier relativeTimeProvider, long startTimeNanos, ActionListener listener, - FailureStoreMetrics failureStoreMetrics + FailureStoreMetrics failureStoreMetrics, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings ) { this( task, @@ -127,7 +130,8 @@ final class BulkOperation extends ActionRunnable { listener, new ClusterStateObserver(clusterService, bulkRequest.timeout(), logger, threadPool.getThreadContext()), new FailureStoreDocumentConverter(), - failureStoreMetrics + failureStoreMetrics, + dataStreamFailureStoreSettings ); } @@ -145,7 +149,8 @@ final class BulkOperation extends ActionRunnable { ActionListener listener, ClusterStateObserver observer, FailureStoreDocumentConverter failureStoreDocumentConverter, - FailureStoreMetrics failureStoreMetrics + FailureStoreMetrics failureStoreMetrics, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings ) { super(listener); this.task = task; @@ -164,6 +169,7 @@ final class BulkOperation extends ActionRunnable { this.rolloverClient = new OriginSettingClient(client, LAZY_ROLLOVER_ORIGIN); this.shortCircuitShardFailures.putAll(bulkRequest.incrementalState().shardLevelFailures()); this.failureStoreMetrics = failureStoreMetrics; + this.dataStreamFailureStoreSettings = dataStreamFailureStoreSettings; } @Override @@ -210,11 +216,9 @@ private void rollOverFailureStores(Runnable runnable) { } try (RefCountingRunnable refs = new RefCountingRunnable(runnable)) { for (String dataStream : failureStoresToBeRolledOver) { - RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null); - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) - .build() + RolloverRequest rolloverRequest = new RolloverRequest( + IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES), + null ); // We are executing a lazy rollover because it is an action specialised for this situation, when we want an // unconditional and performant rollover. 
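As an aside on the hunk above: instead of toggling IndicesOptions.SelectorOptions.FAILURES on the rollover request, the rollover target itself now carries the component selector, via IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES). Below is a minimal sketch of the naming convention this appears to produce; the combine helper and the Selector enum here are hypothetical stand-ins for illustration, not the Elasticsearch implementation.

```java
import java.util.Locale;

// Hypothetical stand-in for IndexNameExpressionResolver.combineSelector(...): models an
// "<expression>::<selector>" suffix that addresses one component of a data stream.
public final class SelectorSuffixSketch {

    enum Selector { DATA, FAILURES }

    static String combine(String expression, Selector selector) {
        // e.g. combine("logs-app-default", Selector.FAILURES) -> "logs-app-default::failures"
        return expression + "::" + selector.name().toLowerCase(Locale.ROOT);
    }

    public static void main(String[] args) {
        System.out.println(combine("logs-app-default", Selector.FAILURES));
    }
}
```

Encoding the selector in the target string, rather than in the request options, lets a single rollover code path address either the backing indices or the failure store of the same data stream.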
@@ -544,7 +548,7 @@ private IndexDocFailureStoreStatus processFailure(BulkItemRequest bulkItemReques // Do not redirect documents to a failure store that were already headed to one. var isFailureStoreRequest = isFailureStoreRequest(docWriteRequest); if (isFailureStoreRequest == false - && failureStoreCandidate.isFailureStoreEnabled() + && failureStoreCandidate.isFailureStoreEffectivelyEnabled(dataStreamFailureStoreSettings) && error instanceof VersionConflictEngineException == false && error instanceof EsRejectedExecutionException == false) { // Prepare the data stream failure store if necessary @@ -577,7 +581,7 @@ private IndexDocFailureStoreStatus processFailure(BulkItemRequest bulkItemReques if (isFailureStoreRequest) { return IndexDocFailureStoreStatus.FAILED; } - if (failureStoreCandidate.isFailureStoreEnabled() == false) { + if (failureStoreCandidate.isFailureStoreEffectivelyEnabled(dataStreamFailureStoreSettings) == false) { return IndexDocFailureStoreStatus.NOT_ENABLED; } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java index 12d3aa67ca9bb..28e345a527085 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ToXContent; @@ -158,13 +157,14 @@ public void writeTo(StreamOutput out) throws IOException { @Override public Iterator toXContentChunked(ToXContent.Params params) { - return ChunkedToXContent.builder(params).object(ob -> ob.append((b, p) -> { - b.field(ERRORS, hasFailures()); - b.field(TOOK, tookInMillis); + return Iterators.concat(Iterators.single((builder, p) -> { + builder.startObject(); + builder.field(ERRORS, hasFailures()); + builder.field(TOOK, tookInMillis); if (ingestTookInMillis != BulkResponse.NO_INGEST_TOOK) { - b.field(INGEST_TOOK, ingestTookInMillis); + builder.field(INGEST_TOOK, ingestTookInMillis); } - return b; - }).array(ITEMS, Iterators.forArray(responses))); + return builder.startArray(ITEMS); + }), Iterators.forArray(responses), Iterators.single((builder, p) -> builder.endArray().endObject())); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java index e83bca4b661c9..24534826f8e3e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportAbstractBulkAction.java @@ -343,9 +343,11 @@ public boolean isForceExecution() { * @param indexName The index name to check. * @param metadata Cluster state metadata. * @param epochMillis A timestamp to use when resolving date math in the index name. - * @return true if this is not a simulation, and the given index name corresponds to a data stream with a failure store - * or if it matches a template that has a data stream failure store enabled. Returns false if the index name corresponds to a - * data stream, but it doesn't have the failure store enabled. 
Returns null when it doesn't correspond to a data stream. + * @return true if this is not a simulation, and the given index name corresponds to a data stream with a failure store, or if it + * matches a template that has a data stream failure store enabled, or if it matches a data stream template with no failure store + * option specified and the name matches the cluster setting to enable the failure store. Returns false if the index name + * corresponds to a data stream, but it doesn't have the failure store enabled by one of those conditions. Returns null when it + * doesn't correspond to a data stream. */ protected abstract Boolean resolveFailureStore(String indexName, Metadata metadata, long epochMillis); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index e2c73349b93ec..523381321ada7 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.admin.indices.create.AutoCreateAction; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -26,7 +25,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; @@ -35,6 +34,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings; import org.elasticsearch.cluster.metadata.DataStreamOptions; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -44,7 +44,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.core.Nullable; -import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.VersionType; import org.elasticsearch.indices.SystemIndices; @@ -80,11 +79,11 @@ public class TransportBulkAction extends TransportAbstractBulkAction { private static final Logger logger = LogManager.getLogger(TransportBulkAction.class); public static final String LAZY_ROLLOVER_ORIGIN = "lazy_rollover"; - private final FeatureService featureService; private final NodeClient client; private final IndexNameExpressionResolver indexNameExpressionResolver; private final OriginSettingClient rolloverClient; private final FailureStoreMetrics failureStoreMetrics; + private final DataStreamFailureStoreSettings dataStreamFailureStoreSettings; @Inject public TransportBulkAction( @@ -92,27 +91,27 @@ public TransportBulkAction( TransportService transportService, 
ClusterService clusterService, IngestService ingestService, - FeatureService featureService, NodeClient client, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, SystemIndices systemIndices, - FailureStoreMetrics failureStoreMetrics + FailureStoreMetrics failureStoreMetrics, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings ) { this( threadPool, transportService, clusterService, ingestService, - featureService, client, actionFilters, indexNameExpressionResolver, indexingPressure, systemIndices, threadPool::relativeTimeInNanos, - failureStoreMetrics + failureStoreMetrics, + dataStreamFailureStoreSettings ); } @@ -121,14 +120,14 @@ public TransportBulkAction( TransportService transportService, ClusterService clusterService, IngestService ingestService, - FeatureService featureService, NodeClient client, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, SystemIndices systemIndices, LongSupplier relativeTimeProvider, - FailureStoreMetrics failureStoreMetrics + FailureStoreMetrics failureStoreMetrics, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings ) { this( TYPE, @@ -137,14 +136,14 @@ public TransportBulkAction( transportService, clusterService, ingestService, - featureService, client, actionFilters, indexNameExpressionResolver, indexingPressure, systemIndices, relativeTimeProvider, - failureStoreMetrics + failureStoreMetrics, + dataStreamFailureStoreSettings ); } @@ -155,14 +154,14 @@ public TransportBulkAction( TransportService transportService, ClusterService clusterService, IngestService ingestService, - FeatureService featureService, NodeClient client, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, SystemIndices systemIndices, LongSupplier relativeTimeProvider, - FailureStoreMetrics failureStoreMetrics + FailureStoreMetrics failureStoreMetrics, + DataStreamFailureStoreSettings dataStreamFailureStoreSettings ) { super( bulkAction, @@ -176,8 +175,8 @@ public TransportBulkAction( systemIndices, relativeTimeProvider ); + this.dataStreamFailureStoreSettings = dataStreamFailureStoreSettings; Objects.requireNonNull(relativeTimeProvider); - this.featureService = featureService; this.client = client; this.indexNameExpressionResolver = indexNameExpressionResolver; this.rolloverClient = new OriginSettingClient(client, LAZY_ROLLOVER_ORIGIN); @@ -276,13 +275,12 @@ private void populateMissingTargets( // A map for memorizing which indices exist. Map indexExistence = new HashMap<>(); Function indexExistenceComputation = (index) -> indexNameExpressionResolver.hasIndexAbstraction(index, state); - boolean lazyRolloverFeature = featureService.clusterHasFeature(state, LazyRolloverAction.DATA_STREAM_LAZY_ROLLOVER); boolean lazyRolloverFailureStoreFeature = DataStream.isFailureStoreFeatureFlagEnabled(); Set indicesThatRequireAlias = new HashSet<>(); for (DocWriteRequest request : bulkRequest.requests) { // Delete requests should not attempt to create the index (if the index does not exist), unless an external versioning is used. 
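One subtlety worth calling out here: delete requests must not auto-create a missing index, except when external versioning is in use, since the delete then has to be persisted so its version is recorded. A self-contained sketch of that gate, with local enums standing in for Elasticsearch's DocWriteRequest.OpType and VersionType:

```java
// Standalone sketch of the auto-create gate described in the comment above.
// OpType and VersionType are simplified stand-ins for the Elasticsearch types.
public final class AutoCreateGateSketch {

    enum OpType { INDEX, CREATE, UPDATE, DELETE }

    enum VersionType { INTERNAL, EXTERNAL, EXTERNAL_GTE }

    static boolean mayAutoCreateIndex(OpType opType, VersionType versionType) {
        if (opType == OpType.DELETE) {
            // A delete only triggers index creation when external versioning needs
            // the deletion (and its version) to be recorded.
            return versionType == VersionType.EXTERNAL || versionType == VersionType.EXTERNAL_GTE;
        }
        return true; // index/create/update requests may auto-create the target
    }

    public static void main(String[] args) {
        System.out.println(mayAutoCreateIndex(OpType.DELETE, VersionType.INTERNAL));  // false
        System.out.println(mayAutoCreateIndex(OpType.DELETE, VersionType.EXTERNAL)); // true
    }
}
```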
- if (request.opType() == OpType.DELETE + if (request.opType() == DocWriteRequest.OpType.DELETE && request.versionType() != VersionType.EXTERNAL && request.versionType() != VersionType.EXTERNAL_GTE) { continue; @@ -321,18 +319,15 @@ private void populateMissingTargets( } } // Determine which data streams and failure stores need to be rolled over. - if (lazyRolloverFeature) { - DataStream dataStream = state.metadata().dataStreams().get(request.index()); - if (dataStream != null) { - if (writeToFailureStore == false && dataStream.getBackingIndices().isRolloverOnWrite()) { - dataStreamsToBeRolledOver.add(request.index()); - } else if (lazyRolloverFailureStoreFeature - && writeToFailureStore - && dataStream.getFailureIndices().isRolloverOnWrite()) { - failureStoresToBeRolledOver.add(request.index()); - } + DataStream dataStream = state.metadata().dataStreams().get(request.index()); + if (dataStream != null) { + if (writeToFailureStore == false && dataStream.getBackingIndices().isRolloverOnWrite()) { + dataStreamsToBeRolledOver.add(request.index()); + } else if (lazyRolloverFailureStoreFeature && writeToFailureStore && dataStream.getFailureIndices().isRolloverOnWrite()) { + failureStoresToBeRolledOver.add(request.index()); } } + } } @@ -418,11 +413,7 @@ private void rollOverDataStreams( RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null); rolloverRequest.masterNodeTimeout(bulkRequest.timeout); if (targetFailureStore) { - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) - .build() - ); + rolloverRequest.setRolloverTarget(IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES)); } // We are executing a lazy rollover because it is an action specialised for this situation, when we want an // unconditional and performant rollover. @@ -431,9 +422,8 @@ private void rollOverDataStreams( @Override public void onResponse(RolloverResponse result) { logger.debug( - "Data stream{} {} has {} over, the latest index is {}", - rolloverRequest.targetsFailureStore() ? " failure store" : "", - dataStream, + "Data stream [{}] has {} over, the latest index is {}", + rolloverRequest.getRolloverTarget(), result.isRolledOver() ? 
"been successfully rolled" : "skipped rolling", result.getNewIndex() ); @@ -492,7 +482,7 @@ private void failRequestsWhenPrerequisiteActionFailed( static void prohibitAppendWritesInBackingIndices(DocWriteRequest writeRequest, IndexAbstraction indexAbstraction) { DocWriteRequest.OpType opType = writeRequest.opType(); - if ((opType == OpType.CREATE || opType == OpType.INDEX) == false) { + if ((opType == DocWriteRequest.OpType.CREATE || opType == DocWriteRequest.OpType.INDEX) == false) { // op type not create or index, then bail early return; } @@ -588,7 +578,8 @@ void executeBulk( relativeTimeNanosProvider, startTimeNanos, listener, - failureStoreMetrics + failureStoreMetrics, + dataStreamFailureStoreSettings ).run(); } @@ -596,7 +587,7 @@ void executeBulk( * See {@link #resolveFailureStore(String, Metadata, long)} */ // Visibility for testing - static Boolean resolveFailureInternal(String indexName, Metadata metadata, long epochMillis) { + Boolean resolveFailureInternal(String indexName, Metadata metadata, long epochMillis) { if (DataStream.isFailureStoreFeatureFlagEnabled() == false) { return null; } @@ -604,7 +595,7 @@ static Boolean resolveFailureInternal(String indexName, Metadata metadata, long if (resolution != null) { return resolution; } - return resolveFailureStoreFromTemplate(indexName, metadata); + return resolveFailureStoreFromTemplate(indexName, metadata, epochMillis); } @Override @@ -619,7 +610,7 @@ protected Boolean resolveFailureStore(String indexName, Metadata metadata, long * @param epochMillis A timestamp to use when resolving date math in the index name. * @return true if the given index name corresponds to an existing data stream with a failure store enabled. */ - private static Boolean resolveFailureStoreFromMetadata(String indexName, Metadata metadata, long epochMillis) { + private Boolean resolveFailureStoreFromMetadata(String indexName, Metadata metadata, long epochMillis) { if (indexName == null) { return null; } @@ -636,7 +627,7 @@ private static Boolean resolveFailureStoreFromMetadata(String indexName, Metadat DataStream targetDataStream = DataStream.resolveDataStream(indexAbstraction, metadata); // We will store the failure if the write target belongs to a data stream with a failure store. - return targetDataStream != null && targetDataStream.isFailureStoreEnabled(); + return targetDataStream != null && targetDataStream.isFailureStoreEffectivelyEnabled(dataStreamFailureStoreSettings); } /** @@ -644,18 +635,20 @@ private static Boolean resolveFailureStoreFromMetadata(String indexName, Metadat * a data stream feature, the method returns true/false only if it is a data stream template, otherwise null. * @param indexName The index name to check. * @param metadata Cluster state metadata. - * @return true the associated index template has failure store enabled, false if the failure store is disabled or it's not specified, - * and null if the template is not a data stream template. - * Visible for testing + * @param epochMillis A timestamp to use when resolving date math in the index name. + * @return true the associated index template has failure store enabled, false if the failure store is disabled, true or false according + * to the cluster setting if there is a data stream template with no failure store option specified, and null if no template is + * found or if the template is not a data stream template. 
*/ @Nullable - static Boolean resolveFailureStoreFromTemplate(String indexName, Metadata metadata) { + private Boolean resolveFailureStoreFromTemplate(String indexName, Metadata metadata, long epochMillis) { if (indexName == null) { return null; } // Check to see if the index name matches any templates such that an index would have been attributed // We don't check v1 templates at all because failure stores can only exist on data streams via a v2 template + // N.B. This currently does date math resolution itself and does *not* use epochMillis (it gets the system time again) String template = MetadataIndexTemplateService.findV2Template(metadata, indexName, false); if (template != null) { // Check if this is a data stream template or if it is just a normal index. @@ -666,7 +659,12 @@ static Boolean resolveFailureStoreFromTemplate(String indexName, Metadata metada composableIndexTemplate, metadata.componentTemplates() ).mapAndGet(DataStreamOptions.Template::toDataStreamOptions); - return dataStreamOptions != null && dataStreamOptions.isFailureStoreEnabled(); + return DataStream.isFailureStoreEffectivelyEnabled( + dataStreamOptions, + dataStreamFailureStoreSettings, + IndexNameExpressionResolver.resolveDateMathExpression(indexName, epochMillis), + systemIndices + ); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 74143cc5c059b..89cee714a9ff2 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -48,9 +48,11 @@ import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.InferenceMetadataFieldsMapper; import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; @@ -326,7 +328,8 @@ static boolean executeBulkItemRequest( if (opType == DocWriteRequest.OpType.UPDATE) { final UpdateRequest updateRequest = (UpdateRequest) context.getCurrent(); try { - updateResult = updateHelper.prepare(updateRequest, context.getPrimary(), nowInMillisSupplier); + var gFields = getStoredFieldsSpec(context.getPrimary()); + updateResult = updateHelper.prepare(updateRequest, context.getPrimary(), nowInMillisSupplier, gFields); } catch (Exception failure) { // we may fail translating a update to index or delete operation // we use index result to communicate failure while translating update request @@ -401,6 +404,16 @@ static boolean executeBulkItemRequest( return true; } + private static String[] getStoredFieldsSpec(IndexShard indexShard) { + if (InferenceMetadataFieldsMapper.isEnabled(indexShard.mapperService().mappingLookup())) { + if (indexShard.mapperService().mappingLookup().inferenceFields().size() > 0) { + // Retrieves the inference metadata field containing the inference results for all semantic fields defined in the mapping. 
+ return new String[] { RoutingFieldMapper.NAME, InferenceMetadataFieldsMapper.NAME }; + } + } + return new String[] { RoutingFieldMapper.NAME }; + } + private static boolean handleMappingUpdateRequired( BulkPrimaryExecutionContext context, MappingUpdatePerformer mappingUpdater, diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java index 1353fa78595ef..18c420d99f525 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java @@ -9,6 +9,8 @@ package org.elasticsearch.action.bulk; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.IndexableField; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.indices.template.post.TransportSimulateIndexTemplateAction; @@ -33,6 +35,7 @@ import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Tuple; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettingProviders; @@ -40,6 +43,8 @@ import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.mapper.IgnoredFieldMapper; +import org.elasticsearch.index.mapper.LuceneDocument; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -60,6 +65,7 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -77,14 +83,8 @@ * shards are not actually modified). 
*/ public class TransportSimulateBulkAction extends TransportAbstractBulkAction { - public static final NodeFeature SIMULATE_MAPPING_VALIDATION = new NodeFeature("simulate.mapping.validation"); - public static final NodeFeature SIMULATE_MAPPING_VALIDATION_TEMPLATES = new NodeFeature("simulate.mapping.validation.templates"); - public static final NodeFeature SIMULATE_COMPONENT_TEMPLATE_SUBSTITUTIONS = new NodeFeature( - "simulate.component.template.substitutions" - ); - public static final NodeFeature SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS = new NodeFeature("simulate.index.template.substitutions"); - public static final NodeFeature SIMULATE_MAPPING_ADDITION = new NodeFeature("simulate.mapping.addition"); - public static final NodeFeature SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING = new NodeFeature("simulate.support.non.template.mapping"); + public static final NodeFeature SIMULATE_IGNORED_FIELDS = new NodeFeature("simulate.ignored.fields"); + private final IndicesService indicesService; private final NamedXContentRegistry xContentRegistry; private final Set indexSettingProviders; @@ -137,12 +137,13 @@ protected void doInternalExecute( DocWriteRequest docRequest = bulkRequest.requests.get(i); assert docRequest instanceof IndexRequest : "TransportSimulateBulkAction should only ever be called with IndexRequests"; IndexRequest request = (IndexRequest) docRequest; - Exception mappingValidationException = validateMappings( + Tuple, Exception> validationResult = validateMappings( componentTemplateSubstitutions, indexTemplateSubstitutions, mappingAddition, request ); + Exception mappingValidationException = validationResult.v2(); responses.set( i, BulkItemResponse.success( @@ -155,6 +156,7 @@ protected void doInternalExecute( request.source(), request.getContentType(), request.getExecutedPipelines(), + validationResult.v1(), mappingValidationException ) ) @@ -168,11 +170,12 @@ protected void doInternalExecute( /** * This creates a temporary index with the mappings of the index in the request, and then attempts to index the source from the request * into it. If there is a mapping exception, that exception is returned. On success the returned exception is null. 
-     * @parem componentTemplateSubstitutions The component template definitions to use in place of existing ones for validation
+     * @param componentTemplateSubstitutions The component template definitions to use in place of existing ones for validation
      * @param request The IndexRequest whose source will be validated against the mapping (if it exists) of its index
-     * @return a mapping exception if the source does not match the mappings, otherwise null
+     * @return a Tuple containing: (1) in v1 the names of any fields that would be ignored upon indexing and (2) in v2 the mapping
+     * exception if the source does not match the mappings, otherwise null
      */
-    private Exception validateMappings(
+    private Tuple<Collection<String>, Exception> validateMappings(
         Map<String, ComponentTemplate> componentTemplateSubstitutions,
         Map<String, ComposableIndexTemplate> indexTemplateSubstitutions,
         Map<String, Object> mappingAddition,
@@ -189,6 +192,7 @@ private Exception validateMappings(
         ClusterState state = clusterService.state();
         Exception mappingValidationException = null;
+        Collection<String> ignoredFields = List.of();
         IndexAbstraction indexAbstraction = state.metadata().getIndicesLookup().get(request.index());
         try {
             if (indexAbstraction != null
@@ -275,7 +279,7 @@ private Exception validateMappings(
                 );
                 CompressedXContent mappings = template.mappings();
                 CompressedXContent mergedMappings = mergeMappings(mappings, mappingAddition);
-                validateUpdatedMappings(mappings, mergedMappings, request, sourceToParse);
+                ignoredFields = validateUpdatedMappings(mappings, mergedMappings, request, sourceToParse);
             } else {
                 List<IndexTemplateMetadata> matchingTemplates = findV1Templates(simulatedState.metadata(), request.index(), false);
                 if (matchingTemplates.isEmpty() == false) {
@@ -289,7 +293,7 @@ private Exception validateMappings(
                         xContentRegistry
                     );
                     final CompressedXContent combinedMappings = mergeMappings(new CompressedXContent(mappingsMap), mappingAddition);
-                    validateUpdatedMappings(null, combinedMappings, request, sourceToParse);
+                    ignoredFields = validateUpdatedMappings(null, combinedMappings, request, sourceToParse);
                 } else if (indexAbstraction != null && mappingAddition.isEmpty() == false) {
                     /*
                      * The index matched no templates of any kind, including the substitutions. But it might have a mapping. So we
@@ -298,7 +302,7 @@ private Exception validateMappings(
                     MappingMetadata mappingFromIndex = clusterService.state().metadata().index(indexAbstraction.getName()).mapping();
                     CompressedXContent currentIndexCompressedXContent = mappingFromIndex == null ? null : mappingFromIndex.source();
                     CompressedXContent combinedMappings = mergeMappings(currentIndexCompressedXContent, mappingAddition);
-                    validateUpdatedMappings(null, combinedMappings, request, sourceToParse);
+                    ignoredFields = validateUpdatedMappings(null, combinedMappings, request, sourceToParse);
                 } else {
                     /*
                      * The index matched no templates and had no mapping of its own. If there were component template substitutions
@@ -306,27 +310,28 @@ private Exception validateMappings(
                      * and validate.
                      */
                     final CompressedXContent combinedMappings = mergeMappings(null, mappingAddition);
-                    validateUpdatedMappings(null, combinedMappings, request, sourceToParse);
+                    ignoredFields = validateUpdatedMappings(null, combinedMappings, request, sourceToParse);
                 }
             }
         } catch (Exception e) {
             mappingValidationException = e;
         }
-        return mappingValidationException;
+        return Tuple.tuple(ignoredFields, mappingValidationException);
     }
 
     /*
-     * Validates that when updatedMappings are applied
+     * Validates that when updatedMappings are applied. If any fields would be ignored while indexing, then those field names are returned.
+ * Otherwise the returned Collection is empty. */ - private void validateUpdatedMappings( + private Collection validateUpdatedMappings( @Nullable CompressedXContent originalMappings, @Nullable CompressedXContent updatedMappings, IndexRequest request, SourceToParse sourceToParse ) throws IOException { if (updatedMappings == null) { - return; // no validation to do + return List.of(); // no validation to do } Settings dummySettings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) @@ -343,7 +348,7 @@ private void validateUpdatedMappings( .settings(dummySettings) .putMapping(new MappingMetadata(updatedMappings)) .build(); - indicesService.withTempIndexService(originalIndexMetadata, indexService -> { + Engine.Index result = indicesService.withTempIndexService(originalIndexMetadata, indexService -> { indexService.mapperService().merge(updatedIndexMetadata, MapperService.MergeReason.MAPPING_UPDATE); return IndexShard.prepareIndex( indexService.mapperService(), @@ -360,6 +365,24 @@ private void validateUpdatedMappings( 0 ); }); + final Collection ignoredFields; + if (result == null) { + ignoredFields = List.of(); + } else { + List luceneDocuments = result.parsedDoc().docs(); + assert luceneDocuments == null || luceneDocuments.size() == 1 : "Expected a single lucene document from index attempt"; + if (luceneDocuments != null && luceneDocuments.size() == 1) { + ignoredFields = luceneDocuments.getFirst() + .getFields() + .stream() + .filter(field -> field.name().equals(IgnoredFieldMapper.NAME) && field instanceof StringField) + .map(IndexableField::stringValue) + .toList(); + } else { + ignoredFields = List.of(); + } + } + return ignoredFields; } private static CompressedXContent mergeMappings(@Nullable CompressedXContent originalMapping, Map mappingAddition) diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsActionUtil.java b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsActionUtil.java index a0a05138406c5..62caba8f7ed96 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsActionUtil.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsActionUtil.java @@ -9,16 +9,18 @@ package org.elasticsearch.action.datastreams; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.index.Index; +import java.util.ArrayList; import java.util.List; import java.util.SortedMap; -import java.util.stream.Stream; public class DataStreamsActionUtil { @@ -47,25 +49,79 @@ public static IndicesOptions updateIndicesOptions(IndicesOptions indicesOptions) return indicesOptions; } - public static Stream resolveConcreteIndexNames( + public static List resolveConcreteIndexNames( IndexNameExpressionResolver indexNameExpressionResolver, ClusterState clusterState, String[] names, IndicesOptions indicesOptions ) { - List abstractionNames = getDataStreamNames(indexNameExpressionResolver, clusterState, names, indicesOptions); + List abstractionNames = indexNameExpressionResolver.dataStreams( + clusterState, + updateIndicesOptions(indicesOptions), + names + ); SortedMap indicesLookup 
= clusterState.getMetadata().getIndicesLookup(); - return abstractionNames.stream().flatMap(abstractionName -> { + List results = new ArrayList<>(abstractionNames.size()); + for (ResolvedExpression abstractionName : abstractionNames) { + IndexAbstraction indexAbstraction = indicesLookup.get(abstractionName.resource()); + assert indexAbstraction != null; + if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) { + selectDataStreamIndicesNames( + (DataStream) indexAbstraction, + IndexComponentSelector.FAILURES.equals(abstractionName.selector()), + results + ); + } + } + return results; + } + + /** + * Resolves a list of expressions into data stream names and then collects the concrete indices + * that are applicable for those data streams based on the selector provided in the arguments. + * @param indexNameExpressionResolver resolver object + * @param clusterState state to query + * @param names data stream expressions + * @param selector which component indices of the data stream should be returned + * @param indicesOptions options for expression resolution + * @return A stream of concrete index names that belong to the components specified + * on the data streams returned from the expressions given + */ + public static List resolveConcreteIndexNamesWithSelector( + IndexNameExpressionResolver indexNameExpressionResolver, + ClusterState clusterState, + String[] names, + IndexComponentSelector selector, + IndicesOptions indicesOptions + ) { + assert indicesOptions.allowSelectors() == false : "If selectors are enabled, use resolveConcreteIndexNames instead"; + List abstractionNames = indexNameExpressionResolver.dataStreamNames( + clusterState, + updateIndicesOptions(indicesOptions), + names + ); + SortedMap indicesLookup = clusterState.getMetadata().getIndicesLookup(); + + List results = new ArrayList<>(abstractionNames.size()); + for (String abstractionName : abstractionNames) { IndexAbstraction indexAbstraction = indicesLookup.get(abstractionName); assert indexAbstraction != null; if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) { - DataStream dataStream = (DataStream) indexAbstraction; - List indices = dataStream.getIndices(); - return indices.stream().map(Index::getName); - } else { - return Stream.empty(); + if (selector.shouldIncludeData()) { + selectDataStreamIndicesNames((DataStream) indexAbstraction, false, results); + } + if (selector.shouldIncludeFailures()) { + selectDataStreamIndicesNames((DataStream) indexAbstraction, true, results); + } } - }); + } + return results; + } + + private static void selectDataStreamIndicesNames(DataStream indexAbstraction, boolean failureStore, List accumulator) { + for (Index index : indexAbstraction.getDataStreamIndices(failureStore).getIndices()) { + accumulator.add(index.getName()); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java index 9266bae439b73..82afeec752378 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java @@ -38,8 +38,6 @@ public DataStreamsStatsAction() { public static class Request extends BroadcastRequest { public Request() { - // this doesn't really matter since data stream name resolution isn't affected by IndicesOptions and - // a data stream's backing indices are retrieved from its metadata super( null, 
IndicesOptions.builder() @@ -58,10 +56,9 @@ public Request() { .allowAliasToMultipleIndices(true) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) - .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build() ); } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java index 4f647d4f02884..640c88918ffc0 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java @@ -61,7 +61,7 @@ public static class Request extends MasterNodeRequest implements Indice .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 93c40ad18cc8a..c55957787aee7 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -72,10 +72,11 @@ public static class Request extends MasterNodeReadRequest implements In .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) .build(); + private boolean includeDefaults = false; private boolean verbose = false; @@ -234,6 +235,7 @@ public static class DataStreamInfo implements SimpleDiffable, To private final DataStream dataStream; private final ClusterHealthStatus dataStreamStatus; + private final boolean failureStoreEffectivelyEnabled; // Must be serialized independently of dataStream as depends on settings @Nullable private final String indexTemplate; @Nullable @@ -247,6 +249,7 @@ public static class DataStreamInfo implements SimpleDiffable, To public DataStreamInfo( DataStream dataStream, + boolean failureStoreEffectivelyEnabled, ClusterHealthStatus dataStreamStatus, @Nullable String indexTemplate, @Nullable String ilmPolicyName, @@ -256,6 +259,7 @@ public DataStreamInfo( @Nullable Long maximumTimestamp ) { this.dataStream = dataStream; + this.failureStoreEffectivelyEnabled = failureStoreEffectivelyEnabled; this.dataStreamStatus = dataStreamStatus; this.indexTemplate = indexTemplate; this.ilmPolicyName = ilmPolicyName; @@ -267,22 +271,32 @@ public DataStreamInfo( @SuppressWarnings("unchecked") DataStreamInfo(StreamInput in) throws IOException { - this( - DataStream.read(in), - ClusterHealthStatus.readFrom(in), - in.readOptionalString(), - in.readOptionalString(), - in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0) ? in.readOptionalWriteable(TimeSeries::new) : null, - in.getTransportVersion().onOrAfter(V_8_11_X) ? in.readMap(Index::new, IndexProperties::new) : Map.of(), - in.getTransportVersion().onOrAfter(V_8_11_X) ? in.readBoolean() : true, - in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readOptionalVLong() : null - ); + this.dataStream = DataStream.read(in); + this.failureStoreEffectivelyEnabled = in.getTransportVersion() + .onOrAfter(TransportVersions.FAILURE_STORE_ENABLED_BY_CLUSTER_SETTING) + ? 
in.readBoolean() + : dataStream.isFailureStoreExplicitlyEnabled(); // Revert to the behaviour before this field was added + this.dataStreamStatus = ClusterHealthStatus.readFrom(in); + this.indexTemplate = in.readOptionalString(); + this.ilmPolicyName = in.readOptionalString(); + this.timeSeries = in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0) + ? in.readOptionalWriteable(TimeSeries::new) + : null; + this.indexSettingsValues = in.getTransportVersion().onOrAfter(V_8_11_X) + ? in.readMap(Index::new, IndexProperties::new) + : Map.of(); + this.templatePreferIlmValue = in.getTransportVersion().onOrAfter(V_8_11_X) ? in.readBoolean() : true; + this.maximumTimestamp = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readOptionalVLong() : null; } public DataStream getDataStream() { return dataStream; } + public boolean isFailureStoreEffectivelyEnabled() { + return failureStoreEffectivelyEnabled; + } + public ClusterHealthStatus getDataStreamStatus() { return dataStreamStatus; } @@ -318,6 +332,9 @@ public Long getMaximumTimestamp() { @Override public void writeTo(StreamOutput out) throws IOException { dataStream.writeTo(out); + if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_ENABLED_BY_CLUSTER_SETTING)) { + out.writeBoolean(failureStoreEffectivelyEnabled); + } dataStreamStatus.writeTo(out); out.writeOptionalString(indexTemplate); out.writeOptionalString(ilmPolicyName); @@ -398,7 +415,7 @@ public XContentBuilder toXContent( } if (DataStream.isFailureStoreFeatureFlagEnabled()) { builder.startObject(DataStream.FAILURE_STORE_FIELD.getPreferredName()); - builder.field(FAILURE_STORE_ENABLED.getPreferredName(), dataStream.isFailureStoreEnabled()); + builder.field(FAILURE_STORE_ENABLED.getPreferredName(), failureStoreEffectivelyEnabled); builder.field( DataStream.ROLLOVER_ON_WRITE_FIELD.getPreferredName(), dataStream.getFailureIndices().isRolloverOnWrite() @@ -477,6 +494,7 @@ public boolean equals(Object o) { DataStreamInfo that = (DataStreamInfo) o; return templatePreferIlmValue == that.templatePreferIlmValue && Objects.equals(dataStream, that.dataStream) + && failureStoreEffectivelyEnabled == that.failureStoreEffectivelyEnabled && dataStreamStatus == that.dataStreamStatus && Objects.equals(indexTemplate, that.indexTemplate) && Objects.equals(ilmPolicyName, that.ilmPolicyName) @@ -490,6 +508,7 @@ public int hashCode() { return Objects.hash( dataStream, dataStreamStatus, + failureStoreEffectivelyEnabled, indexTemplate, ilmPolicyName, timeSeries, diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java index 580cf92d15e2c..2eed45e5afa6d 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java @@ -23,8 +23,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.features.FeatureService; -import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.Index; import java.util.List; @@ -43,8 +41,6 @@ public class DataStreamAutoShardingService { private static final Logger logger = LogManager.getLogger(DataStreamAutoShardingService.class); public static final String DATA_STREAMS_AUTO_SHARDING_ENABLED = 
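DataStreamInfo above now carries failureStoreEffectivelyEnabled over the wire only when both sides are new enough, and falls back to the pre-field derivation when reading an older payload. A minimal sketch of that version-gated read/write pattern; plain DataInputStream/DataOutputStream stand in for StreamInput/StreamOutput, and the version constant here is a made-up placeholder.

```java
import java.io.*;

public class VersionGatedFieldSketch {
    static final int FAILURE_STORE_FLAG_VERSION = 9_000_000; // hypothetical wire version

    static void write(DataOutputStream out, int wireVersion, boolean effectivelyEnabled) throws IOException {
        out.writeInt(wireVersion);
        if (wireVersion >= FAILURE_STORE_FLAG_VERSION) {
            out.writeBoolean(effectivelyEnabled); // only serialized on new enough versions
        }
    }

    static boolean read(DataInputStream in, boolean explicitlyEnabledFallback) throws IOException {
        int wireVersion = in.readInt();
        return wireVersion >= FAILURE_STORE_FLAG_VERSION
            ? in.readBoolean()
            : explicitlyEnabledFallback; // old payload: revert to the behaviour before the field existed
    }

    static DataInputStream in(ByteArrayOutputStream out) {
        return new DataInputStream(new ByteArrayInputStream(out.toByteArray()));
    }

    public static void main(String[] args) throws IOException {
        var newWire = new ByteArrayOutputStream();
        write(new DataOutputStream(newWire), FAILURE_STORE_FLAG_VERSION, true);
        var oldWire = new ByteArrayOutputStream();
        write(new DataOutputStream(oldWire), FAILURE_STORE_FLAG_VERSION - 1, true);
        System.out.println(read(in(newWire), false)); // true: the flag was on the wire
        System.out.println(read(in(oldWire), false)); // false: fell back to the old derivation
    }
}
```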
"data_streams.auto_sharding.enabled"; - public static final NodeFeature DATA_STREAM_AUTO_SHARDING_FEATURE = new NodeFeature("data_stream.auto_sharding"); - public static final Setting> DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING = Setting.listSetting( "data_streams.auto_sharding.excludes", List.of(), @@ -101,7 +97,6 @@ public class DataStreamAutoShardingService { ); private final ClusterService clusterService; private final boolean isAutoShardingEnabled; - private final FeatureService featureService; private final LongSupplier nowSupplier; private volatile TimeValue increaseShardsCooldown; private volatile TimeValue reduceShardsCooldown; @@ -109,12 +104,7 @@ public class DataStreamAutoShardingService { private volatile int maxWriteThreads; private volatile List dataStreamExcludePatterns; - public DataStreamAutoShardingService( - Settings settings, - ClusterService clusterService, - FeatureService featureService, - LongSupplier nowSupplier - ) { + public DataStreamAutoShardingService(Settings settings, ClusterService clusterService, LongSupplier nowSupplier) { this.clusterService = clusterService; this.isAutoShardingEnabled = settings.getAsBoolean(DATA_STREAMS_AUTO_SHARDING_ENABLED, false); this.increaseShardsCooldown = DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN.get(settings); @@ -122,7 +112,6 @@ public DataStreamAutoShardingService( this.minWriteThreads = CLUSTER_AUTO_SHARDING_MIN_WRITE_THREADS.get(settings); this.maxWriteThreads = CLUSTER_AUTO_SHARDING_MAX_WRITE_THREADS.get(settings); this.dataStreamExcludePatterns = DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.get(settings); - this.featureService = featureService; this.nowSupplier = nowSupplier; } @@ -168,15 +157,6 @@ public AutoShardingResult calculate(ClusterState state, DataStream dataStream, @ return NOT_APPLICABLE_RESULT; } - if (featureService.clusterHasFeature(state, DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE) == false) { - logger.debug( - "Data stream auto sharding service cannot compute the optimal number of shards for data stream [{}] because the cluster " - + "doesn't have the auto sharding feature", - dataStream.getName() - ); - return NOT_APPLICABLE_RESULT; - } - if (dataStreamExcludePatterns.stream().anyMatch(pattern -> Regex.simpleMatch(pattern, dataStream.getName()))) { logger.debug( "Data stream [{}] is excluded from auto sharding via the [{}] setting", diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java index a43d29501a7ee..401bd7a27c6fa 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java @@ -63,7 +63,7 @@ public static class Request extends MasterNodeReadRequest implements In .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java index b054d12890366..c2b7de8d5df8b 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java @@ -94,7 +94,7 @@ public static Request parseRequest(XContentParser parser, Factory factory) { .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(false) + .allowSelectors(false) .build() ) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java index 62771230636c1..cce01aca7685a 100644 --- a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java +++ b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java @@ -82,7 +82,7 @@ public String[] indices() { @Override public IndicesOptions indicesOptions() { - return IndicesOptions.STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED; + return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/RequestDispatcher.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/RequestDispatcher.java index 802f5d196569a..fce925d868532 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/RequestDispatcher.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/RequestDispatcher.java @@ -95,7 +95,7 @@ final class RequestDispatcher { for (String index : indices) { final GroupShardsIterator shardIts; try { - shardIts = clusterService.operationRouting().searchShards(clusterState, new String[] { index }, null, null, null, null); + shardIts = clusterService.operationRouting().searchShards(clusterState, new String[] { index }, null, null); } catch (Exception e) { onIndexFailure.accept(index, e); continue; diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index 22537f1f51216..e6469ab75299e 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.VersionType; @@ -52,7 +51,6 @@ public class MultiGetRequest extends ActionRequest CompositeIndicesRequest, RealtimeRequest, ToXContentObject { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(MultiGetRequest.class); private static final ParseField DOCS = new ParseField("docs"); private static final ParseField INDEX = new ParseField("_index"); diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index a2c7c8664e81a..29b926598ac32 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.PlainShardIterator; import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.cluster.routing.ShardRouting; import 
org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.TimeValue; @@ -109,7 +108,7 @@ protected ShardIterator shards(ClusterState state, InternalRequest request) { if (iterator == null) { return null; } - return new PlainShardIterator(iterator.shardId(), iterator.getShardRoutings().stream().filter(ShardRouting::isSearchable).toList()); + return PlainShardIterator.allSearchableShards(iterator); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java index 01dc705d7146b..e5087a790a292 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -81,7 +82,7 @@ protected void doExecute(Task task, final MultiGetRequest request, final ActionL lastResolvedIndex = Tuple.tuple(item.index(), concreteSingleIndex); } item.routing(clusterState.metadata().resolveIndexRouting(item.routing(), item.index())); - shardId = clusterService.operationRouting().shardId(clusterState, concreteSingleIndex, item.id(), item.routing()); + shardId = OperationRouting.shardId(clusterState, concreteSingleIndex, item.id(), item.routing()); } catch (RoutingMissingException e) { responses.set(i, newItemFailure(e.getIndex().getName(), e.getId(), e)); continue; diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java index 0fa770df8e4ef..d9a04acc0466e 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.PlainShardIterator; import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.TimeValue; @@ -113,7 +112,7 @@ protected ShardIterator shards(ClusterState state, InternalRequest request) { if (iterator == null) { return null; } - return new PlainShardIterator(iterator.shardId(), iterator.getShardRoutings().stream().filter(ShardRouting::isSearchable).toList()); + return PlainShardIterator.allSearchableShards(iterator); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index d5b8b657bd14e..4343451256920 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -51,6 +51,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.OptionalInt; import java.util.function.Supplier; 
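In the IndexRequest hunk just below, autoGenerateTimeBasedId gains an optional routing hash that is folded into the generated id. A rough, self-contained sketch of the idea (timestamp prefix for sortability and compression, optional hash carried near the tail). The exact byte layout of base64TimeBasedKOrderedUUIDWithHash is not reproduced: the javadoc below says the real id embeds the hash starting nine bytes before the end, while this sketch simply appends four bytes at the tail.

```java
import java.nio.ByteBuffer;
import java.security.SecureRandom;
import java.util.Base64;
import java.util.OptionalInt;

public class TimeBasedIdSketch {
    private static final SecureRandom RANDOM = new SecureRandom();

    static String newId(long timestampMillis, OptionalInt routingHash) {
        ByteBuffer buf = ByteBuffer.allocate(20);
        buf.putLong(timestampMillis);                        // leading timestamp keeps ids roughly time-ordered
        byte[] random = new byte[8];
        RANDOM.nextBytes(random);
        buf.put(random);                                     // randomness for uniqueness
        buf.putInt(routingHash.orElseGet(RANDOM::nextInt));  // tail carries the routing hash when provided
        return Base64.getUrlEncoder().withoutPadding().encodeToString(buf.array());
    }

    public static void main(String[] args) {
        String a = newId(System.currentTimeMillis(), OptionalInt.empty());
        String b = newId(System.currentTimeMillis() + 1, OptionalInt.of("user-routing".hashCode()));
        System.out.println(a + "\n" + b); // ids share a sortable, time-ordered prefix
    }
}
```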
import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -78,7 +79,6 @@ public class IndexRequest extends ReplicatedWriteRequest implement private static final TransportVersion PIPELINES_HAVE_RUN_FIELD_ADDED = TransportVersions.V_8_10_X; private static final Supplier ID_GENERATOR = UUIDs::base64UUID; - private static final Supplier K_SORTED_TIME_BASED_ID_GENERATOR = UUIDs::base64TimeBasedKOrderedUUID; /** * Max length of the source document to include into string() @@ -705,9 +705,18 @@ public void autoGenerateId() { } public void autoGenerateTimeBasedId() { + autoGenerateTimeBasedId(OptionalInt.empty()); + } + + /** + * Set the {@code #id()} to an automatically generated one, optimized for storage (compression) efficiency. + * If a routing hash is passed, it is included in the generated id starting at 9 bytes before the end. + * @param hash optional routing hash value, used to route requests by id to the right shard. + */ + public void autoGenerateTimeBasedId(OptionalInt hash) { assertBeforeGeneratingId(); autoGenerateTimestamp(); - id(K_SORTED_TIME_BASED_ID_GENERATOR.get()); + id(UUIDs.base64TimeBasedKOrderedUUIDWithHash(hash)); } private void autoGenerateTimestamp() { @@ -899,12 +908,18 @@ public IndexRequest setRequireAlias(boolean requireAlias) { } /** - * Transient flag denoting that the local request should be routed to a failure store. Not persisted across the wire. + * Returns a transient flag denoting that the local request should be routed to a failure store. Not persisted across the wire. N.B. If + * true, the failure store will be used regardless of whether the metadata indicates that the failure store is enabled. */ public boolean isWriteToFailureStore() { return writeToFailureStore; } + /** + * Sets a transient flag denoting that the local request should be routed to a failure store. Not persisted across the wire. N.B. If + * true, the failure store will be used regardless of whether the metadata indicates that the failure store is enabled. It is the + * caller's responsibility to ensure that this is correct. 
+     */
     public IndexRequest setWriteToFailureStore(boolean writeToFailureStore) {
         this.writeToFailureStore = writeToFailureStore;
         return this;
diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java
index 9d883cb075ede..307996a4c72cb 100644
--- a/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulateIndexResponse.java
@@ -24,6 +24,7 @@ import org.elasticsearch.xcontent.XContentType;
 
 import java.io.IOException;
+import java.util.Collection;
 import java.util.List;
 
 /**
@@ -34,6 +35,7 @@ public class SimulateIndexResponse extends IndexResponse {
 
     private final BytesReference source;
     private final XContentType sourceXContentType;
+    private final Collection<String> ignoredFields;
     private final Exception exception;
 
     @SuppressWarnings("this-escape")
@@ -47,6 +49,11 @@ public SimulateIndexResponse(StreamInput in) throws IOException {
         } else {
             this.exception = null;
         }
+        if (in.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_IGNORED_FIELDS)) {
+            this.ignoredFields = in.readStringCollectionAsList();
+        } else {
+            this.ignoredFields = List.of();
+        }
     }
 
     @SuppressWarnings("this-escape")
@@ -57,6 +64,7 @@ public SimulateIndexResponse(
         BytesReference source,
         XContentType sourceXContentType,
         List<String> pipelines,
+        Collection<String> ignoredFields,
         @Nullable Exception exception
     ) {
         // We don't actually care about most of the IndexResponse fields:
@@ -73,6 +81,7 @@ public SimulateIndexResponse(
         this.source = source;
         this.sourceXContentType = sourceXContentType;
         setShardInfo(ShardInfo.EMPTY);
+        this.ignoredFields = ignoredFields;
         this.exception = exception;
     }
 
@@ -84,6 +93,15 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
         builder.field("_source", XContentHelper.convertToMap(source, false, sourceXContentType).v2());
         assert executedPipelines != null : "executedPipelines is null when it shouldn't be - we always list pipelines in simulate mode";
         builder.array("executed_pipelines", executedPipelines.toArray());
+        if (ignoredFields.isEmpty() == false) {
+            builder.startArray("ignored_fields");
+            for (String ignoredField : ignoredFields) {
+                builder.startObject();
+                builder.field("field", ignoredField);
+                builder.endObject();
+            }
+            builder.endArray();
+        }
         if (exception != null) {
             builder.startObject("error");
             ElasticsearchException.generateThrowableXContent(builder, params, exception);
@@ -105,6 +124,9 @@ public void writeTo(StreamOutput out) throws IOException {
         if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
             out.writeException(exception);
         }
+        if (out.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_IGNORED_FIELDS)) {
+            out.writeStringCollection(ignoredFields);
+        }
     }
 
     public Exception getException() {
diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java
index 47abfe266c524..73e6a0306247d 100644
--- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java
@@ -231,7 +231,7 @@ public final void start() {
     }
 
     @Override
-    public final void run() {
+    protected final void run() {
         for (final SearchShardIterator iterator : toSkipShardsIts) {
             assert iterator.skip();
             skipShard(iterator);
@@ -286,7 +286,7 @@ private static 
boolean assertExecuteOnStartThread() { return true; } - protected void performPhaseOnShard(final int shardIndex, final SearchShardIterator shardIt, final SearchShardTarget shard) { + private void performPhaseOnShard(final int shardIndex, final SearchShardIterator shardIt, final SearchShardTarget shard) { if (throttleConcurrentRequests) { var pendingExecutions = pendingExecutionsPerNode.computeIfAbsent( shard.getNodeId(), @@ -349,7 +349,7 @@ protected abstract void executePhaseOnShard( * of the next phase. If there are no successful operations in the context when this method is executed the search is aborted and * a response is returned to the user indicating that all shards have failed. */ - protected void executeNextPhase(SearchPhase currentPhase, Supplier nextPhaseSupplier) { + protected void executeNextPhase(String currentPhase, Supplier nextPhaseSupplier) { /* This is the main search phase transition where we move to the next phase. If all shards * failed or if there was a failure and partial results are not allowed, then we immediately * fail. Otherwise we continue to the next phase. @@ -360,7 +360,7 @@ protected void executeNextPhase(SearchPhase currentPhase, Supplier Throwable cause = shardSearchFailures.length == 0 ? null : ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; - logger.debug(() -> "All shards failed for phase: [" + currentPhase.getName() + "]", cause); + logger.debug(() -> "All shards failed for phase: [" + currentPhase + "]", cause); onPhaseFailure(currentPhase, "all shards failed", cause); } else { Boolean allowPartialResults = request.allowPartialSearchResults(); @@ -373,7 +373,7 @@ protected void executeNextPhase(SearchPhase currentPhase, Supplier int numShardFailures = shardSearchFailures.length; shardSearchFailures = ExceptionsHelper.groupBy(shardSearchFailures); Throwable cause = ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; - logger.debug(() -> format("%s shards failed for phase: [%s]", numShardFailures, currentPhase.getName()), cause); + logger.debug(() -> format("%s shards failed for phase: [%s]", numShardFailures, currentPhase), cause); } onPhaseFailure(currentPhase, "Partial shards failure", null); } else { @@ -386,7 +386,7 @@ protected void executeNextPhase(SearchPhase currentPhase, Supplier successfulOps.get(), skippedOps.get(), getNumShards(), - currentPhase.getName() + currentPhase ); } onPhaseFailure(currentPhase, "Partial shards failure (" + discrepancy + " shards unavailable)", null); @@ -400,7 +400,7 @@ protected void executeNextPhase(SearchPhase currentPhase, Supplier .collect(Collectors.joining(",")); logger.trace( "[{}] Moving to next phase: [{}], based on results from: {} (cluster state version: {})", - currentPhase.getName(), + currentPhase, nextPhase.getName(), resultsFrom, clusterStateVersion @@ -413,11 +413,11 @@ protected void executeNextPhase(SearchPhase currentPhase, Supplier private void executePhase(SearchPhase phase) { try { phase.run(); - } catch (Exception e) { + } catch (RuntimeException e) { if (logger.isDebugEnabled()) { logger.debug(() -> format("Failed to execute [%s] while moving to [%s] phase", request, phase.getName()), e); } - onPhaseFailure(phase, "", e); + onPhaseFailure(phase.getName(), "", e); } } @@ -521,7 +521,6 @@ void onShardFailure(final int shardIndex, SearchShardTarget shardTarget, Excepti successfulOps.decrementAndGet(); // if this shard was successful before (initial phase) we have to adjust the counter } } - results.consumeShardFailure(shardIndex); } 
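The AbstractSearchAsyncAction hunks above switch executeNextPhase and onPhaseFailure from taking a SearchPhase object to taking the phase name as a String, which lets a failure be reported even when the next phase was never successfully constructed. A simplified stand-alone sketch of that shape, not the real classes:

```java
import java.util.function.Supplier;

public class PhaseByNameSketch {
    abstract static class Phase {
        final String name;

        Phase(String name) { this.name = name; }

        protected abstract void run();
    }

    static void executeNextPhase(String currentPhase, Supplier<Phase> nextPhaseSupplier) {
        Phase next;
        try {
            next = nextPhaseSupplier.get();       // may throw before any phase object exists
        } catch (RuntimeException e) {
            onPhaseFailure(currentPhase, "failed to build next phase", e);
            return;
        }
        try {
            next.run();
        } catch (RuntimeException e) {
            onPhaseFailure(next.name, "", e);     // only the name is needed on the error path
        }
    }

    static void onPhaseFailure(String phase, String msg, Throwable cause) {
        System.err.println("phase [" + phase + "] failed: " + msg + " " + cause);
    }

    public static void main(String[] args) {
        executeNextPhase("query", () -> new Phase("fetch") {
            @Override
            protected void run() { throw new IllegalStateException("boom"); }
        });
    }
}
```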
private static boolean isTaskCancelledException(Exception e) { @@ -694,8 +693,8 @@ public void sendSearchResponse(SearchResponseSections internalSearchResponse, At * @param msg an optional message * @param cause the cause of the phase failure */ - public void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) { - raisePhaseFailure(new SearchPhaseExecutionException(phase.getName(), msg, cause, buildShardFailures())); + public void onPhaseFailure(String phase, String msg, Throwable cause) { + raisePhaseFailure(new SearchPhaseExecutionException(phase, msg, cause, buildShardFailures())); } /** @@ -740,7 +739,7 @@ void sendReleaseSearchContext(ShardSearchContextId contextId, Transport.Connecti * @see #onShardResult(SearchPhaseResult, SearchShardIterator) */ private void onPhaseDone() { // as a tribute to @kimchy aka. finishHim() - executeNextPhase(this, this::getNextPhase); + executeNextPhase(getName(), this::getNextPhase); } /** diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java index ba1afaf4678fb..7890a0f9f9738 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java @@ -21,7 +21,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.search.Scroll; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.builder.SubSearchSourceBuilder; @@ -49,7 +48,8 @@ public class CanMatchNodeRequest extends TransportRequest implements IndicesRequ private final SearchType searchType; private final Boolean requestCache; private final boolean allowPartialSearchResults; - private final Scroll scroll; + @Nullable + private final TimeValue scroll; private final int numberOfShards; private final long nowInMillis; @Nullable @@ -195,7 +195,7 @@ public CanMatchNodeRequest(StreamInput in) throws IOException { ); } } - scroll = in.readOptionalWriteable(Scroll::new); + scroll = in.readOptionalTimeValue(); requestCache = in.readOptionalBoolean(); allowPartialSearchResults = in.readBoolean(); numberOfShards = in.readVInt(); @@ -216,7 +216,7 @@ public void writeTo(StreamOutput out) throws IOException { // types not supported so send an empty array to previous versions out.writeStringArray(Strings.EMPTY_ARRAY); } - out.writeOptionalWriteable(scroll); + out.writeOptionalTimeValue(scroll); out.writeOptionalBoolean(requestCache); out.writeBoolean(allowPartialSearchResults); out.writeVInt(numberOfShards); diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index eaf62d1e57e66..d45a8a6f01cd1 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -42,7 +42,6 @@ import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.function.BiFunction; import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.core.Types.forciblyCast; @@ -74,7 +73,9 @@ final class CanMatchPreFilterSearchPhase { private 
final Executor executor; private final boolean requireAtLeastOneMatch; - private final CanMatchSearchPhaseResults results; + private final FixedBitSet possibleMatches; + private final MinAndMax[] minAndMaxes; + private int numPossibleMatches; private final CoordinatorRewriteContextProvider coordinatorRewriteContextProvider; CanMatchPreFilterSearchPhase( @@ -105,12 +106,13 @@ final class CanMatchPreFilterSearchPhase { this.requireAtLeastOneMatch = requireAtLeastOneMatch; this.coordinatorRewriteContextProvider = coordinatorRewriteContextProvider; this.executor = executor; - results = new CanMatchSearchPhaseResults(shardsIts.size()); - + final int size = shardsIts.size(); + possibleMatches = new FixedBitSet(size); + minAndMaxes = new MinAndMax[size]; // we compute the shard index based on the natural order of the shards // that participate in the search request. This means that this number is // consistent between two requests that target the same shards. - final SearchShardIterator[] naturalOrder = new SearchShardIterator[shardsIts.size()]; + final SearchShardIterator[] naturalOrder = new SearchShardIterator[size]; int i = 0; for (SearchShardIterator shardsIt : shardsIts) { naturalOrder[i++] = shardsIt; @@ -138,7 +140,7 @@ private void runCoordinatorRewritePhase() { request, searchShardIterator.getOriginalIndices().indicesOptions(), Collections.emptyList(), - getNumShards(), + shardsIts.size(), timeProvider.absoluteStartMillis(), searchShardIterator.getClusterAlias() ); @@ -177,7 +179,27 @@ private void runCoordinatorRewritePhase() { private void consumeResult(boolean canMatch, ShardSearchRequest request) { CanMatchShardResponse result = new CanMatchShardResponse(canMatch, null); result.setShardIndex(request.shardRequestIndex()); - results.consumeResult(result, () -> {}); + consumeResult(result, () -> {}); + } + + private void consumeResult(CanMatchShardResponse result, Runnable next) { + try { + final boolean canMatch = result.canMatch(); + final MinAndMax minAndMax = result.estimatedMinAndMax(); + if (canMatch || minAndMax != null) { + consumeResult(result.getShardIndex(), canMatch, minAndMax); + } + } finally { + next.run(); + } + } + + private synchronized void consumeResult(int shardIndex, boolean canMatch, MinAndMax minAndMax) { + if (canMatch) { + possibleMatches.set(shardIndex); + numPossibleMatches++; + } + minAndMaxes[shardIndex] = minAndMax; } private void checkNoMissingShards(GroupShardsIterator shards) { @@ -235,32 +257,38 @@ protected void doRun() { continue; } + var sendingTarget = entry.getKey(); try { - searchTransportService.sendCanMatch(getConnection(entry.getKey()), canMatchNodeRequest, task, new ActionListener<>() { - @Override - public void onResponse(CanMatchNodeResponse canMatchNodeResponse) { - assert canMatchNodeResponse.getResponses().size() == canMatchNodeRequest.getShardLevelRequests().size(); - for (int i = 0; i < canMatchNodeResponse.getResponses().size(); i++) { - CanMatchNodeResponse.ResponseOrFailure response = canMatchNodeResponse.getResponses().get(i); - if (response.getResponse() != null) { - CanMatchShardResponse shardResponse = response.getResponse(); - shardResponse.setShardIndex(shardLevelRequests.get(i).getShardRequestIndex()); - onOperation(shardResponse.getShardIndex(), shardResponse); - } else { - Exception failure = response.getException(); - assert failure != null; - onOperationFailed(shardLevelRequests.get(i).getShardRequestIndex(), failure); + searchTransportService.sendCanMatch( + nodeIdToConnection.apply(sendingTarget.clusterAlias, 
sendingTarget.nodeId), + canMatchNodeRequest, + task, + new ActionListener<>() { + @Override + public void onResponse(CanMatchNodeResponse canMatchNodeResponse) { + assert canMatchNodeResponse.getResponses().size() == canMatchNodeRequest.getShardLevelRequests().size(); + for (int i = 0; i < canMatchNodeResponse.getResponses().size(); i++) { + CanMatchNodeResponse.ResponseOrFailure response = canMatchNodeResponse.getResponses().get(i); + if (response.getResponse() != null) { + CanMatchShardResponse shardResponse = response.getResponse(); + shardResponse.setShardIndex(shardLevelRequests.get(i).getShardRequestIndex()); + onOperation(shardResponse.getShardIndex(), shardResponse); + } else { + Exception failure = response.getException(); + assert failure != null; + onOperationFailed(shardLevelRequests.get(i).getShardRequestIndex(), failure); + } } } - } - @Override - public void onFailure(Exception e) { - for (CanMatchNodeRequest.Shard shard : shardLevelRequests) { - onOperationFailed(shard.getShardRequestIndex(), e); + @Override + public void onFailure(Exception e) { + for (CanMatchNodeRequest.Shard shard : shardLevelRequests) { + onOperationFailed(shard.getShardRequestIndex(), e); + } } } - }); + ); } catch (Exception e) { for (CanMatchNodeRequest.Shard shard : shardLevelRequests) { onOperationFailed(shard.getShardRequestIndex(), e); @@ -271,7 +299,7 @@ public void onFailure(Exception e) { private void onOperation(int idx, CanMatchShardResponse response) { failedResponses.set(idx, null); - results.consumeResult(response, () -> { + consumeResult(response, () -> { if (countDown.countDown()) { finishRound(); } @@ -280,7 +308,8 @@ private void onOperation(int idx, CanMatchShardResponse response) { private void onOperationFailed(int idx, Exception e) { failedResponses.set(idx, e); - results.consumeShardFailure(idx); + // we have to carry over shard failures in order to account for them in the response. + consumeResult(idx, true, null); if (countDown.countDown()) { finishRound(); } @@ -334,14 +363,14 @@ private CanMatchNodeRequest createCanMatchRequest(Map.Entry { - private final FixedBitSet possibleMatches; - private final MinAndMax[] minAndMaxes; - private int numPossibleMatches; - - CanMatchSearchPhaseResults(int size) { - super(size); - possibleMatches = new FixedBitSet(size); - minAndMaxes = new MinAndMax[size]; - } - - @Override - void consumeResult(CanMatchShardResponse result, Runnable next) { - try { - final boolean canMatch = result.canMatch(); - final MinAndMax minAndMax = result.estimatedMinAndMax(); - if (canMatch || minAndMax != null) { - consumeResult(result.getShardIndex(), canMatch, minAndMax); - } - } finally { - next.run(); - } - } - - @Override - boolean hasResult(int shardIndex) { - return false; // unneeded - } - - @Override - void consumeShardFailure(int shardIndex) { - // we have to carry over shard failures in order to account for them in the response. 
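CanMatchPreFilterSearchPhase inlines its former CanMatchSearchPhaseResults helper into a bit set of possible matches, a parallel min/max array, and a counter, all updated under synchronization, with shard failures recorded as possible matches so they still count toward the response. A minimal sketch of that bookkeeping; java.util.BitSet stands in for Lucene's FixedBitSet, and Long bounds for MinAndMax.

```java
import java.util.BitSet;

public class CanMatchResultsSketch {
    private final BitSet possibleMatches;
    private final Long[] minAndMaxes; // estimated per-shard sort bounds, null when unknown
    private int numPossibleMatches;

    CanMatchResultsSketch(int numShards) {
        possibleMatches = new BitSet(numShards);
        minAndMaxes = new Long[numShards];
    }

    // Shard responses can arrive on different transport threads, hence the synchronization.
    synchronized void consumeResult(int shardIndex, boolean canMatch, Long minAndMax) {
        if (canMatch) {
            possibleMatches.set(shardIndex);
            numPossibleMatches++;
        }
        minAndMaxes[shardIndex] = minAndMax;
    }

    // Failures are treated as "can match" so the shard is not silently skipped.
    synchronized void consumeFailure(int shardIndex) {
        consumeResult(shardIndex, true, null);
    }

    synchronized int numPossibleMatches() { return numPossibleMatches; }

    public static void main(String[] args) {
        var results = new CanMatchResultsSketch(3);
        results.consumeResult(0, true, 42L);
        results.consumeResult(1, false, null);
        results.consumeFailure(2);
        System.out.println(results.numPossibleMatches()); // 2
    }
}
```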
- consumeResult(shardIndex, true, null); - } - - private synchronized void consumeResult(int shardIndex, boolean canMatch, MinAndMax minAndMax) { - if (canMatch) { - possibleMatches.set(shardIndex); - numPossibleMatches++; - } - minAndMaxes[shardIndex] = minAndMax; - } - - synchronized int getNumPossibleMatches() { - return numPossibleMatches; - } - - synchronized FixedBitSet getPossibleMatches() { - return possibleMatches; - } - - @Override - Stream getSuccessfulResults() { - return Stream.empty(); - } - - @Override - public void close() {} - } - - private GroupShardsIterator getIterator( - CanMatchSearchPhaseResults results, - GroupShardsIterator shardsIts - ) { - FixedBitSet possibleMatches = results.getPossibleMatches(); + private synchronized GroupShardsIterator getIterator(GroupShardsIterator shardsIts) { // TODO: pick the local shard when possible - if (requireAtLeastOneMatch && results.getNumPossibleMatches() == 0) { + if (requireAtLeastOneMatch && numPossibleMatches == 0) { // this is a special case where we have no hit but we need to get at least one search response in order // to produce a valid search result with all the aggs etc. // Since it's possible that some of the shards that we're skipping are @@ -491,11 +448,11 @@ private GroupShardsIterator getIterator( iter.skip(true); } } - if (shouldSortShards(results.minAndMaxes) == false) { + if (shouldSortShards(minAndMaxes) == false) { return shardsIts; } FieldSortBuilder fieldSort = FieldSortBuilder.getPrimaryFieldSortOrNull(request.source()); - return new GroupShardsIterator<>(sortShards(shardsIts, results.minAndMaxes, fieldSort.order())); + return new GroupShardsIterator<>(sortShards(shardsIts, minAndMaxes, fieldSort.order())); } private static List sortShards( diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index cc8c4becea9a9..faeb552530e47 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -39,6 +39,9 @@ * @see CountedCollector#onFailure(int, SearchShardTarget, Exception) */ final class DfsQueryPhase extends SearchPhase { + + public static final String NAME = "dfs_query"; + private final SearchPhaseResults queryResult; private final List searchResults; private final AggregatedDfs dfs; @@ -56,7 +59,7 @@ final class DfsQueryPhase extends SearchPhase { Function, SearchPhase> nextPhaseFactory, AbstractSearchAsyncAction context ) { - super("dfs_query"); + super(NAME); this.progressListener = context.getTask().getProgressListener(); this.queryResult = queryResult; this.searchResults = searchResults; @@ -68,13 +71,13 @@ final class DfsQueryPhase extends SearchPhase { } @Override - public void run() { + protected void run() { // TODO we can potentially also consume the actual per shard results from the initial phase here in the aggregateDfs // to free up memory early final CountedCollector counter = new CountedCollector<>( queryResult, searchResults.size(), - () -> context.executeNextPhase(this, () -> nextPhaseFactory.apply(queryResult)), + () -> context.executeNextPhase(NAME, () -> nextPhaseFactory.apply(queryResult)), context ); @@ -106,7 +109,7 @@ protected void innerOnResponse(QuerySearchResult response) { response.setSearchProfileDfsPhaseResult(dfsResult.searchProfileDfsPhaseResult()); counter.onResult(response); } catch (Exception e) { - context.onPhaseFailure(DfsQueryPhase.this, "", e); + 
context.onPhaseFailure(NAME, "", e); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index e8d94c32bdcc7..b0b3f15265920 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -31,12 +31,15 @@ * forwards to the next phase immediately. */ final class ExpandSearchPhase extends SearchPhase { + + static final String NAME = "expand"; + private final AbstractSearchAsyncAction context; private final SearchHits searchHits; private final Supplier nextPhase; ExpandSearchPhase(AbstractSearchAsyncAction context, SearchHits searchHits, Supplier nextPhase) { - super("expand"); + super(NAME); this.context = context; this.searchHits = searchHits; this.nextPhase = nextPhase; @@ -51,7 +54,7 @@ private boolean isCollapseRequest() { } @Override - public void run() { + protected void run() { if (isCollapseRequest() == false || searchHits.getHits().length == 0) { onPhaseDone(); } else { @@ -123,7 +126,7 @@ private void doRun() { } private void phaseFailure(Exception ex) { - context.onPhaseFailure(this, "failed to expand hits", ex); + context.onPhaseFailure(NAME, "failed to expand hits", ex); } private static SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder options, CollapseBuilder innerCollapseBuilder) { @@ -168,6 +171,6 @@ private static SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilde } private void onPhaseDone() { - context.executeNextPhase(this, nextPhase); + context.executeNextPhase(NAME, nextPhase); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java index d8671bcadf86d..2e98d50196490 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java @@ -33,6 +33,9 @@ * @see org.elasticsearch.index.mapper.LookupRuntimeFieldType */ final class FetchLookupFieldsPhase extends SearchPhase { + + static final String NAME = "fetch_lookup_fields"; + private final AbstractSearchAsyncAction context; private final SearchResponseSections searchResponse; private final AtomicArray queryResults; @@ -42,7 +45,7 @@ final class FetchLookupFieldsPhase extends SearchPhase { SearchResponseSections searchResponse, AtomicArray queryResults ) { - super("fetch_lookup_fields"); + super(NAME); this.context = context; this.searchResponse = searchResponse; this.queryResults = queryResults; @@ -74,7 +77,7 @@ private static List groupLookupFieldsByClusterAlias(SearchHits searchHi } @Override - public void run() { + protected void run() { final List clusters = groupLookupFieldsByClusterAlias(searchResponse.hits); if (clusters.isEmpty()) { context.sendSearchResponse(searchResponse, queryResults); @@ -129,7 +132,7 @@ public void onResponse(MultiSearchResponse items) { } } if (failure != null) { - context.onPhaseFailure(FetchLookupFieldsPhase.this, "failed to fetch lookup fields", failure); + context.onPhaseFailure(NAME, "failed to fetch lookup fields", failure); } else { context.sendSearchResponse(searchResponse, queryResults); } @@ -137,7 +140,7 @@ public void onResponse(MultiSearchResponse items) { @Override public void onFailure(Exception e) { - context.onPhaseFailure(FetchLookupFieldsPhase.this, "failed to fetch lookup 
fields", e); + context.onPhaseFailure(NAME, "failed to fetch lookup fields", e); } }); } diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 0fbface3793a8..119cfcab76105 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -34,6 +34,9 @@ * Then it reaches out to all relevant shards to fetch the topN hits. */ final class FetchSearchPhase extends SearchPhase { + + static final String NAME = "fetch"; + private final AtomicArray searchPhaseShardResults; private final BiFunction, SearchPhase> nextPhaseFactory; private final AbstractSearchAsyncAction context; @@ -70,7 +73,7 @@ final class FetchSearchPhase extends SearchPhase { @Nullable SearchPhaseController.ReducedQueryPhase reducedQueryPhase, BiFunction, SearchPhase> nextPhaseFactory ) { - super("fetch"); + super(NAME); if (context.getNumShards() != resultConsumer.getNumShards()) { throw new IllegalStateException( "number of shards must match the length of the query results but doesn't:" @@ -90,7 +93,7 @@ final class FetchSearchPhase extends SearchPhase { } @Override - public void run() { + protected void run() { context.execute(new AbstractRunnable() { @Override @@ -100,7 +103,7 @@ protected void doRun() throws Exception { @Override public void onFailure(Exception e) { - context.onPhaseFailure(FetchSearchPhase.this, "", e); + context.onPhaseFailure(NAME, "", e); } }); } @@ -222,7 +225,7 @@ public void innerOnResponse(FetchSearchResult result) { progressListener.notifyFetchResult(shardIndex); counter.onResult(result); } catch (Exception e) { - context.onPhaseFailure(FetchSearchPhase.this, "", e); + context.onPhaseFailure(NAME, "", e); } } @@ -269,9 +272,9 @@ private void moveToNextPhase( AtomicArray fetchResultsArr, SearchPhaseController.ReducedQueryPhase reducedQueryPhase ) { - context.executeNextPhase(this, () -> { + context.executeNextPhase(NAME, () -> { var resp = SearchPhaseController.merge(context.getRequest().scroll() != null, reducedQueryPhase, fetchResultsArr); - context.addReleasable(resp::decRef); + context.addReleasable(resp); return nextPhaseFactory.apply(resp, searchPhaseShardResults); }); } diff --git a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java index 199228c9f992c..e9302883457e1 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java @@ -37,6 +37,8 @@ */ public class RankFeaturePhase extends SearchPhase { + static final String NAME = "rank-feature"; + private static final Logger logger = LogManager.getLogger(RankFeaturePhase.class); private final AbstractSearchAsyncAction context; final SearchPhaseResults queryPhaseResults; @@ -51,7 +53,7 @@ public class RankFeaturePhase extends SearchPhase { AbstractSearchAsyncAction context, RankFeaturePhaseRankCoordinatorContext rankFeaturePhaseRankCoordinatorContext ) { - super("rank-feature"); + super(NAME); assert rankFeaturePhaseRankCoordinatorContext != null; this.rankFeaturePhaseRankCoordinatorContext = rankFeaturePhaseRankCoordinatorContext; if (context.getNumShards() != queryPhaseResults.getNumShards()) { @@ -71,7 +73,7 @@ public class RankFeaturePhase extends SearchPhase { } @Override - public void run() { + protected void run() { 
context.execute(new AbstractRunnable() { @Override protected void doRun() throws Exception { @@ -84,7 +86,7 @@ protected void doRun() throws Exception { @Override public void onFailure(Exception e) { - context.onPhaseFailure(RankFeaturePhase.this, "", e); + context.onPhaseFailure(NAME, "", e); } }); } @@ -139,7 +141,7 @@ protected void innerOnResponse(RankFeatureResult response) { progressListener.notifyRankFeatureResult(shardIndex); rankRequestCounter.onResult(response); } catch (Exception e) { - context.onPhaseFailure(RankFeaturePhase.this, "", e); + context.onPhaseFailure(NAME, "", e); } } @@ -194,7 +196,7 @@ public void onResponse(RankFeatureDoc[] docsWithUpdatedScores) { @Override public void onFailure(Exception e) { - context.onPhaseFailure(RankFeaturePhase.this, "Computing updated ranks for results failed", e); + context.onPhaseFailure(NAME, "Computing updated ranks for results failed", e); } } ); @@ -239,6 +241,6 @@ private float maxScore(ScoreDoc[] scoreDocs) { } void moveToNextPhase(SearchPhaseResults phaseResults, SearchPhaseController.ReducedQueryPhase reducedQueryPhase) { - context.executeNextPhase(this, () -> new FetchSearchPhase(phaseResults, aggregatedDfs, context, reducedQueryPhase)); + context.executeNextPhase(NAME, () -> new FetchSearchPhase(phaseResults, aggregatedDfs, context, reducedQueryPhase)); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java index 7d849a72abf9d..702369dc38390 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java @@ -9,25 +9,25 @@ package org.elasticsearch.action.search; import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.transport.Transport; -import java.io.IOException; import java.util.Objects; import java.util.function.Function; /** * Base class for all individual search phases like collecting distributed frequencies, fetching documents, querying shards. */ -abstract class SearchPhase implements CheckedRunnable { +abstract class SearchPhase { private final String name; protected SearchPhase(String name) { this.name = Objects.requireNonNull(name, "name must not be null"); } + protected abstract void run(); + /** * Returns the phases name. 
*/ diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index b118c2560925e..69e7fba4dd0d5 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -551,9 +551,8 @@ static ReducedQueryPhase reducedQueryPhase( assert numReducePhases >= 0 : "num reduce phases must be >= 0 but was: " + numReducePhases; numReducePhases++; // increment for this phase if (queryResults.isEmpty()) { // early terminate we have nothing to reduce - final TotalHits totalHits = topDocsStats.getTotalHits(); return new ReducedQueryPhase( - totalHits, + topDocsStats.getTotalHits(), topDocsStats.fetchHits, topDocsStats.getMaxScore(), false, @@ -570,8 +569,7 @@ static ReducedQueryPhase reducedQueryPhase( true ); } - int total = queryResults.size(); - final Collection nonNullResults = new ArrayList<>(); + final List nonNullResults = new ArrayList<>(); boolean hasSuggest = false; boolean hasProfileResults = false; for (SearchPhaseResult queryResult : queryResults) { @@ -581,12 +579,11 @@ static ReducedQueryPhase reducedQueryPhase( } hasSuggest |= res.suggest() != null; hasProfileResults |= res.hasProfileResults(); - nonNullResults.add(queryResult); + nonNullResults.add(res); } - queryResults = nonNullResults; - validateMergeSortValueFormats(queryResults); - if (queryResults.isEmpty()) { - var ex = new IllegalStateException("must have at least one non-empty search result, got 0 out of " + total); + validateMergeSortValueFormats(nonNullResults); + if (nonNullResults.isEmpty()) { + var ex = new IllegalStateException("must have at least one non-empty search result, got 0 out of " + queryResults.size()); assert false : ex; throw ex; } @@ -594,13 +591,12 @@ static ReducedQueryPhase reducedQueryPhase( // count the total (we use the query result provider here, since we might not get any hits (we scrolled past them)) final Map>> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap(); final Map profileShardResults = hasProfileResults - ? Maps.newMapWithExpectedSize(queryResults.size()) + ? Maps.newMapWithExpectedSize(nonNullResults.size()) : Collections.emptyMap(); int from = 0; int size = 0; DocValueFormat[] sortValueFormats = null; - for (SearchPhaseResult entry : queryResults) { - QuerySearchResult result = entry.queryResult(); + for (QuerySearchResult result : nonNullResults) { from = result.from(); // sorted queries can set the size to 0 if they have enough competitive hits. 
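One small cleanup in reducedQueryPhase below replaces the get-or-create-then-add sequence with a single computeIfAbsent call. A tiny sketch of that grouping idiom, with a made-up Suggestion record in place of the real suggestion types:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupingSketch {
    public static void main(String[] args) {
        record Suggestion(String name, String option) {}
        List<Suggestion> perShard = List.of(
            new Suggestion("did_you_mean", "elastic"),
            new Suggestion("did_you_mean", "elastics"),
            new Suggestion("completion", "elasticsearch")
        );
        Map<String, List<Suggestion>> grouped = new HashMap<>();
        for (Suggestion s : perShard) {
            // One call creates the bucket on first sight and appends in both cases.
            grouped.computeIfAbsent(s.name(), k -> new ArrayList<>()).add(s);
        }
        System.out.println(grouped);
    }
}
```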
size = Math.max(result.size(), size); @@ -611,8 +607,7 @@ static ReducedQueryPhase reducedQueryPhase( if (hasSuggest) { assert result.suggest() != null; for (Suggestion> suggestion : result.suggest()) { - List> suggestionList = groupedSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>()); - suggestionList.add(suggestion); + groupedSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>()).add(suggestion); if (suggestion instanceof CompletionSuggestion completionSuggestion) { completionSuggestion.setShardIndex(result.getShardIndex()); } @@ -620,53 +615,48 @@ static ReducedQueryPhase reducedQueryPhase( } assert bufferedTopDocs.isEmpty() || result.hasConsumedTopDocs() : "firstResult has no aggs but we got non null buffered aggs?"; if (hasProfileResults) { - String key = result.getSearchShardTarget().toString(); - profileShardResults.put(key, result.consumeProfileResult()); + profileShardResults.put(result.getSearchShardTarget().toString(), result.consumeProfileResult()); } } - final Suggest reducedSuggest; - final List reducedCompletionSuggestions; - if (groupedSuggestions.isEmpty()) { - reducedSuggest = null; - reducedCompletionSuggestions = Collections.emptyList(); - } else { - reducedSuggest = new Suggest(Suggest.reduce(groupedSuggestions)); - reducedCompletionSuggestions = reducedSuggest.filter(CompletionSuggestion.class); - } - final InternalAggregations aggregations = bufferedAggs == null - ? null - : InternalAggregations.topLevelReduceDelayable( - bufferedAggs, - performFinalReduce ? aggReduceContextBuilder.forFinalReduction() : aggReduceContextBuilder.forPartialReduction() - ); - final SearchProfileResultsBuilder profileBuilder = profileShardResults.isEmpty() - ? null - : new SearchProfileResultsBuilder(profileShardResults); + final Suggest reducedSuggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions)); final SortedTopDocs sortedTopDocs; if (queryPhaseRankCoordinatorContext == null) { - sortedTopDocs = sortDocs(isScrollRequest, bufferedTopDocs, from, size, reducedCompletionSuggestions); + sortedTopDocs = sortDocs( + isScrollRequest, + bufferedTopDocs, + from, + size, + reducedSuggest == null ? Collections.emptyList() : reducedSuggest.filter(CompletionSuggestion.class) + ); } else { - ScoreDoc[] rankedDocs = queryPhaseRankCoordinatorContext.rankQueryPhaseResults( - queryResults.stream().map(SearchPhaseResult::queryResult).toList(), - topDocsStats + sortedTopDocs = new SortedTopDocs( + queryPhaseRankCoordinatorContext.rankQueryPhaseResults(nonNullResults, topDocsStats), + false, + null, + null, + null, + 0 ); - sortedTopDocs = new SortedTopDocs(rankedDocs, false, null, null, null, 0); size = sortedTopDocs.scoreDocs.length; // we need to reset from here as pagination and result trimming has already taken place // within the `QueryPhaseRankCoordinatorContext#rankQueryPhaseResults` and we don't want // to apply it again in the `getHits` method. from = 0; } - final TotalHits totalHits = topDocsStats.getTotalHits(); return new ReducedQueryPhase( - totalHits, + topDocsStats.getTotalHits(), topDocsStats.fetchHits, topDocsStats.getMaxScore(), topDocsStats.timedOut, topDocsStats.terminatedEarly, reducedSuggest, - aggregations, - profileBuilder, + bufferedAggs == null + ? null + : InternalAggregations.topLevelReduceDelayable( + bufferedAggs, + performFinalReduce ? aggReduceContextBuilder.forFinalReduction() : aggReduceContextBuilder.forPartialReduction() + ), + profileShardResults.isEmpty() ? 
null : new SearchProfileResultsBuilder(profileShardResults), sortedTopDocs, sortValueFormats, queryPhaseRankCoordinatorContext, diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java index 2d798f2da3a4f..fc79e85f9326d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java @@ -73,15 +73,21 @@ public RestStatus status() { // on coordinator node. so get the status from cause instead of returning SERVICE_UNAVAILABLE blindly return getCause() == null ? RestStatus.SERVICE_UNAVAILABLE : ExceptionsHelper.status(getCause()); } - RestStatus status = shardFailures[0].status(); - if (shardFailures.length > 1) { - for (int i = 1; i < shardFailures.length; i++) { - if (shardFailures[i].status().getStatus() >= RestStatus.INTERNAL_SERVER_ERROR.getStatus()) { - status = shardFailures[i].status(); - } + RestStatus status = null; + for (ShardSearchFailure shardFailure : shardFailures) { + RestStatus shardStatus = shardFailure.status(); + int statusCode = shardStatus.getStatus(); + + // Return if it's an error that can be retried. + // These currently take precedence over other status code(s). + if (statusCode >= 502 && statusCode <= 504) { + return shardStatus; + } else if (statusCode >= 500) { + status = shardStatus; } } - return status; + + return status == null ? shardFailures[0].status() : status; } public ShardSearchFailure[] shardFailures() { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java index 73bb0d545a2e0..54f9f5549f30c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java @@ -49,8 +49,6 @@ final int getNumShards() { */ abstract boolean hasResult(int shardIndex); - void consumeShardFailure(int shardIndex) {} - AtomicArray getAtomicArray() { throw new UnsupportedOperationException(); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 2e1d58e042f09..8b77ec7fb5463 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -24,7 +24,6 @@ import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.Rewriteable; -import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.SearchContext; @@ -82,7 +81,7 @@ public class SearchRequest extends ActionRequest implements IndicesRequest.Repla private Boolean allowPartialSearchResults; - private Scroll scroll; + private TimeValue scrollKeepAlive; private int batchedReduceSize = DEFAULT_BATCHED_REDUCE_SIZE; @@ -206,7 +205,7 @@ private SearchRequest( this.preFilterShardSize = searchRequest.preFilterShardSize; this.requestCache = searchRequest.requestCache; this.routing = searchRequest.routing; - this.scroll = searchRequest.scroll; + this.scrollKeepAlive = searchRequest.scrollKeepAlive; this.searchType = 
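The rewritten SearchPhaseExecutionException.status() above gives retryable gateway-class codes (502 to 504) precedence, then any other 5xx, and otherwise falls back to the first shard failure's status. The same rule as a self-contained sketch, with plain int codes standing in for RestStatus:

```java
import java.util.List;

public class StatusPrecedenceSketch {
    static int overallStatus(List<Integer> shardStatuses) {
        Integer serverError = null;
        for (int statusCode : shardStatuses) {
            if (statusCode >= 502 && statusCode <= 504) {
                return statusCode;        // retryable: takes precedence over everything else
            } else if (statusCode >= 500) {
                serverError = statusCode; // remember a non-retryable server error
            }
        }
        return serverError == null ? shardStatuses.get(0) : serverError;
    }

    public static void main(String[] args) {
        System.out.println(overallStatus(List.of(400, 500, 503))); // 503
        System.out.println(overallStatus(List.of(400, 500)));      // 500
        System.out.println(overallStatus(List.of(404, 400)));      // 404
    }
}
```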
searchRequest.searchType; this.source = searchRequest.source; this.localClusterAlias = localClusterAlias; @@ -229,7 +228,7 @@ public SearchRequest(StreamInput in) throws IOException { indices = in.readStringArray(); routing = in.readOptionalString(); preference = in.readOptionalString(); - scroll = in.readOptionalWriteable(Scroll::new); + scrollKeepAlive = in.readOptionalTimeValue(); source = in.readOptionalWriteable(SearchSourceBuilder::new); if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { // types no longer relevant so ignore @@ -276,7 +275,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(indices); out.writeOptionalString(routing); out.writeOptionalString(preference); - out.writeOptionalWriteable(scroll); + out.writeOptionalTimeValue(scrollKeepAlive); out.writeOptionalWriteable(source); if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { // types not supported so send an empty array to previous versions @@ -525,23 +524,16 @@ public String[] indices() { /** * If set, will enable scrolling of the search request. */ - public Scroll scroll() { - return scroll; - } - - /** - * If set, will enable scrolling of the search request. - */ - public SearchRequest scroll(Scroll scroll) { - this.scroll = scroll; - return this; + public TimeValue scroll() { + return scrollKeepAlive; } /** * If set, will enable scrolling of the search request for the specified timeout. */ public SearchRequest scroll(TimeValue keepAlive) { - return scroll(new Scroll(keepAlive)); + this.scrollKeepAlive = keepAlive; + return this; } /** @@ -681,7 +673,7 @@ public boolean hasKnnSearch() { } public int resolveTrackTotalHitsUpTo() { - return resolveTrackTotalHitsUpTo(scroll, source); + return resolveTrackTotalHitsUpTo(scrollKeepAlive, source); } /** @@ -731,7 +723,7 @@ public SearchRequest rewrite(QueryRewriteContext ctx) throws IOException { return hasChanged ? 
new SearchRequest(this).source(source) : this; } - public static int resolveTrackTotalHitsUpTo(Scroll scroll, SearchSourceBuilder source) { + public static int resolveTrackTotalHitsUpTo(TimeValue scroll, SearchSourceBuilder source) { if (scroll != null) { // no matter what the value of track_total_hits is return SearchContext.TRACK_TOTAL_HITS_ACCURATE; @@ -752,8 +744,8 @@ public final String buildDescription() { Strings.arrayToDelimitedString(indices, ",", sb); sb.append("]"); sb.append(", search_type[").append(searchType).append("]"); - if (scroll != null) { - sb.append(", scroll[").append(scroll.keepAlive()).append("]"); + if (scrollKeepAlive != null) { + sb.append(", scroll[").append(scrollKeepAlive).append("]"); } if (source != null) { sb.append(", source[").append(source.toString(FORMAT_PARAMS)).append("]"); @@ -784,7 +776,7 @@ public boolean equals(Object o) { && Objects.equals(preference, that.preference) && Objects.equals(source, that.source) && Objects.equals(requestCache, that.requestCache) - && Objects.equals(scroll, that.scroll) + && Objects.equals(scrollKeepAlive, that.scrollKeepAlive) && Objects.equals(batchedReduceSize, that.batchedReduceSize) && Objects.equals(maxConcurrentShardRequests, that.maxConcurrentShardRequests) && Objects.equals(preFilterShardSize, that.preFilterShardSize) @@ -805,7 +797,7 @@ public int hashCode() { preference, source, requestCache, - scroll, + scrollKeepAlive, indicesOptions, batchedReduceSize, maxConcurrentShardRequests, @@ -836,7 +828,7 @@ public String toString() { + ", requestCache=" + requestCache + ", scroll=" - + scroll + + scrollKeepAlive + ", maxConcurrentShardRequests=" + maxConcurrentShardRequests + ", batchedReduceSize=" diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 2927c394da3d4..d309ef3a7498a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -16,7 +16,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.script.Script; -import org.elasticsearch.search.Scroll; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.builder.PointInTimeBuilder; @@ -62,14 +61,6 @@ public SearchRequestBuilder setSearchType(SearchType searchType) { return this; } - /** - * If set, will enable scrolling of the search request. - */ - public SearchRequestBuilder setScroll(Scroll scroll) { - request.scroll(scroll); - return this; - } - /** * If set, will enable scrolling of the search request for the specified timeout. 
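The SearchRequest and SearchRequestBuilder hunks above drop the Scroll wrapper from the public API: callers now hand the keep-alive TimeValue to scroll(TimeValue) directly, and the scroll() getter returns that TimeValue. A minimal sketch of the migration, assuming the Elasticsearch server artifact is on the classpath (the index name is illustrative):

```java
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.core.TimeValue;

public class ScrollApiMigration {
    public static void main(String[] args) {
        // Before: request.scroll(new Scroll(TimeValue.timeValueMinutes(1)));
        // After: the keep-alive is passed directly, with no wrapper object.
        SearchRequest request = new SearchRequest("my-index") // hypothetical index
            .scroll(TimeValue.timeValueMinutes(1));

        // The getter now returns the keep-alive itself rather than a Scroll.
        TimeValue keepAlive = request.scroll();
        System.out.println("scroll keep-alive: " + keepAlive);
    }
}
```

Since Scroll carried nothing but the keep-alive (note scroll.keepAlive() in the removed lines), collapsing it simplifies both the API and the wire format.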
*/ diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 041b3ae73c1ee..787dc14f6cd96 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -14,12 +14,13 @@ import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RefCounted; @@ -342,8 +343,7 @@ public ShardSearchFailure[] getShardFailures() { } /** - * If scrolling was enabled ({@link SearchRequest#scroll(org.elasticsearch.search.Scroll)}, the - * scroll id that can be used to continue scrolling. + * If scrolling was enabled ({@link SearchRequest#scroll(TimeValue)}), the scroll id that can be used to continue scrolling. */ public String getScrollId() { return scrollId; } @@ -382,17 +382,24 @@ public Clusters getClusters() { @Override public Iterator toXContentChunked(ToXContent.Params params) { assert hasReferences(); - return ChunkedToXContent.builder(params).xContentObject(innerToXContentChunked(params)); + return Iterators.concat( + ChunkedToXContentHelper.startObject(), + this.innerToXContentChunked(params), + ChunkedToXContentHelper.endObject() + ); } public Iterator innerToXContentChunked(ToXContent.Params params) { - return ChunkedToXContent.builder(params) - .append(SearchResponse.this::headerToXContent) - .append(clusters) - .append(hits) - .appendIfPresent(aggregations) - .appendIfPresent(suggest) - .appendIfPresent(profileResults); + return Iterators.concat( + ChunkedToXContentHelper.chunk(SearchResponse.this::headerToXContent), + Iterators.single(clusters), + Iterators.concat( + hits.toXContentChunked(params), + aggregations == null ? Collections.emptyIterator() : ChunkedToXContentHelper.chunk(aggregations), + suggest == null ? Collections.emptyIterator() : ChunkedToXContentHelper.chunk(suggest), + profileResults == null ?
Collections.emptyIterator() : ChunkedToXContentHelper.chunk(profileResults) + ) + ); } public XContentBuilder headerToXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java index 8c9a42a61e33e..9d85348b80d62 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java @@ -9,14 +9,12 @@ package org.elasticsearch.action.search; -import org.elasticsearch.core.RefCounted; -import org.elasticsearch.core.SimpleRefCounted; +import org.elasticsearch.core.Releasable; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.suggest.Suggest; -import org.elasticsearch.transport.LeakTracker; import java.util.Collections; import java.util.Map; @@ -25,7 +23,7 @@ * Holds some sections that a search response is composed of (hits, aggs, suggestions etc.) during some steps of the search response * building. */ -public class SearchResponseSections implements RefCounted { +public class SearchResponseSections implements Releasable { public static final SearchResponseSections EMPTY_WITH_TOTAL_HITS = new SearchResponseSections( SearchHits.EMPTY_WITH_TOTAL_HITS, @@ -53,8 +51,6 @@ public class SearchResponseSections implements RefCounted { protected final Boolean terminatedEarly; protected final int numReducePhases; - private final RefCounted refCounted; - public SearchResponseSections( SearchHits hits, InternalAggregations aggregations, @@ -72,7 +68,6 @@ public SearchResponseSections( this.timedOut = timedOut; this.terminatedEarly = terminatedEarly; this.numReducePhases = numReducePhases; - refCounted = hits.getHits().length > 0 ? LeakTracker.wrap(new SimpleRefCounted()) : ALWAYS_REFERENCED; } public final SearchHits hits() { @@ -97,26 +92,7 @@ public final Map profile() { } @Override - public void incRef() { - refCounted.incRef(); - } - - @Override - public boolean tryIncRef() { - return refCounted.tryIncRef(); - } - - @Override - public boolean decRef() { - if (refCounted.decRef()) { - hits.decRef(); - return true; - } - return false; - } - - @Override - public boolean hasReferences() { - return refCounted.hasReferences(); + public void close() { + hits.decRef(); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java index 60e96a8cce8ab..caba10d5cee02 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java @@ -40,7 +40,7 @@ * fan out to nodes and execute the query part of the scroll request. Subclasses can for instance * run separate fetch phases etc. 
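The SearchResponseSections hunk above replaces reference counting with a single-owner Releasable, and the sendResponse hunk just below switches the caller from a try/finally decRef() to try-with-resources. A framework-free sketch of that ownership model, using plain AutoCloseable so it runs without Elasticsearch on the classpath (DemoHits and DemoSections are illustrative stand-ins):

```java
// Illustrative stand-ins: the real classes are SearchHits and SearchResponseSections.
final class DemoHits {
    private int refCount = 1;

    void decRef() { // drop the single reference held by the sections object
        if (--refCount == 0) {
            System.out.println("hits released");
        }
    }
}

final class DemoSections implements AutoCloseable {
    private final DemoHits hits;

    DemoSections(DemoHits hits) {
        this.hits = hits;
    }

    @Override
    public void close() { // mirrors SearchResponseSections#close in the hunk above
        hits.decRef();
    }
}

public class ReleasableDemo {
    public static void main(String[] args) {
        // try-with-resources releases the hits exactly once on every exit path,
        // which is what the removed incRef/decRef bookkeeping had to guarantee by hand.
        try (DemoSections sections = new DemoSections(new DemoHits())) {
            System.out.println("building response from " + sections);
        }
    }
}
```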
*/ -abstract class SearchScrollAsyncAction implements Runnable { +abstract class SearchScrollAsyncAction { protected final Logger logger; protected final ActionListener listener; protected final ParsedScrollId scrollId; @@ -229,7 +229,7 @@ protected SearchPhase sendResponsePhase( ) { return new SearchPhase("fetch") { @Override - public void run() { + protected void run() { sendResponse(queryPhase, fetchResults); } }; @@ -246,8 +246,7 @@ protected final void sendResponse( if (request.scroll() != null) { scrollId = request.scrollId(); } - var sections = SearchPhaseController.merge(true, queryPhase, fetchResults); - try { + try (var sections = SearchPhaseController.merge(true, queryPhase, fetchResults)) { ActionListener.respondAndRelease( listener, new SearchResponse( @@ -262,8 +261,6 @@ protected final void sendResponse( null ) ); - } finally { - sections.decRef(); } } catch (Exception e) { listener.onFailure(new ReduceSearchPhaseException("fetch", "inner finish failed", e, buildShardFailures())); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index 8c33e3ca7da4b..fd3d748a556be 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -65,7 +65,7 @@ protected void executeInitialPhase( protected SearchPhase moveToNextPhase(BiFunction clusterNodeLookup) { return new SearchPhase("fetch") { @Override - public void run() { + protected void run() { final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = SearchPhaseController.reducedScrollQueryPhase( queryResults.asList() ); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java index 71b88b03a5463..1d34288665ae4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.search.Scroll; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xcontent.ToXContentObject; @@ -30,7 +29,7 @@ public class SearchScrollRequest extends ActionRequest implements ToXContentObject { private String scrollId; - private Scroll scroll; + private TimeValue scroll; public SearchScrollRequest() {} @@ -41,14 +40,14 @@ public SearchScrollRequest(String scrollId) { public SearchScrollRequest(StreamInput in) throws IOException { super(in); scrollId = in.readString(); - scroll = in.readOptionalWriteable(Scroll::new); + scroll = in.readOptionalTimeValue(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(scrollId); - out.writeOptionalWriteable(scroll); + out.writeOptionalTimeValue(scroll); } @Override @@ -79,23 +78,16 @@ public ParsedScrollId parseScrollId() { /** * If set, will enable scrolling of the search request. */ - public Scroll scroll() { + public TimeValue scroll() { return scroll; } - /** - * If set, will enable scrolling of the search request. 
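In the same spirit, SearchScrollRequest now moves the keep-alive over the wire with readOptionalTimeValue/writeOptionalTimeValue instead of an optional Scroll writeable. Optional fields of this kind are conventionally encoded as a presence flag followed by the value; a rough, framework-free sketch of that idea (this shows the general pattern, not Elasticsearch's exact byte layout):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class OptionalFieldDemo {
    // Write a presence flag, then the value only when it is non-null.
    static void writeOptionalMillis(DataOutputStream out, Long millis) throws IOException {
        out.writeBoolean(millis != null);
        if (millis != null) {
            out.writeLong(millis);
        }
    }

    static Long readOptionalMillis(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readLong() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        writeOptionalMillis(out, 60_000L); // a one-minute keep-alive
        writeOptionalMillis(out, null);    // scroll not set

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readOptionalMillis(in)); // 60000
        System.out.println(readOptionalMillis(in)); // null
    }
}
```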
- */ - public SearchScrollRequest scroll(Scroll scroll) { - this.scroll = scroll; - return this; - } - /** * If set, will enable scrolling of the search request for the specified timeout. */ public SearchScrollRequest scroll(TimeValue keepAlive) { - return scroll(new Scroll(keepAlive)); + this.scroll = keepAlive; + return this; } @Override @@ -135,7 +127,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field("scroll_id", scrollId); if (scroll != null) { - builder.field("scroll", scroll.keepAlive().getStringRep()); + builder.field("scroll", scroll.getStringRep()); } builder.endObject(); return builder; @@ -157,7 +149,7 @@ public void fromXContent(XContentParser parser) throws IOException { } else if ("scroll_id".equals(currentFieldName) && token == XContentParser.Token.VALUE_STRING) { scrollId(parser.text()); } else if ("scroll".equals(currentFieldName) && token == XContentParser.Token.VALUE_STRING) { - scroll(new Scroll(TimeValue.parseTimeValue(parser.text(), null, "scroll"))); + scroll(TimeValue.parseTimeValue(parser.text(), null, "scroll")); } else { throw new IllegalArgumentException( "Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] " diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java index 24dac98166ce0..57a6e44f4c2b0 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.search.Scroll; /** * A search scroll action request builder. @@ -35,14 +34,6 @@ public SearchScrollRequestBuilder setScrollId(String scrollId) { return this; } - /** - * If set, will enable scrolling of the search request. - */ - public SearchScrollRequestBuilder setScroll(Scroll scroll) { - request.scroll(scroll); - return this; - } - /** * If set, will enable scrolling of the search request for the specified timeout. 
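For the REST body, the same field round-trips as a string: toXContent above emits TimeValue#getStringRep and fromXContent parses it back with TimeValue#parseTimeValue. A small sketch of that round-trip, assuming the artifact that provides org.elasticsearch.core.TimeValue:

```java
import org.elasticsearch.core.TimeValue;

public class ScrollKeepAliveRoundTrip {
    public static void main(String[] args) {
        // What SearchScrollRequest#toXContent now writes for the "scroll" field.
        TimeValue keepAlive = TimeValue.timeValueMinutes(5);
        String rep = keepAlive.getStringRep(); // "5m"

        // What fromXContent now does with that string; the last argument names
        // the field for error messages, as in the hunk above.
        TimeValue parsed = TimeValue.parseTimeValue(rep, null, "scroll");
        System.out.println(rep + " -> " + parsed.millis() + "ms");
    }
}
```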
*/ diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index cfc2e1bcdaf2b..2041754bc2bcc 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -456,7 +456,8 @@ public static void registerRequestHandler(TransportService transportService, Sea (request, channel, task) -> searchService.executeQueryPhase( request, (SearchShardTask) task, - new ChannelActionListener<>(channel) + new ChannelActionListener<>(channel), + channel.getVersion() ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_ID_ACTION_NAME, true, QuerySearchResult::new); @@ -468,7 +469,8 @@ public static void registerRequestHandler(TransportService transportService, Sea (request, channel, task) -> searchService.executeQueryPhase( request, (SearchShardTask) task, - new ChannelActionListener<>(channel) + new ChannelActionListener<>(channel), + channel.getVersion() ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_SCROLL_ACTION_NAME, true, ScrollQuerySearchResult::new); diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index 36ca0fba94372..6c95a3c8fd436 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -270,7 +270,7 @@ protected void executePhaseOnShard( protected SearchPhase getNextPhase() { return new SearchPhase(getName()) { @Override - public void run() { + protected void run() { sendSearchResponse(SearchResponseSections.EMPTY_WITH_TOTAL_HITS, results.getAtomicArray()); } }; diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index ae27406bf396d..70a7f4c8cad0c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -388,10 +388,7 @@ void executeRequest( if (original.pointInTimeBuilder() != null) { tl.setFeature(CCSUsageTelemetry.PIT_FEATURE); } - String client = task.getHeader(Task.X_ELASTIC_PRODUCT_ORIGIN_HTTP_HEADER); - if (client != null) { - tl.setClient(client); - } + tl.setClient(task); // Check if any of the index patterns are wildcard patterns var localIndices = resolvedIndices.getLocalIndices(); if (localIndices != null && Arrays.stream(localIndices.indices()).anyMatch(Regex::isSimpleMatchPattern)) { @@ -508,6 +505,7 @@ void executeRequest( } } }); + final SearchSourceBuilder source = original.source(); if (shouldOpenPIT(source)) { // disabling shard reordering for request @@ -1883,7 +1881,7 @@ private interface TelemetryListener { void setFeature(String feature); - void setClient(String client); + void setClient(Task task); } private class SearchResponseActionListener extends DelegatingActionListener @@ -1917,8 +1915,8 @@ public void setFeature(String feature) { } @Override - public void setClient(String client) { - usageBuilder.setClient(client); + public void setClient(Task task) { + usageBuilder.setClientFromTask(task); } @Override diff --git 
a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java index cffba76988f7d..b232cd16ba65e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchScrollAction.java @@ -91,7 +91,7 @@ public void onFailure(Exception e) { }; try { ParsedScrollId scrollId = parseScrollId(request.scrollId()); - Runnable action = switch (scrollId.getType()) { + var action = switch (scrollId.getType()) { case QUERY_THEN_FETCH_TYPE -> new SearchScrollQueryThenFetchAsyncAction( logger, clusterService, diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index ebbd47336e3da..4525259451481 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -13,7 +13,6 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.core.Nullable; @@ -47,37 +46,13 @@ * @param gatekeeperOptions, applies to all the resolved indices and defines if throttled will be included and if certain type of * aliases or indices are allowed, or they will throw an error. It acts as a gatekeeper when an action * does not support certain options. - * @param selectorOptions, applies to all resolved expressions, and it specifies the index component that should be included, if there - * is no index component defined on the expression level. */ public record IndicesOptions( ConcreteTargetOptions concreteTargetOptions, WildcardOptions wildcardOptions, - GatekeeperOptions gatekeeperOptions, - SelectorOptions selectorOptions + GatekeeperOptions gatekeeperOptions ) implements ToXContentFragment { - /** - * @deprecated this query param will be replaced by the selector `::` on the expression level - */ - @Deprecated - public static final String FAILURE_STORE_QUERY_PARAM = "failure_store"; - /** - * @deprecated this value will be replaced by the selector `::*` on the expression level - */ - @Deprecated - public static final String INCLUDE_ALL = "include"; - /** - * @deprecated this value will be replaced by the selector `::data` on the expression level - */ - @Deprecated - public static final String INCLUDE_ONLY_REGULAR_INDICES = "exclude"; - /** - * @deprecated this value will be replaced by the selector `::failures` on the expression level - */ - @Deprecated - public static final String INCLUDE_ONLY_FAILURE_INDICES = "only"; - public static IndicesOptions.Builder builder() { return new Builder(); } @@ -324,14 +299,14 @@ public static Builder builder(WildcardOptions wildcardOptions) { * - The ignoreThrottled flag, which is a deprecated flag that will filter out frozen indices. * @param allowAliasToMultipleIndices, allow aliases to multiple indices, true by default. * @param allowClosedIndices, allow closed indices, true by default. - * @param allowFailureIndices, allow failure indices in the response, true by default + * @param allowSelectors, allow selectors within index expressions, true by default. 
* @param ignoreThrottled, filters out throttled (aka frozen indices), defaults to true. This is deprecated and the only one * that only filters and never throws an error. */ public record GatekeeperOptions( boolean allowAliasToMultipleIndices, boolean allowClosedIndices, - boolean allowFailureIndices, + boolean allowSelectors, @Deprecated boolean ignoreThrottled ) implements ToXContentFragment { @@ -355,7 +330,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public static class Builder { private boolean allowAliasToMultipleIndices; private boolean allowClosedIndices; - private boolean allowFailureIndices; + private boolean allowSelectors; private boolean ignoreThrottled; public Builder() { @@ -365,7 +340,7 @@ public Builder() { Builder(GatekeeperOptions options) { allowAliasToMultipleIndices = options.allowAliasToMultipleIndices; allowClosedIndices = options.allowClosedIndices; - allowFailureIndices = options.allowFailureIndices; + allowSelectors = options.allowSelectors; ignoreThrottled = options.ignoreThrottled; } @@ -388,11 +363,12 @@ public Builder allowClosedIndices(boolean allowClosedIndices) { } /** - * Failure indices are accepted when true, otherwise the resolution will throw an error. + * Selectors are allowed within index expressions when true, otherwise the resolution will treat their presence as a syntax + * error when resolving index expressions. * Defaults to true. */ - public Builder allowFailureIndices(boolean allowFailureIndices) { - this.allowFailureIndices = allowFailureIndices; + public Builder allowSelectors(boolean allowSelectors) { + this.allowSelectors = allowSelectors; return this; } @@ -405,7 +381,7 @@ public Builder ignoreThrottled(boolean ignoreThrottled) { } public GatekeeperOptions build() { - return new GatekeeperOptions(allowAliasToMultipleIndices, allowClosedIndices, allowFailureIndices, ignoreThrottled); + return new GatekeeperOptions(allowAliasToMultipleIndices, allowClosedIndices, allowSelectors, ignoreThrottled); } } @@ -418,50 +394,6 @@ public static Builder builder(GatekeeperOptions gatekeeperOptions) { } } - /** - * Defines which selectors should be used by default for an index operation in the event that no selectors are provided. - */ - public record SelectorOptions(IndexComponentSelector defaultSelector) implements Writeable { - - public static final SelectorOptions ALL_APPLICABLE = new SelectorOptions(IndexComponentSelector.ALL_APPLICABLE); - public static final SelectorOptions DATA = new SelectorOptions(IndexComponentSelector.DATA); - public static final SelectorOptions FAILURES = new SelectorOptions(IndexComponentSelector.FAILURES); - /** - * Default instance. Uses
<code>::data</code>
as the default selector if none are present in an index expression. - */ - public static final SelectorOptions DEFAULT = DATA; - - public static SelectorOptions read(StreamInput in) throws IOException { - if (in.getTransportVersion().before(TransportVersions.INTRODUCE_ALL_APPLICABLE_SELECTOR)) { - EnumSet set = in.readEnumSet(IndexComponentSelector.class); - if (set.isEmpty() || set.size() == 2) { - assert set.contains(IndexComponentSelector.DATA) && set.contains(IndexComponentSelector.FAILURES) - : "The enum set only supported ::data and ::failures"; - return SelectorOptions.ALL_APPLICABLE; - } else if (set.contains(IndexComponentSelector.DATA)) { - return SelectorOptions.DATA; - } else { - return SelectorOptions.FAILURES; - } - } else { - return new SelectorOptions(IndexComponentSelector.read(in)); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().before(TransportVersions.INTRODUCE_ALL_APPLICABLE_SELECTOR)) { - switch (defaultSelector) { - case ALL_APPLICABLE -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES)); - case DATA -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.DATA)); - case FAILURES -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.FAILURES)); - } - } else { - defaultSelector.writeTo(out); - } - } - } - /** * This class is maintained for backwards compatibility and performance purposes. We use it for serialisation along with {@link Option}. */ @@ -497,7 +429,8 @@ private enum Option { ERROR_WHEN_CLOSED_INDICES, IGNORE_THROTTLED, - ALLOW_FAILURE_INDICES // Added in 8.14 + ALLOW_FAILURE_INDICES, // Added in 8.14, Removed in 8.18 + ALLOW_SELECTORS // Added in 8.18 } private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(IndicesOptions.class); @@ -510,8 +443,7 @@ private enum Option { public static final IndicesOptions DEFAULT = new IndicesOptions( ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS, WildcardOptions.DEFAULT, - GatekeeperOptions.DEFAULT, - SelectorOptions.DEFAULT + GatekeeperOptions.DEFAULT ); public static final IndicesOptions STRICT_EXPAND_OPEN = IndicesOptions.builder() @@ -528,10 +460,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -547,10 +478,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -566,10 +496,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_NO_SELECTORS = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -585,7 +514,7 @@ private enum Option { GatekeeperOptions.builder() 
.allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(false) + .allowSelectors(false) .ignoreThrottled(false) ) .build(); @@ -603,10 +532,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -622,10 +550,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -636,10 +563,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_HIDDEN_NO_SELECTOR = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -650,7 +576,7 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(false) + .allowSelectors(false) .ignoreThrottled(false) ) .build(); @@ -668,10 +594,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -682,10 +607,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN_NO_SELECTORS = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -696,7 +620,7 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(false) + .allowSelectors(false) .ignoreThrottled(false) ) .build(); @@ -714,10 +638,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -728,10 +651,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions 
STRICT_EXPAND_OPEN_CLOSED_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -747,10 +669,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -766,10 +687,9 @@ private enum Option { GatekeeperOptions.builder() .allowClosedIndices(false) .allowAliasToMultipleIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -785,10 +705,9 @@ private enum Option { GatekeeperOptions.builder() .allowClosedIndices(false) .allowAliasToMultipleIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -804,10 +723,9 @@ private enum Option { GatekeeperOptions.builder() .ignoreThrottled(true) .allowClosedIndices(false) - .allowFailureIndices(true) + .allowSelectors(true) .allowAliasToMultipleIndices(true) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -823,10 +741,27 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(false) .allowClosedIndices(false) - .allowFailureIndices(true) + .allowSelectors(false) + .ignoreThrottled(false) + ) + .build(); + public static final IndicesOptions STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED_ALLOW_SELECTORS = IndicesOptions.builder() + .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) + .wildcardOptions( + WildcardOptions.builder() + .matchOpen(false) + .matchClosed(false) + .includeHidden(false) + .allowEmptyExpressions(true) + .resolveAliases(true) + ) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(false) + .allowClosedIndices(false) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -842,10 +777,9 @@ private enum Option { GatekeeperOptions.builder() .allowClosedIndices(false) .allowAliasToMultipleIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); /** @@ -903,10 +837,10 @@ public boolean forbidClosedIndices() { } /** - * @return Whether execution on failure indices is allowed. + * @return Whether selectors (::) are allowed in the index expression. 
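The GatekeeperOptions rename shifts the gate from "may failure indices appear in the result" to "may the expression itself carry a ::selector". A sketch of how a caller builds such options with the builder API from this diff (wildcard options elided for brevity; treat the fragment as illustrative rather than a real call site):

```java
// Sketch only: mirrors the builder chains in this diff.
IndicesOptions options = IndicesOptions.builder()
    .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
    .gatekeeperOptions(
        GatekeeperOptions.builder()
            .allowAliasToMultipleIndices(true)
            .allowClosedIndices(true)
            .allowSelectors(false) // expressions like "logs-*::failures" now fail resolution
            .ignoreThrottled(false)
    )
    .build();

// Resolution code gates on the combined flag; note in the accessor just below that
// allowSelectors() also requires the failure-store feature flag to be enabled.
if (options.allowSelectors() == false) {
    // treat a trailing ::data / ::failures selector as a syntax error
}
```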
*/ - public boolean allowFailureIndices() { - return gatekeeperOptions.allowFailureIndices(); + public boolean allowSelectors() { + return DataStream.isFailureStoreFeatureFlagEnabled() && gatekeeperOptions.allowSelectors(); } /** @@ -930,20 +864,6 @@ public boolean ignoreThrottled() { return gatekeeperOptions().ignoreThrottled(); } - /** - * @return whether regular indices (stand-alone or backing indices) will be included in the response - */ - public boolean includeRegularIndices() { - return selectorOptions().defaultSelector().shouldIncludeData(); - } - - /** - * @return whether failure indices (only supported by certain data streams) will be included in the response - */ - public boolean includeFailureIndices() { - return selectorOptions().defaultSelector().shouldIncludeFailures(); - } - public void writeIndicesOptions(StreamOutput out) throws IOException { EnumSet
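The Option enum above keeps ALLOW_FAILURE_INDICES in place ("Added in 8.14, Removed in 8.18") and appends ALLOW_SELECTORS at the end because writeIndicesOptions serialises these flags as an EnumSet, so each constant's position is part of the wire contract. A framework-free sketch of why ordinal stability matters for such an encoding (hypothetical enum and bitmask layout, not Elasticsearch's exact format):

```java
import java.util.EnumSet;

public class EnumSetWireDemo {
    // Hypothetical stand-in for IndicesOptions.Option: the order must never change,
    // because the encoding below bakes each constant's ordinal into the payload.
    enum Option {
        IGNORE_UNAVAILABLE,
        ALLOW_NO_INDICES,
        ALLOW_FAILURE_INDICES, // retired, but the slot is kept so ordinals stay stable
        ALLOW_SELECTORS
    }

    static long toBits(EnumSet<Option> set) {
        long bits = 0;
        for (Option o : set) {
            bits |= 1L << o.ordinal();
        }
        return bits;
    }

    static EnumSet<Option> fromBits(long bits) {
        EnumSet<Option> set = EnumSet.noneOf(Option.class);
        for (Option o : Option.values()) {
            if ((bits & (1L << o.ordinal())) != 0) {
                set.add(o);
            }
        }
        return set;
    }

    public static void main(String[] args) {
        EnumSet<Option> sent = EnumSet.of(Option.ALLOW_NO_INDICES, Option.ALLOW_SELECTORS);
        long bits = toBits(sent);
        System.out.println(bits + " -> " + fromBits(bits)); // 10 -> [ALLOW_NO_INDICES, ALLOW_SELECTORS]
    }
}
```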