diff --git a/.buildkite/scripts/get-latest-test-mutes.sh b/.buildkite/scripts/get-latest-test-mutes.sh
index 5721e29f1b773..1dafcebec24b1 100755
--- a/.buildkite/scripts/get-latest-test-mutes.sh
+++ b/.buildkite/scripts/get-latest-test-mutes.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-if [[ ! "${BUILDKITE_PULL_REQUEST:-}" || "${BUILDKITE_AGENT_META_DATA_PROVIDER:-}" == "k8s" ]]; then
+if [[ "${BUILDKITE_PULL_REQUEST:-false}" == "false" || "${BUILDKITE_AGENT_META_DATA_PROVIDER:-}" == "k8s" ]]; then
exit 0
fi
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java
index 9aab4a3e3210f..d3259b9604717 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java
@@ -27,6 +27,7 @@
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.xpack.esql.core.expression.Expression;
import org.elasticsearch.xpack.esql.core.expression.FieldAttribute;
+import org.elasticsearch.xpack.esql.core.expression.FoldContext;
import org.elasticsearch.xpack.esql.core.expression.Literal;
import org.elasticsearch.xpack.esql.core.expression.predicate.regex.RLikePattern;
import org.elasticsearch.xpack.esql.core.tree.Source;
@@ -71,12 +72,11 @@ public class EvalBenchmark {
BigArrays.NON_RECYCLING_INSTANCE
);
+ private static final FoldContext FOLD_CONTEXT = FoldContext.small();
+
private static final int BLOCK_LENGTH = 8 * 1024;
- static final DriverContext driverContext = new DriverContext(
- BigArrays.NON_RECYCLING_INSTANCE,
- BlockFactory.getInstance(new NoopCircuitBreaker("noop"), BigArrays.NON_RECYCLING_INSTANCE)
- );
+ static final DriverContext driverContext = new DriverContext(BigArrays.NON_RECYCLING_INSTANCE, blockFactory);
static {
// Smoke test all the expected values and force loading subclasses more like prod
@@ -114,11 +114,12 @@ private static EvalOperator.ExpressionEvaluator evaluator(String operation) {
return switch (operation) {
case "abs" -> {
FieldAttribute longField = longField();
- yield EvalMapper.toEvaluator(new Abs(Source.EMPTY, longField), layout(longField)).get(driverContext);
+ yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Abs(Source.EMPTY, longField), layout(longField)).get(driverContext);
}
case "add" -> {
FieldAttribute longField = longField();
yield EvalMapper.toEvaluator(
+ FOLD_CONTEXT,
new Add(Source.EMPTY, longField, new Literal(Source.EMPTY, 1L, DataType.LONG)),
layout(longField)
).get(driverContext);
@@ -126,6 +127,7 @@ private static EvalOperator.ExpressionEvaluator evaluator(String operation) {
case "add_double" -> {
FieldAttribute doubleField = doubleField();
yield EvalMapper.toEvaluator(
+ FOLD_CONTEXT,
new Add(Source.EMPTY, doubleField, new Literal(Source.EMPTY, 1D, DataType.DOUBLE)),
layout(doubleField)
).get(driverContext);
@@ -140,7 +142,8 @@ private static EvalOperator.ExpressionEvaluator evaluator(String operation) {
lhs = new Add(Source.EMPTY, lhs, new Literal(Source.EMPTY, 1L, DataType.LONG));
rhs = new Add(Source.EMPTY, rhs, new Literal(Source.EMPTY, 1L, DataType.LONG));
}
- yield EvalMapper.toEvaluator(new Case(Source.EMPTY, condition, List.of(lhs, rhs)), layout(f1, f2)).get(driverContext);
+ yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Case(Source.EMPTY, condition, List.of(lhs, rhs)), layout(f1, f2))
+ .get(driverContext);
}
case "date_trunc" -> {
FieldAttribute timestamp = new FieldAttribute(
@@ -149,6 +152,7 @@ private static EvalOperator.ExpressionEvaluator evaluator(String operation) {
new EsField("timestamp", DataType.DATETIME, Map.of(), true)
);
yield EvalMapper.toEvaluator(
+ FOLD_CONTEXT,
new DateTrunc(Source.EMPTY, new Literal(Source.EMPTY, Duration.ofHours(24), DataType.TIME_DURATION), timestamp),
layout(timestamp)
).get(driverContext);
@@ -156,6 +160,7 @@ private static EvalOperator.ExpressionEvaluator evaluator(String operation) {
case "equal_to_const" -> {
FieldAttribute longField = longField();
yield EvalMapper.toEvaluator(
+ FOLD_CONTEXT,
new Equals(Source.EMPTY, longField, new Literal(Source.EMPTY, 100_000L, DataType.LONG)),
layout(longField)
).get(driverContext);
@@ -163,21 +168,21 @@ private static EvalOperator.ExpressionEvaluator evaluator(String operation) {
case "long_equal_to_long" -> {
FieldAttribute lhs = longField();
FieldAttribute rhs = longField();
- yield EvalMapper.toEvaluator(new Equals(Source.EMPTY, lhs, rhs), layout(lhs, rhs)).get(driverContext);
+ yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Equals(Source.EMPTY, lhs, rhs), layout(lhs, rhs)).get(driverContext);
}
case "long_equal_to_int" -> {
FieldAttribute lhs = longField();
FieldAttribute rhs = intField();
- yield EvalMapper.toEvaluator(new Equals(Source.EMPTY, lhs, rhs), layout(lhs, rhs)).get(driverContext);
+ yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Equals(Source.EMPTY, lhs, rhs), layout(lhs, rhs)).get(driverContext);
}
case "mv_min", "mv_min_ascending" -> {
FieldAttribute longField = longField();
- yield EvalMapper.toEvaluator(new MvMin(Source.EMPTY, longField), layout(longField)).get(driverContext);
+ yield EvalMapper.toEvaluator(FOLD_CONTEXT, new MvMin(Source.EMPTY, longField), layout(longField)).get(driverContext);
}
case "rlike" -> {
FieldAttribute keywordField = keywordField();
RLike rlike = new RLike(Source.EMPTY, keywordField, new RLikePattern(".ar"));
- yield EvalMapper.toEvaluator(rlike, layout(keywordField)).get(driverContext);
+ yield EvalMapper.toEvaluator(FOLD_CONTEXT, rlike, layout(keywordField)).get(driverContext);
}
default -> throw new UnsupportedOperationException();
};
diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java
index c3b9768946767..1e57d9fab7cfd 100644
--- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java
+++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java
@@ -187,20 +187,12 @@ static String agentCommandLineOption(Path agentJar, Path tmpPropertiesFile) {
static void extractSecureSettings(SecureSettings secrets, Map propertiesMap) {
final Set settingNames = secrets.getSettingNames();
for (String key : List.of("api_key", "secret_token")) {
- for (String prefix : List.of("telemetry.", "tracing.apm.")) {
- if (settingNames.contains(prefix + key)) {
- if (propertiesMap.containsKey(key)) {
- throw new IllegalStateException(
- Strings.format("Duplicate telemetry setting: [telemetry.%s] and [tracing.apm.%s]", key, key)
- );
- }
-
- try (SecureString token = secrets.getString(prefix + key)) {
- propertiesMap.put(key, token.toString());
- }
+ String prefix = "telemetry.";
+ if (settingNames.contains(prefix + key)) {
+ try (SecureString token = secrets.getString(prefix + key)) {
+ propertiesMap.put(key, token.toString());
}
}
-
}
}
@@ -227,44 +219,12 @@ private static Map extractDynamicSettings(Map pr
static Map extractApmSettings(Settings settings) throws UserException {
final Map propertiesMap = new HashMap<>();
- // tracing.apm.agent. is deprecated by telemetry.agent.
final String telemetryAgentPrefix = "telemetry.agent.";
- final String deprecatedTelemetryAgentPrefix = "tracing.apm.agent.";
final Settings telemetryAgentSettings = settings.getByPrefix(telemetryAgentPrefix);
telemetryAgentSettings.keySet().forEach(key -> propertiesMap.put(key, String.valueOf(telemetryAgentSettings.get(key))));
- final Settings apmAgentSettings = settings.getByPrefix(deprecatedTelemetryAgentPrefix);
- for (String key : apmAgentSettings.keySet()) {
- if (propertiesMap.containsKey(key)) {
- throw new IllegalStateException(
- Strings.format(
- "Duplicate telemetry setting: [%s%s] and [%s%s]",
- telemetryAgentPrefix,
- key,
- deprecatedTelemetryAgentPrefix,
- key
- )
- );
- }
- propertiesMap.put(key, String.valueOf(apmAgentSettings.get(key)));
- }
-
StringJoiner globalLabels = extractGlobalLabels(telemetryAgentPrefix, propertiesMap, settings);
- if (globalLabels.length() == 0) {
- globalLabels = extractGlobalLabels(deprecatedTelemetryAgentPrefix, propertiesMap, settings);
- } else {
- StringJoiner tracingGlobalLabels = extractGlobalLabels(deprecatedTelemetryAgentPrefix, propertiesMap, settings);
- if (tracingGlobalLabels.length() != 0) {
- throw new IllegalArgumentException(
- "Cannot have global labels with tracing.agent prefix ["
- + globalLabels
- + "] and telemetry.apm.agent prefix ["
- + tracingGlobalLabels
- + "]"
- );
- }
- }
if (globalLabels.length() > 0) {
propertiesMap.put("global_labels", globalLabels.toString());
}
@@ -274,7 +234,7 @@ static Map extractApmSettings(Settings settings) throws UserExce
if (propertiesMap.containsKey(key)) {
throw new UserException(
ExitCodes.CONFIG,
- "Do not set a value for [tracing.apm.agent." + key + "], as this is configured automatically by Elasticsearch"
+ "Do not set a value for [telemetry.agent." + key + "], as this is configured automatically by Elasticsearch"
);
}
}
diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java
index a7ba8eb11fbcc..0e067afc1aa73 100644
--- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java
+++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/APMJvmOptionsTests.java
@@ -25,18 +25,15 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.function.Function;
import static org.elasticsearch.test.MapMatcher.matchesMap;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.containsInAnyOrder;
-import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.hasSize;
-import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
@@ -82,109 +79,63 @@ public void testFileDeleteWorks() throws IOException {
}
public void testExtractSecureSettings() {
- MockSecureSettings duplicateSecureSettings = new MockSecureSettings();
+ MockSecureSettings secureSettings = new MockSecureSettings();
+ secureSettings.setString("telemetry.secret_token", "token");
+ secureSettings.setString("telemetry.api_key", "key");
- for (String prefix : List.of("telemetry.", "tracing.apm.")) {
- MockSecureSettings secureSettings = new MockSecureSettings();
- secureSettings.setString(prefix + "secret_token", "token");
- secureSettings.setString(prefix + "api_key", "key");
-
- duplicateSecureSettings.setString(prefix + "api_key", "secret");
-
- Map propertiesMap = new HashMap<>();
- APMJvmOptions.extractSecureSettings(secureSettings, propertiesMap);
-
- assertThat(propertiesMap, matchesMap(Map.of("secret_token", "token", "api_key", "key")));
- }
-
- Exception exception = expectThrows(
- IllegalStateException.class,
- () -> APMJvmOptions.extractSecureSettings(duplicateSecureSettings, new HashMap<>())
- );
- assertThat(exception.getMessage(), containsString("Duplicate telemetry setting"));
- assertThat(exception.getMessage(), containsString("telemetry.api_key"));
- assertThat(exception.getMessage(), containsString("tracing.apm.api_key"));
+ Map propertiesMap = new HashMap<>();
+ APMJvmOptions.extractSecureSettings(secureSettings, propertiesMap);
+ assertThat(propertiesMap, matchesMap(Map.of("secret_token", "token", "api_key", "key")));
}
public void testExtractSettings() throws UserException {
- Function buildSettings = (prefix) -> Settings.builder()
- .put(prefix + "server_url", "https://myurl:443")
- .put(prefix + "service_node_name", "instance-0000000001");
-
- for (String prefix : List.of("tracing.apm.agent.", "telemetry.agent.")) {
- var name = "APM Tracing";
- var deploy = "123";
- var org = "456";
- var extracted = APMJvmOptions.extractApmSettings(
- buildSettings.apply(prefix)
- .put(prefix + "global_labels.deployment_name", name)
- .put(prefix + "global_labels.deployment_id", deploy)
- .put(prefix + "global_labels.organization_id", org)
- .build()
- );
-
- assertThat(
- extracted,
- allOf(
- hasEntry("server_url", "https://myurl:443"),
- hasEntry("service_node_name", "instance-0000000001"),
- hasEntry(equalTo("global_labels"), not(endsWith(","))), // test that we have collapsed all global labels into one
- not(hasKey("global_labels.organization_id")) // tests that we strip out the top level label keys
- )
- );
-
- List labels = Arrays.stream(extracted.get("global_labels").split(",")).toList();
- assertThat(labels, hasSize(3));
- assertThat(labels, containsInAnyOrder("deployment_name=APM Tracing", "organization_id=" + org, "deployment_id=" + deploy));
-
- // test replacing with underscores and skipping empty
- name = "APM=Tracing";
- deploy = "";
- org = ",456";
- extracted = APMJvmOptions.extractApmSettings(
- buildSettings.apply(prefix)
- .put(prefix + "global_labels.deployment_name", name)
- .put(prefix + "global_labels.deployment_id", deploy)
- .put(prefix + "global_labels.organization_id", org)
- .build()
- );
- labels = Arrays.stream(extracted.get("global_labels").split(",")).toList();
- assertThat(labels, hasSize(2));
- assertThat(labels, containsInAnyOrder("deployment_name=APM_Tracing", "organization_id=_456"));
- }
-
- IllegalStateException err = expectThrows(
- IllegalStateException.class,
- () -> APMJvmOptions.extractApmSettings(
- Settings.builder()
- .put("tracing.apm.agent.server_url", "https://myurl:443")
- .put("telemetry.agent.server_url", "https://myurl-2:443")
- .build()
- )
- );
- assertThat(err.getMessage(), is("Duplicate telemetry setting: [telemetry.agent.server_url] and [tracing.apm.agent.server_url]"));
- }
-
- public void testNoMixedLabels() {
- String telemetryAgent = "telemetry.agent.";
- String tracingAgent = "tracing.apm.agent.";
- Settings settings = Settings.builder()
- .put("tracing.apm.enabled", true)
- .put(telemetryAgent + "server_url", "https://myurl:443")
- .put(telemetryAgent + "service_node_name", "instance-0000000001")
- .put(tracingAgent + "global_labels.deployment_id", "123")
- .put(telemetryAgent + "global_labels.organization_id", "456")
+ Settings defaults = Settings.builder()
+ .put("telemetry.agent.server_url", "https://myurl:443")
+ .put("telemetry.agent.service_node_name", "instance-0000000001")
.build();
- IllegalArgumentException err = assertThrows(IllegalArgumentException.class, () -> APMJvmOptions.extractApmSettings(settings));
+ var name = "APM Tracing";
+ var deploy = "123";
+ var org = "456";
+ var extracted = APMJvmOptions.extractApmSettings(
+ Settings.builder()
+ .put(defaults)
+ .put("telemetry.agent.global_labels.deployment_name", name)
+ .put("telemetry.agent.global_labels.deployment_id", deploy)
+ .put("telemetry.agent.global_labels.organization_id", org)
+ .build()
+ );
+
assertThat(
- err.getMessage(),
- is(
- "Cannot have global labels with tracing.agent prefix [organization_id=456] and"
- + " telemetry.apm.agent prefix [deployment_id=123]"
+ extracted,
+ allOf(
+ hasEntry("server_url", "https://myurl:443"),
+ hasEntry("service_node_name", "instance-0000000001"),
+ hasEntry(equalTo("global_labels"), not(endsWith(","))), // test that we have collapsed all global labels into one
+ not(hasKey("global_labels.organization_id")) // tests that we strip out the top level label keys
)
);
+
+ List labels = Arrays.stream(extracted.get("global_labels").split(",")).toList();
+ assertThat(labels, hasSize(3));
+ assertThat(labels, containsInAnyOrder("deployment_name=APM Tracing", "organization_id=" + org, "deployment_id=" + deploy));
+
+ // test replacing with underscores and skipping empty
+ name = "APM=Tracing";
+ deploy = "";
+ org = ",456";
+ extracted = APMJvmOptions.extractApmSettings(
+ Settings.builder()
+ .put(defaults)
+ .put("telemetry.agent.global_labels.deployment_name", name)
+ .put("telemetry.agent.global_labels.deployment_id", deploy)
+ .put("telemetry.agent.global_labels.organization_id", org)
+ .build()
+ );
+ labels = Arrays.stream(extracted.get("global_labels").split(",")).toList();
+ assertThat(labels, hasSize(2));
+ assertThat(labels, containsInAnyOrder("deployment_name=APM_Tracing", "organization_id=_456"));
}
private Path makeFakeAgentJar() throws IOException {
diff --git a/docs/changelog/118602.yaml b/docs/changelog/118602.yaml
new file mode 100644
index 0000000000000..a75c5dcf11da3
--- /dev/null
+++ b/docs/changelog/118602.yaml
@@ -0,0 +1,5 @@
+pr: 118602
+summary: Limit memory usage of `fold`
+area: ES|QL
+type: bug
+issues: []
diff --git a/docs/changelog/119227.yaml b/docs/changelog/119227.yaml
new file mode 100644
index 0000000000000..1e3d4f97a3d27
--- /dev/null
+++ b/docs/changelog/119227.yaml
@@ -0,0 +1,13 @@
+pr: 119227
+summary: Remove unfreeze REST endpoint
+area: Indices APIs
+type: breaking
+issues: []
+breaking:
+ title: Remove unfreeze REST endpoint
+ area: REST API
+ details: >-
+ The `/{index}/_unfreeze` REST endpoint is no longer supported. This API was deprecated, and the corresponding
+ `/{index}/_freeze` endpoint was removed in 8.0.
+ impact: None, since it is not possible to have a frozen index in a version which is readable by Elasticsearch 9.0
+ notable: false
diff --git a/docs/changelog/119772.yaml b/docs/changelog/119772.yaml
new file mode 100644
index 0000000000000..58d483566b109
--- /dev/null
+++ b/docs/changelog/119772.yaml
@@ -0,0 +1,6 @@
+pr: 119772
+summary: ESQL Support IN operator for Date nanos
+area: ES|QL
+type: enhancement
+issues:
+ - 118578
diff --git a/docs/changelog/119831.yaml b/docs/changelog/119831.yaml
new file mode 100644
index 0000000000000..61c09d7d54de0
--- /dev/null
+++ b/docs/changelog/119831.yaml
@@ -0,0 +1,5 @@
+pr: 119831
+summary: Run `TransportClusterGetSettingsAction` on local node
+area: Infra/Settings
+type: enhancement
+issues: []
diff --git a/docs/changelog/119846.yaml b/docs/changelog/119846.yaml
new file mode 100644
index 0000000000000..9e7d99fe1be13
--- /dev/null
+++ b/docs/changelog/119846.yaml
@@ -0,0 +1,12 @@
+pr: 119846
+summary: Drop support for brackets from METADATA syntax
+area: ES|QL
+type: deprecation
+issues:
+ - 115401
+deprecation:
+ title: Drop support for brackets from METADATA syntax
+ area: ES|QL
+  details: The bracketed `METADATA` syntax (`FROM index [METADATA _index, _id]`) is no longer
+    supported; use the bracket-free form (`FROM index METADATA _index, _id`) instead.
+  impact: Queries using the bracketed `METADATA` syntax must be updated to the bracket-free form.
diff --git a/docs/changelog/119893.yaml b/docs/changelog/119893.yaml
new file mode 100644
index 0000000000000..35a46ce0940d3
--- /dev/null
+++ b/docs/changelog/119893.yaml
@@ -0,0 +1,5 @@
+pr: 119893
+summary: Add enterprise license check for Inference API actions
+area: Machine Learning
+type: enhancement
+issues: []
diff --git a/docs/changelog/119922.yaml b/docs/changelog/119922.yaml
new file mode 100644
index 0000000000000..2fc9d9529c968
--- /dev/null
+++ b/docs/changelog/119922.yaml
@@ -0,0 +1,5 @@
+pr: 119922
+summary: "[Inference API] Fix misspelling: covertToString to convertToString"
+area: Machine Learning
+type: enhancement
+issues: []
diff --git a/docs/changelog/119926.yaml b/docs/changelog/119926.yaml
new file mode 100644
index 0000000000000..3afafd5b2117f
--- /dev/null
+++ b/docs/changelog/119926.yaml
@@ -0,0 +1,11 @@
+pr: 119926
+summary: "Deprecated tracing.apm.* settings have been removed."
+area: Infra/Metrics
+type: breaking
+issues: []
+breaking:
+  title: "Deprecated tracing.apm.* settings have been removed."
+ area: Cluster and node setting
+ details: Deprecated `tracing.apm.*` settings got removed, use respective `telemetry.*` / `telemetry.tracing.*` settings instead.
+ impact: 9.x nodes will refuse to start if any such setting (including secret settings) is still present.
+ notable: false
diff --git a/docs/changelog/120014.yaml b/docs/changelog/120014.yaml
new file mode 100644
index 0000000000000..bef1f3ba49939
--- /dev/null
+++ b/docs/changelog/120014.yaml
@@ -0,0 +1,6 @@
+pr: 120014
+summary: Fix potential file leak in ES816BinaryQuantizedVectorsWriter
+area: Search
+type: bug
+issues:
+ - 119981
diff --git a/docs/changelog/120038.yaml b/docs/changelog/120038.yaml
new file mode 100644
index 0000000000000..fe3a2ccccc095
--- /dev/null
+++ b/docs/changelog/120038.yaml
@@ -0,0 +1,5 @@
+pr: 120038
+summary: Run template simulation actions on local node
+area: Ingest Node
+type: enhancement
+issues: []
diff --git a/docs/changelog/120042.yaml b/docs/changelog/120042.yaml
new file mode 100644
index 0000000000000..0093068ae9894
--- /dev/null
+++ b/docs/changelog/120042.yaml
@@ -0,0 +1,5 @@
+pr: 120042
+summary: Match dot prefix of migrated DS backing index with the source index
+area: Data streams
+type: bug
+issues: []
diff --git a/docs/changelog/120062.yaml b/docs/changelog/120062.yaml
new file mode 100644
index 0000000000000..42e8d97f17444
--- /dev/null
+++ b/docs/changelog/120062.yaml
@@ -0,0 +1,6 @@
+pr: 120062
+summary: Update Text Similarity Reranker to Properly Handle Aliases
+area: Ranking
+type: bug
+issues:
+ - 119617
diff --git a/docs/changelog/120084.yaml b/docs/changelog/120084.yaml
new file mode 100644
index 0000000000000..aafe490d79f1e
--- /dev/null
+++ b/docs/changelog/120084.yaml
@@ -0,0 +1,5 @@
+pr: 120084
+summary: Improve how reindex data stream index action handles api blocks
+area: Data streams
+type: enhancement
+issues: []
diff --git a/docs/changelog/120087.yaml b/docs/changelog/120087.yaml
new file mode 100644
index 0000000000000..8539640809b04
--- /dev/null
+++ b/docs/changelog/120087.yaml
@@ -0,0 +1,5 @@
+pr: 120087
+summary: Include `clusterApplyListener` in long cluster apply warnings
+area: Cluster Coordination
+type: enhancement
+issues: []
diff --git a/docs/changelog/120133.yaml b/docs/changelog/120133.yaml
new file mode 100644
index 0000000000000..4ec88267a1bf8
--- /dev/null
+++ b/docs/changelog/120133.yaml
@@ -0,0 +1,6 @@
+pr: 120133
+summary: Use approximation to advance matched queries
+area: Search
+type: bug
+issues:
+ - 120130
diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc
index 8ce8064a8161c..adfd38478ab21 100644
--- a/docs/reference/esql/esql-limitations.asciidoc
+++ b/docs/reference/esql/esql-limitations.asciidoc
@@ -30,11 +30,11 @@ include::processing-commands/limit.asciidoc[tag=limitation]
** You can use `to_datetime` to cast to millisecond dates to use unsupported functions
* `double` (`float`, `half_float`, `scaled_float` are represented as `double`)
* `ip`
-* `keyword` family including `keyword`, `constant_keyword`, and `wildcard`
+* `keyword` <> including `keyword`, `constant_keyword`, and `wildcard`
* `int` (`short` and `byte` are represented as `int`)
* `long`
* `null`
-* `text`
+* `text` <> including `text`, `semantic_text` and `match_only_text`
* experimental:[] `unsigned_long`
* `version`
* Spatial types
diff --git a/docs/reference/esql/functions/description/match.asciidoc b/docs/reference/esql/functions/description/match.asciidoc
index 931fd5eb2f94a..0724f0f108e3c 100644
--- a/docs/reference/esql/functions/description/match.asciidoc
+++ b/docs/reference/esql/functions/description/match.asciidoc
@@ -2,4 +2,4 @@
*Description*
-Use `MATCH` to perform a <> on the specified field. Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL. Match can be used on text fields, as well as other field types like boolean, dates, and numeric types. For a simplified syntax, you can use the <> `:` operator instead of `MATCH`. `MATCH` returns true if the provided query matches the row.
+Use `MATCH` to perform a <> on the specified field. Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL. Match can be used on fields from the text family like <> and <>, as well as other field types like keyword, boolean, dates, and numeric types. For a simplified syntax, you can use the <> `:` operator instead of `MATCH`. `MATCH` returns true if the provided query matches the row.
diff --git a/docs/reference/esql/functions/description/to_date_nanos.asciidoc b/docs/reference/esql/functions/description/to_date_nanos.asciidoc
index 3fac7295f1bed..955c19b43a12f 100644
--- a/docs/reference/esql/functions/description/to_date_nanos.asciidoc
+++ b/docs/reference/esql/functions/description/to_date_nanos.asciidoc
@@ -4,4 +4,4 @@
Converts an input to a nanosecond-resolution date value (aka date_nanos).
-NOTE: The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.
+NOTE: The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z, attempting to convert values outside of that range will result in null with a warning. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.
diff --git a/docs/reference/esql/functions/kibana/definition/bucket.json b/docs/reference/esql/functions/kibana/definition/bucket.json
index 3d96de05c8407..f9c7f2f27d6f9 100644
--- a/docs/reference/esql/functions/kibana/definition/bucket.json
+++ b/docs/reference/esql/functions/kibana/definition/bucket.json
@@ -1599,7 +1599,7 @@
"FROM sample_data \n| WHERE @timestamp >= NOW() - 1 day and @timestamp < NOW()\n| STATS COUNT(*) BY bucket = BUCKET(@timestamp, 25, NOW() - 1 day, NOW())",
"FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS AVG(salary) BY bucket = BUCKET(hire_date, 20, \"1985-01-01T00:00:00Z\", \"1986-01-01T00:00:00Z\")\n| SORT bucket",
"FROM employees\n| STATS s1 = b1 + 1, s2 = BUCKET(salary / 1000 + 999, 50.) + 2 BY b1 = BUCKET(salary / 100 + 99, 50.), b2 = BUCKET(salary / 1000 + 999, 50.)\n| SORT b1, b2\n| KEEP s1, b1, s2, b2",
- "FROM employees \n| STATS dates = VALUES(birth_date) BY b = BUCKET(birth_date + 1 HOUR, 1 YEAR) - 1 HOUR\n| EVAL d_count = MV_COUNT(dates)\n| SORT d_count\n| LIMIT 3"
+ "FROM employees\n| STATS dates = MV_SORT(VALUES(birth_date)) BY b = BUCKET(birth_date + 1 HOUR, 1 YEAR) - 1 HOUR\n| EVAL d_count = MV_COUNT(dates)\n| SORT d_count, b\n| LIMIT 3"
],
"preview" : false,
"snapshot_only" : false
diff --git a/docs/reference/esql/functions/kibana/definition/date_format.json b/docs/reference/esql/functions/kibana/definition/date_format.json
index 6e2738fafb964..629415da30fa2 100644
--- a/docs/reference/esql/functions/kibana/definition/date_format.json
+++ b/docs/reference/esql/functions/kibana/definition/date_format.json
@@ -4,6 +4,18 @@
"name" : "date_format",
"description" : "Returns a string representation of a date, in the provided format.",
"signatures" : [
+ {
+ "params" : [
+ {
+ "name" : "dateFormat",
+ "type" : "date",
+ "optional" : true,
+ "description" : "Date format (optional). If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns `null`."
+ }
+ ],
+ "variadic" : false,
+ "returnType" : "keyword"
+ },
{
"params" : [
{
diff --git a/docs/reference/esql/functions/kibana/definition/match.json b/docs/reference/esql/functions/kibana/definition/match.json
index d61534da81a6d..eb206cb9ddf4d 100644
--- a/docs/reference/esql/functions/kibana/definition/match.json
+++ b/docs/reference/esql/functions/kibana/definition/match.json
@@ -2,7 +2,7 @@
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "eval",
"name" : "match",
- "description" : "Use `MATCH` to perform a <> on the specified field.\nUsing `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.\n\nMatch can be used on text fields, as well as other field types like boolean, dates, and numeric types.\n\nFor a simplified syntax, you can use the <> `:` operator instead of `MATCH`.\n\n`MATCH` returns true if the provided query matches the row.",
+ "description" : "Use `MATCH` to perform a <> on the specified field.\nUsing `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.\n\nMatch can be used on fields from the text family like <> and <>,\nas well as other field types like keyword, boolean, dates, and numeric types.\n\nFor a simplified syntax, you can use the <> `:` operator instead of `MATCH`.\n\n`MATCH` returns true if the provided query matches the row.",
"signatures" : [
{
"params" : [
diff --git a/docs/reference/esql/functions/kibana/definition/match_operator.json b/docs/reference/esql/functions/kibana/definition/match_operator.json
index 44233bbddb653..c8cbf1cf9d966 100644
--- a/docs/reference/esql/functions/kibana/definition/match_operator.json
+++ b/docs/reference/esql/functions/kibana/definition/match_operator.json
@@ -2,7 +2,7 @@
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "operator",
"name" : "match_operator",
- "description" : "Performs a <> on the specified field. Returns true if the provided query matches the row.",
+ "description" : "Use `MATCH` to perform a <> on the specified field.\nUsing `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.\n\nMatch can be used on text fields, as well as other field types like boolean, dates, and numeric types.\n\nFor a simplified syntax, you can use the <> `:` operator instead of `MATCH`.\n\n`MATCH` returns true if the provided query matches the row.",
"signatures" : [
{
"params" : [
diff --git a/docs/reference/esql/functions/kibana/definition/to_date_nanos.json b/docs/reference/esql/functions/kibana/definition/to_date_nanos.json
index d9409bceb8e6f..210b9608f9eff 100644
--- a/docs/reference/esql/functions/kibana/definition/to_date_nanos.json
+++ b/docs/reference/esql/functions/kibana/definition/to_date_nanos.json
@@ -3,7 +3,7 @@
"type" : "eval",
"name" : "to_date_nanos",
"description" : "Converts an input to a nanosecond-resolution date value (aka date_nanos).",
- "note" : "The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.",
+  "note" : "The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z, attempting to convert values outside of that range will result in null with a warning. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.",
"signatures" : [
{
"params" : [
@@ -90,6 +90,6 @@
"returnType" : "date_nanos"
}
],
- "preview" : true,
+ "preview" : false,
"snapshot_only" : false
}
diff --git a/docs/reference/esql/functions/kibana/docs/match.md b/docs/reference/esql/functions/kibana/docs/match.md
index 72258a1682936..80bf84351c188 100644
--- a/docs/reference/esql/functions/kibana/docs/match.md
+++ b/docs/reference/esql/functions/kibana/docs/match.md
@@ -6,7 +6,8 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
Use `MATCH` to perform a <> on the specified field.
Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.
-Match can be used on text fields, as well as other field types like boolean, dates, and numeric types.
+Match can be used on fields from the text family like <> and <>,
+as well as other field types like keyword, boolean, dates, and numeric types.
For a simplified syntax, you can use the <> `:` operator instead of `MATCH`.
diff --git a/docs/reference/esql/functions/kibana/docs/match_operator.md b/docs/reference/esql/functions/kibana/docs/match_operator.md
index b0b6196798087..7681c2d1ce231 100644
--- a/docs/reference/esql/functions/kibana/docs/match_operator.md
+++ b/docs/reference/esql/functions/kibana/docs/match_operator.md
@@ -3,7 +3,14 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
-->
### MATCH_OPERATOR
-Performs a <> on the specified field. Returns true if the provided query matches the row.
+Use `MATCH` to perform a <> on the specified field.
+Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL.
+
+Match can be used on text fields, as well as other field types like boolean, dates, and numeric types.
+
+For a simplified syntax, you can use the <> `:` operator instead of `MATCH`.
+
+`MATCH` returns true if the provided query matches the row.
```
FROM books
diff --git a/docs/reference/esql/functions/kibana/docs/to_date_nanos.md b/docs/reference/esql/functions/kibana/docs/to_date_nanos.md
index 0294802485ccb..1bce8d4fca832 100644
--- a/docs/reference/esql/functions/kibana/docs/to_date_nanos.md
+++ b/docs/reference/esql/functions/kibana/docs/to_date_nanos.md
@@ -5,4 +5,4 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
### TO_DATE_NANOS
Converts an input to a nanosecond-resolution date value (aka date_nanos).
-Note: The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.
+Note: The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z. Attempting to convert values outside of that range will result in null with a warning. Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.
diff --git a/docs/reference/esql/functions/layout/to_date_nanos.asciidoc b/docs/reference/esql/functions/layout/to_date_nanos.asciidoc
index 977a0ac969e5d..2dfd13dac7e20 100644
--- a/docs/reference/esql/functions/layout/to_date_nanos.asciidoc
+++ b/docs/reference/esql/functions/layout/to_date_nanos.asciidoc
@@ -4,8 +4,6 @@
[[esql-to_date_nanos]]
=== `TO_DATE_NANOS`
-preview::["Do not use on production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."]
-
*Syntax*
[.text-center]
diff --git a/docs/reference/esql/functions/type-conversion-functions.asciidoc b/docs/reference/esql/functions/type-conversion-functions.asciidoc
index 9ac9ec290c07b..bd70c2789dfa2 100644
--- a/docs/reference/esql/functions/type-conversion-functions.asciidoc
+++ b/docs/reference/esql/functions/type-conversion-functions.asciidoc
@@ -18,6 +18,7 @@
* <>
* experimental:[] <>
* <>
+* <>
* <>
* <>
* <>
@@ -37,6 +38,7 @@ include::layout/to_cartesianpoint.asciidoc[]
include::layout/to_cartesianshape.asciidoc[]
include::layout/to_dateperiod.asciidoc[]
include::layout/to_datetime.asciidoc[]
+include::layout/to_date_nanos.asciidoc[]
include::layout/to_degrees.asciidoc[]
include::layout/to_double.asciidoc[]
include::layout/to_geopoint.asciidoc[]
diff --git a/docs/reference/esql/functions/types/date_format.asciidoc b/docs/reference/esql/functions/types/date_format.asciidoc
index b2e97dfa8835a..580094e9be906 100644
--- a/docs/reference/esql/functions/types/date_format.asciidoc
+++ b/docs/reference/esql/functions/types/date_format.asciidoc
@@ -5,6 +5,7 @@
[%header.monospaced.styled,format=dsv,separator=|]
|===
dateFormat | date | result
+date | | keyword
keyword | date | keyword
text | date | keyword
|===
diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc
index ca7de396147a8..b6b82422cbb4a 100644
--- a/docs/reference/indices.asciidoc
+++ b/docs/reference/indices.asciidoc
@@ -24,7 +24,6 @@ index settings, aliases, mappings, and index templates.
* <>
* <>
* <>
-* <>
* <>
* <>
* <>
@@ -143,6 +142,5 @@ include::indices/shrink-index.asciidoc[]
include::indices/simulate-index.asciidoc[]
include::indices/simulate-template.asciidoc[]
include::indices/split-index.asciidoc[]
-include::indices/apis/unfreeze.asciidoc[]
include::indices/update-settings.asciidoc[]
include::indices/put-mapping.asciidoc[]
diff --git a/docs/reference/indices/apis/unfreeze.asciidoc b/docs/reference/indices/apis/unfreeze.asciidoc
deleted file mode 100644
index 5d04d44db7443..0000000000000
--- a/docs/reference/indices/apis/unfreeze.asciidoc
+++ /dev/null
@@ -1,61 +0,0 @@
-[role="xpack"]
-[[unfreeze-index-api]]
-=== Unfreeze index API
-++++
-Unfreeze index
-++++
-
-[WARNING]
-.Deprecated in 7.14
-====
-In 8.0, we removed the ability to freeze an index. In previous versions,
-freezing an index reduced its memory overhead. However, frozen indices are no
-longer useful due to
-https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[recent
-improvements in heap memory usage].
-You can use this API to unfreeze indices that were frozen in 7.x. Frozen indices
-are not related to the frozen data tier.
-====
-
-.New API reference
-[sidebar]
---
-For the most up-to-date API details, refer to {api-es}/group/endpoint-indices[Index APIs].
---
-
-Unfreezes an index.
-
-[[unfreeze-index-api-request]]
-==== {api-request-title}
-
-`POST //_unfreeze`
-
-[[unfreeze-index-api-prereqs]]
-==== {api-prereq-title}
-
-* If the {es} {security-features} are enabled, you must have the `manage`
-<> for the target index or index alias.
-
-[[unfreeze-index-api-desc]]
-==== {api-description-title}
-
-When a frozen index is unfrozen, the index goes through the normal recovery
-process and becomes writeable again.
-
-[[unfreeze-index-api-path-parms]]
-==== {api-path-parms-title}
-
-``::
- (Required, string) Identifier for the index.
-
-[[unfreeze-index-api-examples]]
-==== {api-examples-title}
-
-The following example unfreezes an index:
-
-[source,console]
---------------------------------------------------
-POST /my-index-000001/_unfreeze
---------------------------------------------------
-// TEST[s/^/PUT my-index-000001\n/]
-// TEST[skip:unable to ignore deprecation warning]
diff --git a/docs/reference/indices/index-mgmt.asciidoc b/docs/reference/indices/index-mgmt.asciidoc
index 73643dbfd4b3b..131bc79faa40c 100644
--- a/docs/reference/indices/index-mgmt.asciidoc
+++ b/docs/reference/indices/index-mgmt.asciidoc
@@ -43,7 +43,7 @@ For more information on managing indices, refer to <>.
* To filter the list of indices, use the search bar or click a badge.
Badges indicate if an index is a <>, a
-<>, or <>.
+<>, or <>.
* To drill down into the index
<>, <>, and statistics,
diff --git a/docs/reference/inference/chat-completion-inference.asciidoc b/docs/reference/inference/chat-completion-inference.asciidoc
new file mode 100644
index 0000000000000..83a8f94634f2f
--- /dev/null
+++ b/docs/reference/inference/chat-completion-inference.asciidoc
@@ -0,0 +1,417 @@
+[role="xpack"]
+[[chat-completion-inference-api]]
+=== Chat completion inference API
+
+Streams a chat completion response.
+
+IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
+For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models.
+However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>.
+
+
+[discrete]
+[[chat-completion-inference-api-request]]
+==== {api-request-title}
+
+`POST /_inference//_unified`
+
+`POST /_inference/chat_completion//_unified`
+
+
+[discrete]
+[[chat-completion-inference-api-prereqs]]
+==== {api-prereq-title}
+
+* Requires the `monitor_inference` <>
+(the built-in `inference_admin` and `inference_user` roles grant this privilege)
+* You must use a client that supports streaming.
+
+
+[discrete]
+[[chat-completion-inference-api-desc]]
+==== {api-description-title}
+
+The chat completion {infer} API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation.
+It only works with the `chat_completion` task type for `openai` and `elastic` {infer} services.
+
+[NOTE]
+====
+The `chat_completion` task type is only available within the _unified API and only supports streaming.
+====
+
+[discrete]
+[[chat-completion-inference-api-path-params]]
+==== {api-path-parms-title}
+
+``::
+(Required, string)
+The unique identifier of the {infer} endpoint.
+
+
+``::
+(Optional, string)
+The type of {infer} task that the model performs. If included, this must be set to the value `chat_completion`.
+
+
+[discrete]
+[[chat-completion-inference-api-request-body]]
+==== {api-request-body-title}
+
+`messages`::
+(Required, array of objects) A list of objects representing the conversation.
+Requests should generally only add new messages from the user (role `user`). The other message roles (`assistant`, `system`, or `tool`) should generally only be copied from the response to a previous completion request, such that the messages array is built up throughout a conversation.
++
+.Assistant message
+[%collapsible%closed]
+=====
+`content`::
+(Required unless `tool_calls` is specified, string or array of objects)
+The contents of the message.
++
+include::inference-shared.asciidoc[tag=chat-completion-schema-content-with-examples]
++
+`role`::
+(Required, string)
+The role of the message author. This should be set to `assistant` for this type of message.
++
+`tool_calls`::
+(Optional, array of objects)
+The tool calls generated by the model.
++
+.Examples
+[%collapsible%closed]
+======
+[source,js]
+------------------------------------------------------------
+{
+ "tool_calls": [
+ {
+ "id": "call_KcAjWtAww20AihPHphUh46Gd",
+ "type": "function",
+ "function": {
+ "name": "get_current_weather",
+ "arguments": "{\"location\":\"Boston, MA\"}"
+ }
+ }
+ ]
+}
+------------------------------------------------------------
+// NOTCONSOLE
+======
++
+`id`:::
+(Required, string)
+The identifier of the tool call.
++
+`type`:::
+(Required, string)
+The type of tool call. This must be set to the value `function`.
++
+`function`:::
+(Required, object)
+The function that the model called.
++
+`name`::::
+(Required, string)
+The name of the function to call.
++
+`arguments`::::
+(Required, string)
+The arguments to call the function with in JSON format.
+=====
++
+.System message
+[%collapsible%closed]
+=====
+`content`:::
+(Required, string or array of objects)
+The contents of the message.
++
+include::inference-shared.asciidoc[tag=chat-completion-schema-content-with-examples]
++
+`role`:::
+(Required, string)
+The role of the message author. This should be set to `system` for this type of message.
+=====
++
+.Tool message
+[%collapsible%closed]
+=====
+`content`::
+(Required, string or array of objects)
+The contents of the message.
++
+include::inference-shared.asciidoc[tag=chat-completion-schema-content-with-examples]
++
+`role`::
+(Required, string)
+The role of the message author. This should be set to `tool` for this type of message.
++
+`tool_call_id`::
+(Required, string)
+The tool call that this message is responding to.
+=====
++
+.User message
+[%collapsible%closed]
+=====
+`content`::
+(Required, string or array of objects)
+The contents of the message.
++
+include::inference-shared.asciidoc[tag=chat-completion-schema-content-with-examples]
++
+`role`::
+(Required, string)
+The role of the message author. This should be set to `user` for this type of message.
+=====
+
+`model`::
+(Optional, string)
+The ID of the model to use. By default, the model ID is set to the value included when creating the inference endpoint.
+
+`max_completion_tokens`::
+(Optional, integer)
+The upper bound limit for the number of tokens that can be generated for a completion request.
+
+`stop`::
+(Optional, array of strings)
+A sequence of strings to control when the model should stop generating additional tokens.
+
+`temperature`::
+(Optional, float)
+The sampling temperature to use.
+
+`tools`::
+(Optional, array of objects)
+A list of tools that the model can call.
++
+.Structure
+[%collapsible%closed]
+=====
+`type`::
+(Required, string)
+The type of tool, must be set to the value `function`.
++
+`function`::
+(Required, object)
+The function definition.
++
+`description`:::
+(Optional, string)
+A description of what the function does. This is used by the model to choose when and how to call the function.
++
+`name`:::
+(Required, string)
+The name of the function.
++
+`parameters`:::
+(Optional, object)
+The parameters the function accepts. This should be formatted as a JSON object.
++
+`strict`:::
+(Optional, boolean)
+Whether to enable schema adherence when generating the function call.
+=====
++
+.Examples
+[%collapsible%closed]
+======
+[source,js]
+------------------------------------------------------------
+{
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_price_of_item",
+ "description": "Get the current price of an item",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "item": {
+ "id": "12345"
+ },
+ "unit": {
+ "type": "currency"
+ }
+ }
+ }
+ }
+ }
+ ]
+}
+------------------------------------------------------------
+// NOTCONSOLE
+======
+
+`tool_choice`::
+(Optional, string or object)
+Controls which tool is called by the model.
++
+String representation:::
+One of `auto`, `none`, or `required`. `auto` allows the model to choose between calling tools and generating a message. `none` causes the model to not call any tools. `required` forces the model to call one or more tools.
++
+Object representation:::
++
+.Structure
+[%collapsible%closed]
+=====
+`type`::
+(Required, string)
+The type of the tool. This must be set to the value `function`.
++
+`function`::
+(Required, object)
++
+`name`:::
+(Required, string)
+The name of the function to call.
+=====
++
+.Examples
+[%collapsible%closed]
+=====
+[source,js]
+------------------------------------------------------------
+{
+ "tool_choice": {
+ "type": "function",
+ "function": {
+ "name": "get_current_weather"
+ }
+ }
+}
+------------------------------------------------------------
+// NOTCONSOLE
+=====
+
+`top_p`::
+(Optional, float)
+Nucleus sampling, an alternative to sampling with temperature.
+
+[discrete]
+[[chat-completion-inference-api-example]]
+==== {api-examples-title}
+
+The following example performs a chat completion on the example question with streaming.
+
+
+[source,console]
+------------------------------------------------------------
+POST _inference/chat_completion/openai-completion/_stream
+{
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is Elastic?"
+ }
+ ]
+}
+------------------------------------------------------------
+// TEST[skip:TBD]
+
+The following example performs a chat completion using an Assistant message with `tool_calls`.
+
+[source,console]
+------------------------------------------------------------
+POST _inference/chat_completion/openai-completion/_stream
+{
+ "messages": [
+ {
+ "role": "assistant",
+ "content": "Let's find out what the weather is",
+ "tool_calls": [ <1>
+ {
+ "id": "call_KcAjWtAww20AihPHphUh46Gd",
+ "type": "function",
+ "function": {
+ "name": "get_current_weather",
+ "arguments": "{\"location\":\"Boston, MA\"}"
+ }
+ }
+ ]
+ },
+ { <2>
+ "role": "tool",
+ "content": "The weather is cold",
+ "tool_call_id": "call_KcAjWtAww20AihPHphUh46Gd"
+ }
+ ]
+}
+------------------------------------------------------------
+// TEST[skip:TBD]
+
+<1> Each tool call needs a corresponding Tool message.
+<2> The corresponding Tool message.
+
+The following example performs a chat completion using a User message with `tools` and `tool_choice`.
+
+[source,console]
+------------------------------------------------------------
+POST _inference/chat_completion/openai-completion/_stream
+{
+ "messages": [
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "What's the price of a scarf?"
+ }
+ ]
+ }
+ ],
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_current_price",
+ "description": "Get the current price of a item",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "item": {
+ "id": "123"
+ }
+ }
+ }
+ }
+ }
+ ],
+ "tool_choice": {
+ "type": "function",
+ "function": {
+ "name": "get_current_price"
+ }
+ }
+}
+------------------------------------------------------------
+// TEST[skip:TBD]
+
+The API returns the following response when a request is made to the OpenAI service:
+
+
+[source,txt]
+------------------------------------------------------------
+event: message
+data: {"chat_completion":{"id":"chatcmpl-Ae0TWsy2VPnSfBbv5UztnSdYUMFP3","choices":[{"delta":{"content":"","role":"assistant"},"index":0}],"model":"gpt-4o-2024-08-06","object":"chat.completion.chunk"}}
+
+event: message
+data: {"chat_completion":{"id":"chatcmpl-Ae0TWsy2VPnSfBbv5UztnSdYUMFP3","choices":[{"delta":{"content":Elastic"},"index":0}],"model":"gpt-4o-2024-08-06","object":"chat.completion.chunk"}}
+
+event: message
+data: {"chat_completion":{"id":"chatcmpl-Ae0TWsy2VPnSfBbv5UztnSdYUMFP3","choices":[{"delta":{"content":" is"},"index":0}],"model":"gpt-4o-2024-08-06","object":"chat.completion.chunk"}}
+
+(...)
+
+event: message
+data: {"chat_completion":{"id":"chatcmpl-Ae0TWsy2VPnSfBbv5UztnSdYUMFP3","choices":[],"model":"gpt-4o-2024-08-06","object":"chat.completion.chunk","usage":{"completion_tokens":28,"prompt_tokens":16,"total_tokens":44}}} <1>
+
+event: message
+data: [DONE]
+------------------------------------------------------------
+// NOTCONSOLE
+
+<1> The last object message of the stream contains the token usage information.
diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc
index ca273afc478ea..4f27409973ca2 100644
--- a/docs/reference/inference/inference-apis.asciidoc
+++ b/docs/reference/inference/inference-apis.asciidoc
@@ -26,6 +26,7 @@ the following APIs to manage {infer} models and perform {infer}:
* <>
* <>
* <>
+* <>
* <>
[[inference-landscape]]
@@ -34,9 +35,9 @@ image::images/inference-landscape.jpg[A representation of the Elastic inference
An {infer} endpoint enables you to use the corresponding {ml} model without
manual deployment and apply it to your data at ingestion time through
-<>.
+<>.
-Choose a model from your provider or use ELSER – a retrieval model trained by
+Choose a model from your provider or use ELSER – a retrieval model trained by
Elastic –, then create an {infer} endpoint by the <>.
Now use <> to perform
<> on your data.
@@ -67,7 +68,7 @@ The following list contains the default {infer} endpoints listed by `inference_i
Use the `inference_id` of the endpoint in a <> field definition or when creating an <>.
The API call will automatically download and deploy the model which might take a couple of minutes.
Default {infer} enpoints have {ml-docs}/ml-nlp-auto-scale.html#nlp-model-adaptive-allocations[adaptive allocations] enabled.
-For these models, the minimum number of allocations is `0`.
+For these models, the minimum number of allocations is `0`.
If there is no {infer} activity that uses the endpoint, the number of allocations will scale down to `0` automatically after 15 minutes.
@@ -84,7 +85,7 @@ Returning a long document in search results is less useful than providing the mo
Each chunk will include the text subpassage and the corresponding embedding generated from it.
By default, documents are split into sentences and grouped in sections up to 250 words with 1 sentence overlap so that each chunk shares a sentence with the previous chunk.
-Overlapping ensures continuity and prevents vital contextual information in the input text from being lost by a hard break.
+Overlapping ensures continuity and prevents vital contextual information in the input text from being lost by a hard break.
{es} uses the https://unicode-org.github.io/icu-docs/[ICU4J] library to detect word and sentence boundaries for chunking.
https://unicode-org.github.io/icu/userguide/boundaryanalysis/#word-boundary[Word boundaries] are identified by following a series of rules, not just the presence of a whitespace character.
@@ -135,6 +136,7 @@ PUT _inference/sparse_embedding/small_chunk_size
include::delete-inference.asciidoc[]
include::get-inference.asciidoc[]
include::post-inference.asciidoc[]
+include::chat-completion-inference.asciidoc[]
include::put-inference.asciidoc[]
include::stream-inference.asciidoc[]
include::update-inference.asciidoc[]
diff --git a/docs/reference/inference/inference-shared.asciidoc b/docs/reference/inference/inference-shared.asciidoc
index da497c6581e5d..b133c54082810 100644
--- a/docs/reference/inference/inference-shared.asciidoc
+++ b/docs/reference/inference/inference-shared.asciidoc
@@ -41,7 +41,7 @@ end::chunking-settings[]
tag::chunking-settings-max-chunking-size[]
Specifies the maximum size of a chunk in words.
Defaults to `250`.
-This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy).
+This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy).
end::chunking-settings-max-chunking-size[]
tag::chunking-settings-overlap[]
@@ -63,4 +63,48 @@ Specifies the chunking strategy.
It could be either `sentence` or `word`.
end::chunking-settings-strategy[]
+tag::chat-completion-schema-content-with-examples[]
+.Examples
+[%collapsible%closed]
+======
+String example
+[source,js]
+------------------------------------------------------------
+{
+ "content": "Some string"
+}
+------------------------------------------------------------
+// NOTCONSOLE
+
+Object example
+[source,js]
+------------------------------------------------------------
+{
+ "content": [
+ {
+ "text": "Some text",
+ "type": "text"
+ }
+ ]
+}
+------------------------------------------------------------
+// NOTCONSOLE
+======
+
+String representation:::
+(Required, string)
+The text content.
++
+Object representation:::
+`text`::::
+(Required, string)
+The text content.
++
+`type`::::
+(Required, string)
+This must be set to the value `text`.
+end::chat-completion-schema-content-with-examples[]
+tag::chat-completion-docs[]
+For more information on how to use the `chat_completion` task type, please refer to the <>.
+end::chat-completion-docs[]
diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc
index f0c15323863d7..da07d1d3e7d84 100644
--- a/docs/reference/inference/put-inference.asciidoc
+++ b/docs/reference/inference/put-inference.asciidoc
@@ -42,7 +42,7 @@ include::inference-shared.asciidoc[tag=inference-id]
include::inference-shared.asciidoc[tag=task-type]
+
--
-Refer to the service list in the <> for the available task types.
+Refer to the service list in the <> for the available task types.
--
@@ -61,7 +61,7 @@ The create {infer} API enables you to create an {infer} endpoint and configure a
The following services are available through the {infer} API.
-You can find the available task types next to the service name.
+You can find the available task types next to the service name.
Click the links to review the configuration details of the services:
* <> (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)
@@ -73,10 +73,10 @@ Click the links to review the configuration details of the services:
* <> (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)
* <> (`sparse_embedding`)
* <> (`completion`, `text_embedding`)
-* <> (`rerank`, `text_embedding`)
+* <> (`rerank`, `text_embedding`)
* <> (`text_embedding`)
* <> (`text_embedding`)
-* <> (`completion`, `text_embedding`)
+* <> (`chat_completion`, `completion`, `text_embedding`)
* <> (`text_embedding`)
* <> (`text_embedding`, `rerank`)
diff --git a/docs/reference/inference/service-openai.asciidoc b/docs/reference/inference/service-openai.asciidoc
index e4be7f18e09dd..590f280b1c494 100644
--- a/docs/reference/inference/service-openai.asciidoc
+++ b/docs/reference/inference/service-openai.asciidoc
@@ -31,10 +31,18 @@ include::inference-shared.asciidoc[tag=task-type]
--
Available task types:
+* `chat_completion`,
* `completion`,
* `text_embedding`.
--
+[NOTE]
+====
+The `chat_completion` task type only supports streaming and only through the `_unified` API.
+
+include::inference-shared.asciidoc[tag=chat-completion-docs]
+====
+
[discrete]
[[infer-service-openai-api-request-body]]
==== {api-request-body-title}
@@ -61,7 +69,7 @@ include::inference-shared.asciidoc[tag=chunking-settings-strategy]
`service`::
(Required, string)
-The type of service supported for the specified task type. In this case,
+The type of service supported for the specified task type. In this case,
`openai`.
`service_settings`::
@@ -176,4 +184,4 @@ PUT _inference/completion/openai-completion
}
}
------------------------------------------------------------
-// TEST[skip:TBD]
\ No newline at end of file
+// TEST[skip:TBD]
diff --git a/docs/reference/inference/stream-inference.asciidoc b/docs/reference/inference/stream-inference.asciidoc
index 42abb589f9afd..4a3ce31909712 100644
--- a/docs/reference/inference/stream-inference.asciidoc
+++ b/docs/reference/inference/stream-inference.asciidoc
@@ -38,8 +38,12 @@ However, if you do not plan to use the {infer} APIs to use these models or if yo
==== {api-description-title}
The stream {infer} API enables real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation.
-It only works with the `completion` task type.
+It only works with the `completion` and `chat_completion` task types.
+[NOTE]
+====
+include::inference-shared.asciidoc[tag=chat-completion-docs]
+====
[discrete]
[[stream-inference-api-path-params]]
diff --git a/docs/reference/query-dsl/bool-query.asciidoc b/docs/reference/query-dsl/bool-query.asciidoc
index c24135a370914..27220f0d85149 100644
--- a/docs/reference/query-dsl/bool-query.asciidoc
+++ b/docs/reference/query-dsl/bool-query.asciidoc
@@ -13,21 +13,24 @@ occurrence types are:
|=======================================================================
|Occur |Description
|`must` |The clause (query) must appear in matching documents and will
-contribute to the score.
+contribute to the score. Each query defined under a `must` acts as a logical "AND", returning only documents that match _all_ the specified queries.
+
+|`should` |The clause (query) should appear in the matching document. Each query defined under a `should` acts as a logical "OR", returning documents that match _any_ of the specified queries.
|`filter` |The clause (query) must appear in matching documents. However unlike
`must` the score of the query will be ignored. Filter clauses are executed
in <>, meaning that scoring is ignored
-and clauses are considered for caching.
-
-|`should` |The clause (query) should appear in the matching document.
+and clauses are considered for caching. Each query defined under a `filter` acts as a logical "AND", returning only documents that match _all_ the specified queries.
|`must_not` |The clause (query) must not appear in the matching
documents. Clauses are executed in <> meaning
that scoring is ignored and clauses are considered for caching. Because scoring is
-ignored, a score of `0` for all documents is returned.
+ignored, a score of `0` for all documents is returned. Each query defined under a `must_not` acts as a logical "NOT", returning only documents that do not match any of the specified queries.
+
|=======================================================================
+The `must` and `should` clauses function as logical AND, OR operators, contributing to the scoring of results. However, these results will not be cached for faster retrieval. In contrast, the `filter` and `must_not` clauses are used to include or exclude results without impacting the score, unless used within a `constant_score` query.
+
The `bool` query takes a _more-matches-is-better_ approach, so the score from
each matching `must` or `should` clause will be added together to provide the
final `_score` for each document.
diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc
index c3bf84fa600d2..9c0f0092214ed 100644
--- a/docs/reference/redirects.asciidoc
+++ b/docs/reference/redirects.asciidoc
@@ -156,10 +156,16 @@ See <>.
The freeze index API was removed in 8.0.
// tag::frozen-removal-explanation[]
Frozen indices are no longer useful due to
-https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[recent
-improvements in heap memory usage].
+https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[improvements
+in heap memory usage].
// end::frozen-removal-explanation[]
+[role="exclude",id="unfreeze-index-api"]
+=== Unfreeze index API
+
+The unfreeze index API was removed in 9.0.
+include::redirects.asciidoc[tag=frozen-removal-explanation]
+
[role="exclude",id="ilm-freeze"]
=== Freeze {ilm-init} action
@@ -1749,8 +1755,10 @@ See <>.
=== Frozen indices
// tag::frozen-index-redirect[]
-
-For API documentation, see <>.
+Older versions of {es} provided the option to reduce the amount of data kept in memory for an index, at the expense of
+increasing search latency. This was known as 'freezing' the index.
+include::redirects.asciidoc[tag=frozen-removal-explanation]
+The freeze index API was removed in 8.0, and the unfreeze index API was removed in 9.0.
// end::frozen-index-redirect[]
[role="exclude",id="best_practices"]
diff --git a/docs/reference/sql/language/indices.asciidoc b/docs/reference/sql/language/indices.asciidoc
index 1dee7f0840ade..1912a020ab0be 100644
--- a/docs/reference/sql/language/indices.asciidoc
+++ b/docs/reference/sql/language/indices.asciidoc
@@ -100,7 +100,7 @@ requires the keyword `LIKE` for SQL `LIKE` pattern.
[[sql-index-frozen]]
=== Frozen Indices
-By default, {es-sql} doesn't search <>. To
+By default, {es-sql} doesn't search <>. To
search frozen indices, use one of the following features:
dedicated configuration parameter::
diff --git a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java
index 3a359eb921fc8..69fc57973f68a 100644
--- a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java
+++ b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java
@@ -13,10 +13,16 @@
import java.io.PrintStream;
import java.io.PrintWriter;
import java.net.ContentHandlerFactory;
+import java.net.DatagramPacket;
+import java.net.DatagramSocket;
import java.net.DatagramSocketImplFactory;
import java.net.FileNameMap;
+import java.net.InetAddress;
+import java.net.MulticastSocket;
+import java.net.NetworkInterface;
import java.net.ProxySelector;
import java.net.ResponseCache;
+import java.net.SocketAddress;
import java.net.SocketImplFactory;
import java.net.URL;
import java.net.URLStreamHandler;
@@ -189,4 +195,28 @@ public interface EntitlementChecker {
// The only implementation of SSLSession#getSessionContext(); unfortunately it's an interface, so we need to check the implementation
void check$sun_security_ssl_SSLSessionImpl$getSessionContext(Class<?> callerClass, SSLSession sslSession);
+
+ void check$java_net_DatagramSocket$bind(Class<?> callerClass, DatagramSocket that, SocketAddress addr);
+
+ void check$java_net_DatagramSocket$connect(Class<?> callerClass, DatagramSocket that, InetAddress addr);
+
+ void check$java_net_DatagramSocket$connect(Class<?> callerClass, DatagramSocket that, SocketAddress addr);
+
+ void check$java_net_DatagramSocket$send(Class<?> callerClass, DatagramSocket that, DatagramPacket p);
+
+ void check$java_net_DatagramSocket$receive(Class<?> callerClass, DatagramSocket that, DatagramPacket p);
+
+ void check$java_net_DatagramSocket$joinGroup(Class<?> callerClass, DatagramSocket that, SocketAddress addr, NetworkInterface ni);
+
+ void check$java_net_DatagramSocket$leaveGroup(Class<?> callerClass, DatagramSocket that, SocketAddress addr, NetworkInterface ni);
+
+ void check$java_net_MulticastSocket$joinGroup(Class<?> callerClass, MulticastSocket that, InetAddress addr);
+
+ void check$java_net_MulticastSocket$joinGroup(Class<?> callerClass, MulticastSocket that, SocketAddress addr, NetworkInterface ni);
+
+ void check$java_net_MulticastSocket$leaveGroup(Class<?> callerClass, MulticastSocket that, InetAddress addr);
+
+ void check$java_net_MulticastSocket$leaveGroup(Class<?> callerClass, MulticastSocket that, SocketAddress addr, NetworkInterface ni);
+
+ void check$java_net_MulticastSocket$send(Class<?> callerClass, MulticastSocket that, DatagramPacket p, byte ttl);
}
diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java
index 6dbb684c71514..fae873123528d 100644
--- a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java
+++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/DummyImplementations.java
@@ -9,8 +9,15 @@
package org.elasticsearch.entitlement.qa.common;
+import java.io.IOException;
+import java.net.DatagramPacket;
+import java.net.DatagramSocket;
+import java.net.DatagramSocketImpl;
import java.net.InetAddress;
+import java.net.NetworkInterface;
import java.net.Socket;
+import java.net.SocketAddress;
+import java.net.SocketException;
import java.security.cert.Certificate;
import java.text.BreakIterator;
import java.text.Collator;
@@ -327,8 +334,77 @@ public Socket createSocket(Socket s, String host, int port, boolean autoClose) {
}
}
+ static class DummyDatagramSocket extends DatagramSocket {
+ DummyDatagramSocket() throws SocketException {
+ super(new DatagramSocketImpl() {
+ @Override
+ protected void create() throws SocketException {}
+
+ @Override
+ protected void bind(int lport, InetAddress laddr) throws SocketException {}
+
+ @Override
+ protected void send(DatagramPacket p) throws IOException {}
+
+ @Override
+ protected int peek(InetAddress i) throws IOException {
+ return 0;
+ }
+
+ @Override
+ protected int peekData(DatagramPacket p) throws IOException {
+ return 0;
+ }
+
+ @Override
+ protected void receive(DatagramPacket p) throws IOException {}
+
+ @Override
+ protected void setTTL(byte ttl) throws IOException {}
+
+ @Override
+ protected byte getTTL() throws IOException {
+ return 0;
+ }
+
+ @Override
+ protected void setTimeToLive(int ttl) throws IOException {}
+
+ @Override
+ protected int getTimeToLive() throws IOException {
+ return 0;
+ }
+
+ @Override
+ protected void join(InetAddress inetaddr) throws IOException {}
+
+ @Override
+ protected void leave(InetAddress inetaddr) throws IOException {}
+
+ @Override
+ protected void joinGroup(SocketAddress mcastaddr, NetworkInterface netIf) throws IOException {}
+
+ @Override
+ protected void leaveGroup(SocketAddress mcastaddr, NetworkInterface netIf) throws IOException {}
+
+ @Override
+ protected void close() {}
+
+ @Override
+ public void setOption(int optID, Object value) throws SocketException {}
+
+ @Override
+ public Object getOption(int optID) throws SocketException {
+ return null;
+ }
+
+ @Override
+ protected void connect(InetAddress address, int port) throws SocketException {}
+ });
+ }
+ }
+
private static RuntimeException unexpected() {
return new IllegalStateException("This method isn't supposed to be called");
}
-
}
diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java
index 1dd8daf556226..3a5480f468528 100644
--- a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java
+++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java
@@ -11,6 +11,7 @@
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.Strings;
+import org.elasticsearch.core.CheckedRunnable;
import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyBreakIteratorProvider;
import org.elasticsearch.entitlement.qa.common.DummyImplementations.DummyCalendarDataProvider;
@@ -32,14 +33,18 @@
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
-import java.io.UncheckedIOException;
+import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.HttpURLConnection;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
import java.net.MalformedURLException;
+import java.net.NetworkInterface;
import java.net.ProxySelector;
import java.net.ResponseCache;
import java.net.ServerSocket;
import java.net.Socket;
+import java.net.SocketException;
import java.net.URL;
import java.net.URLClassLoader;
import java.net.URLConnection;
@@ -71,20 +76,20 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
public static final Thread NO_OP_SHUTDOWN_HOOK = new Thread(() -> {}, "Shutdown hook for testing");
private final String prefix;
- record CheckAction(Runnable action, boolean isAlwaysDeniedToPlugins) {
+ record CheckAction(CheckedRunnable<Exception> action, boolean isAlwaysDeniedToPlugins) {
/**
* These cannot be granted to plugins, so our test plugins cannot test the "allowed" case.
* Used both for always-denied entitlements as well as those granted only to the server itself.
*/
- static CheckAction deniedToPlugins(Runnable action) {
+ static CheckAction deniedToPlugins(CheckedRunnable<Exception> action) {
return new CheckAction(action, true);
}
- static CheckAction forPlugins(Runnable action) {
+ static CheckAction forPlugins(CheckedRunnable<Exception> action) {
return new CheckAction(action, false);
}
- static CheckAction alwaysDenied(Runnable action) {
+ static CheckAction alwaysDenied(CheckedRunnable<Exception> action) {
return new CheckAction(action, true);
}
}
@@ -142,7 +147,13 @@ static CheckAction alwaysDenied(Runnable action) {
entry("createURLStreamHandlerProvider", alwaysDenied(RestEntitlementsCheckAction::createURLStreamHandlerProvider)),
entry("createURLWithURLStreamHandler", alwaysDenied(RestEntitlementsCheckAction::createURLWithURLStreamHandler)),
entry("createURLWithURLStreamHandler2", alwaysDenied(RestEntitlementsCheckAction::createURLWithURLStreamHandler2)),
- entry("sslSessionImpl_getSessionContext", alwaysDenied(RestEntitlementsCheckAction::sslSessionImplGetSessionContext))
+ entry("sslSessionImpl_getSessionContext", alwaysDenied(RestEntitlementsCheckAction::sslSessionImplGetSessionContext)),
+ entry("datagram_socket_bind", forPlugins(RestEntitlementsCheckAction::bindDatagramSocket)),
+ entry("datagram_socket_connect", forPlugins(RestEntitlementsCheckAction::connectDatagramSocket)),
+ entry("datagram_socket_send", forPlugins(RestEntitlementsCheckAction::sendDatagramSocket)),
+ entry("datagram_socket_receive", forPlugins(RestEntitlementsCheckAction::receiveDatagramSocket)),
+ entry("datagram_socket_join_group", forPlugins(RestEntitlementsCheckAction::joinGroupDatagramSocket)),
+ entry("datagram_socket_leave_group", forPlugins(RestEntitlementsCheckAction::leaveGroupDatagramSocket))
);
private static void createURLStreamHandlerProvider() {
@@ -154,43 +165,33 @@ public URLStreamHandler createURLStreamHandler(String protocol) {
};
}
- private static void sslSessionImplGetSessionContext() {
+ private static void sslSessionImplGetSessionContext() throws IOException {
SSLSocketFactory factory = HttpsURLConnection.getDefaultSSLSocketFactory();
try (SSLSocket socket = (SSLSocket) factory.createSocket()) {
SSLSession session = socket.getSession();
session.getSessionContext();
- } catch (IOException e) {
- throw new RuntimeException(e);
}
}
@SuppressWarnings("deprecation")
- private static void createURLWithURLStreamHandler() {
- try {
- var x = new URL("http", "host", 1234, "file", new URLStreamHandler() {
- @Override
- protected URLConnection openConnection(URL u) {
- return null;
- }
- });
- } catch (MalformedURLException e) {
- throw new RuntimeException(e);
- }
+ private static void createURLWithURLStreamHandler() throws MalformedURLException {
+ var x = new URL("http", "host", 1234, "file", new URLStreamHandler() {
+ @Override
+ protected URLConnection openConnection(URL u) {
+ return null;
+ }
+ });
}
@SuppressWarnings("deprecation")
- private static void createURLWithURLStreamHandler2() {
- try {
- var x = new URL(null, "spec", new URLStreamHandler() {
- @Override
- protected URLConnection openConnection(URL u) {
- return null;
- }
- });
- } catch (MalformedURLException e) {
- throw new RuntimeException(e);
- }
+ private static void createURLWithURLStreamHandler2() throws MalformedURLException {
+ var x = new URL(null, "spec", new URLStreamHandler() {
+ @Override
+ protected URLConnection openConnection(URL u) {
+ return null;
+ }
+ });
}
private static void createInetAddressResolverProvider() {
@@ -215,12 +216,8 @@ private static void setDefaultProxySelector() {
ProxySelector.setDefault(null);
}
- private static void setDefaultSSLContext() {
- try {
- SSLContext.setDefault(SSLContext.getDefault());
- } catch (NoSuchAlgorithmException e) {
- throw new RuntimeException(e);
- }
+ private static void setDefaultSSLContext() throws NoSuchAlgorithmException {
+ SSLContext.setDefault(SSLContext.getDefault());
}
private static void setDefaultHostnameVerifier() {
@@ -246,28 +243,18 @@ private static void systemExit() {
System.exit(123);
}
- private static void createClassLoader() {
+ private static void createClassLoader() throws IOException {
try (var classLoader = new URLClassLoader("test", new URL[0], RestEntitlementsCheckAction.class.getClassLoader())) {
logger.info("Created URLClassLoader [{}]", classLoader.getName());
- } catch (IOException e) {
- throw new UncheckedIOException(e);
}
}
- private static void processBuilder_start() {
- try {
- new ProcessBuilder("").start();
- } catch (IOException e) {
- throw new IllegalStateException(e);
- }
+ private static void processBuilder_start() throws IOException {
+ new ProcessBuilder("").start();
}
- private static void processBuilder_startPipeline() {
- try {
- ProcessBuilder.startPipeline(List.of());
- } catch (IOException e) {
- throw new IllegalStateException(e);
- }
+ private static void processBuilder_startPipeline() throws IOException {
+ ProcessBuilder.startPipeline(List.of());
}
private static void setHttpsConnectionProperties() {
@@ -355,12 +342,8 @@ private static void setHttpsConnectionProperties() {
@SuppressWarnings("deprecation")
@SuppressForbidden(reason = "We're required to prevent calls to this forbidden API")
- private static void datagramSocket$$setDatagramSocketImplFactory() {
- try {
- DatagramSocket.setDatagramSocketImplFactory(() -> { throw new IllegalStateException(); });
- } catch (IOException e) {
- throw new IllegalStateException(e);
- }
+ private static void datagramSocket$$setDatagramSocketImplFactory() throws IOException {
+ DatagramSocket.setDatagramSocketImplFactory(() -> { throw new IllegalStateException(); });
}
private static void httpURLConnection$$setFollowRedirects() {
@@ -369,22 +352,14 @@ private static void setHttpsConnectionProperties() {
@SuppressWarnings("deprecation")
@SuppressForbidden(reason = "We're required to prevent calls to this forbidden API")
- private static void serverSocket$$setSocketFactory() {
- try {
- ServerSocket.setSocketFactory(() -> { throw new IllegalStateException(); });
- } catch (IOException e) {
- throw new IllegalStateException(e);
- }
+ private static void serverSocket$$setSocketFactory() throws IOException {
+ ServerSocket.setSocketFactory(() -> { throw new IllegalStateException(); });
}
@SuppressWarnings("deprecation")
@SuppressForbidden(reason = "We're required to prevent calls to this forbidden API")
- private static void socket$$setSocketImplFactory() {
- try {
- Socket.setSocketImplFactory(() -> { throw new IllegalStateException(); });
- } catch (IOException e) {
- throw new IllegalStateException(e);
- }
+ private static void socket$$setSocketImplFactory() throws IOException {
+ Socket.setSocketImplFactory(() -> { throw new IllegalStateException(); });
}
private static void url$$setURLStreamHandlerFactory() {
@@ -399,6 +374,51 @@ private static void setHttpsConnectionProperties() {
URLConnection.setContentHandlerFactory(__ -> { throw new IllegalStateException(); });
}
+ private static void bindDatagramSocket() throws SocketException {
+ try (var socket = new DatagramSocket(null)) {
+ socket.bind(null);
+ }
+ }
+
+ @SuppressForbidden(reason = "testing entitlements")
+ private static void connectDatagramSocket() throws SocketException {
+ try (var socket = new DummyImplementations.DummyDatagramSocket()) {
+ socket.connect(new InetSocketAddress(1234));
+ }
+ }
+
+ private static void joinGroupDatagramSocket() throws IOException {
+ try (var socket = new DummyImplementations.DummyDatagramSocket()) {
+ socket.joinGroup(
+ new InetSocketAddress(InetAddress.getByAddress(new byte[] { (byte) 230, 0, 0, 1 }), 1234),
+ NetworkInterface.getByIndex(0)
+ );
+ }
+ }
+
+ private static void leaveGroupDatagramSocket() throws IOException {
+ try (var socket = new DummyImplementations.DummyDatagramSocket()) {
+ socket.leaveGroup(
+ new InetSocketAddress(InetAddress.getByAddress(new byte[] { (byte) 230, 0, 0, 1 }), 1234),
+ NetworkInterface.getByIndex(0)
+ );
+ }
+ }
+
+ @SuppressForbidden(reason = "testing entitlements")
+ private static void sendDatagramSocket() throws IOException {
+ try (var socket = new DummyImplementations.DummyDatagramSocket()) {
+ socket.send(new DatagramPacket(new byte[] { 0 }, 1, InetAddress.getLocalHost(), 1234));
+ }
+ }
+
+ @SuppressForbidden(reason = "testing entitlements")
+ private static void receiveDatagramSocket() throws IOException {
+ try (var socket = new DummyImplementations.DummyDatagramSocket()) {
+ socket.receive(new DatagramPacket(new byte[1], 1, InetAddress.getLocalHost(), 1234));
+ }
+ }
+
public RestEntitlementsCheckAction(String prefix) {
this.prefix = prefix;
}
diff --git a/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml
index 30fc9f0abeec0..05a94f09264a8 100644
--- a/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml
+++ b/libs/entitlement/qa/entitlement-allowed-nonmodular/src/main/plugin-metadata/entitlement-policy.yaml
@@ -1,3 +1,8 @@
ALL-UNNAMED:
- create_class_loader
- set_https_connection_properties
+ - network:
+ actions:
+ - listen
+ - accept
+ - connect
diff --git a/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml b/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml
index 0a25570a9f624..0d2c66c2daa2c 100644
--- a/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml
+++ b/libs/entitlement/qa/entitlement-allowed/src/main/plugin-metadata/entitlement-policy.yaml
@@ -1,3 +1,8 @@
org.elasticsearch.entitlement.qa.common:
- create_class_loader
- set_https_connection_properties
+ - network:
+ actions:
+ - listen
+ - accept
+ - connect
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java
index ca4aaceabcebf..dd39ec3c5fe43 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java
@@ -10,16 +10,23 @@
package org.elasticsearch.entitlement.runtime.api;
import org.elasticsearch.entitlement.bridge.EntitlementChecker;
+import org.elasticsearch.entitlement.runtime.policy.NetworkEntitlement;
import org.elasticsearch.entitlement.runtime.policy.PolicyManager;
import java.io.InputStream;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.net.ContentHandlerFactory;
+import java.net.DatagramPacket;
+import java.net.DatagramSocket;
import java.net.DatagramSocketImplFactory;
import java.net.FileNameMap;
+import java.net.InetAddress;
+import java.net.MulticastSocket;
+import java.net.NetworkInterface;
import java.net.ProxySelector;
import java.net.ResponseCache;
+import java.net.SocketAddress;
import java.net.SocketImplFactory;
import java.net.URL;
import java.net.URLStreamHandler;
@@ -349,4 +356,68 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) {
public void check$sun_security_ssl_SSLSessionImpl$getSessionContext(Class<?> callerClass, SSLSession sslSession) {
policyManager.checkReadSensitiveNetworkInformation(callerClass);
}
+
+ @Override
+ public void check$java_net_DatagramSocket$bind(Class<?> callerClass, DatagramSocket that, SocketAddress addr) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.LISTEN_ACTION);
+ }
+
+ @Override
+ public void check$java_net_DatagramSocket$connect(Class<?> callerClass, DatagramSocket that, InetAddress addr) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_DatagramSocket$connect(Class<?> callerClass, DatagramSocket that, SocketAddress addr) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_DatagramSocket$send(Class<?> callerClass, DatagramSocket that, DatagramPacket p) {
+ var actions = NetworkEntitlement.CONNECT_ACTION;
+ if (p.getAddress().isMulticastAddress()) {
+ actions |= NetworkEntitlement.ACCEPT_ACTION;
+ }
+ policyManager.checkNetworkAccess(callerClass, actions);
+ }
+
+ @Override
+ public void check$java_net_DatagramSocket$receive(Class<?> callerClass, DatagramSocket that, DatagramPacket p) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_DatagramSocket$joinGroup(Class<?> caller, DatagramSocket that, SocketAddress addr, NetworkInterface ni) {
+ policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_DatagramSocket$leaveGroup(Class<?> caller, DatagramSocket that, SocketAddress addr, NetworkInterface ni) {
+ policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_MulticastSocket$joinGroup(Class<?> callerClass, MulticastSocket that, InetAddress addr) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_MulticastSocket$joinGroup(Class<?> caller, MulticastSocket that, SocketAddress addr, NetworkInterface ni) {
+ policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_MulticastSocket$leaveGroup(Class<?> caller, MulticastSocket that, InetAddress addr) {
+ policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_MulticastSocket$leaveGroup(Class<?> caller, MulticastSocket that, SocketAddress addr, NetworkInterface ni) {
+ policyManager.checkNetworkAccess(caller, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
+
+ @Override
+ public void check$java_net_MulticastSocket$send(Class<?> callerClass, MulticastSocket that, DatagramPacket p, byte ttl) {
+ policyManager.checkNetworkAccess(callerClass, NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION);
+ }
}
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlement.java
new file mode 100644
index 0000000000000..b6c6a41d5be7f
--- /dev/null
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlement.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.runtime.policy;
+
+import org.elasticsearch.core.Strings;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.StringJoiner;
+
+import static java.util.Map.entry;
+
+/**
+ * Describes a network entitlement (sockets) with actions.
+ */
+public class NetworkEntitlement implements Entitlement {
+
+ public static final int LISTEN_ACTION = 0x1;
+ public static final int CONNECT_ACTION = 0x2;
+ public static final int ACCEPT_ACTION = 0x4;
+
+ static final String LISTEN = "listen";
+ static final String CONNECT = "connect";
+ static final String ACCEPT = "accept";
+
+ private static final Map<String, Integer> ACTION_MAP = Map.ofEntries(
+ entry(LISTEN, LISTEN_ACTION),
+ entry(CONNECT, CONNECT_ACTION),
+ entry(ACCEPT, ACCEPT_ACTION)
+ );
+
+ private final int actions;
+
+ @ExternalEntitlement(parameterNames = { "actions" }, esModulesOnly = false)
+ public NetworkEntitlement(List<String> actionsList) {
+
+ int actionsInt = 0;
+
+ for (String actionString : actionsList) {
+ var action = ACTION_MAP.get(actionString);
+ if (action == null) {
+ throw new IllegalArgumentException("unknown network action [" + actionString + "]");
+ }
+ if ((actionsInt & action) == action) {
+ throw new IllegalArgumentException(Strings.format("network action [%s] specified multiple times", actionString));
+ }
+ actionsInt |= action;
+ }
+
+ this.actions = actionsInt;
+ }
+
+ public static Object printActions(int actions) {
+ var joiner = new StringJoiner(",");
+ for (var entry : ACTION_MAP.entrySet()) {
+ var action = entry.getValue();
+ if ((actions & action) == action) {
+ joiner.add(entry.getKey());
+ }
+ }
+ return joiner.toString();
+ }
+
+ /**
+ * For the actions to match, the actions present in this entitlement must be a superset
+ * of the actions required by a check.
+ * There is only one "negative" case (action required by the check but not present in the entitlement),
+ * and it can be expressed efficiently via this truth table:
+ * this.actions | requiredActions |
+ * 0 | 0 | 0
+ * 0 | 1 | 1 --> NOT this.action AND requiredActions
+ * 1 | 0 | 0
+ * 1 | 1 | 0
+ *
+ * @param requiredActions the actions required to be present for a check to pass
+ * @return true if requiredActions are present, false otherwise
+ */
+ public boolean matchActions(int requiredActions) {
+ return (~this.actions & requiredActions) == 0;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ NetworkEntitlement that = (NetworkEntitlement) o;
+ return actions == that.actions;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(actions);
+ }
+
+ @Override
+ public String toString() {
+ return "NetworkEntitlement{actions=" + actions + '}';
+ }
+}
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
index 57449a23a8215..f039fbda3dfbd 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java
@@ -52,7 +52,11 @@ public boolean hasEntitlement(Class<? extends Entitlement> entitlementClass) {
}
public <E extends Entitlement> Stream<E> getEntitlements(Class<E> entitlementClass) {
- return entitlementsByType.get(entitlementClass).stream().map(entitlementClass::cast);
+ var entitlements = entitlementsByType.get(entitlementClass);
+ if (entitlements == null) {
+ return Stream.empty();
+ }
+ return entitlements.stream().map(entitlementClass::cast);
}
}
@@ -190,6 +194,34 @@ private String operationDescription(String methodName) {
return methodName.substring(methodName.indexOf('$'));
}
+ public void checkNetworkAccess(Class<?> callerClass, int actions) {
+ var requestingClass = requestingClass(callerClass);
+ if (isTriviallyAllowed(requestingClass)) {
+ return;
+ }
+
+ ModuleEntitlements entitlements = getEntitlements(requestingClass);
+ if (entitlements.getEntitlements(NetworkEntitlement.class).anyMatch(n -> n.matchActions(actions))) {
+ logger.debug(
+ () -> Strings.format(
+ "Entitled: class [%s], module [%s], entitlement [Network], actions [Ox%X]",
+ requestingClass,
+ requestingClass.getModule().getName(),
+ actions
+ )
+ );
+ return;
+ }
+ throw new NotEntitledException(
+ Strings.format(
+ "Missing entitlement: class [%s], module [%s], entitlement [Network], actions [%s]",
+ requestingClass,
+ requestingClass.getModule().getName(),
+ NetworkEntitlement.printActions(actions)
+ )
+ );
+ }
+
private void checkEntitlementPresent(Class<?> callerClass, Class<? extends Entitlement> entitlementClass) {
var requestingClass = requestingClass(callerClass);
if (isTriviallyAllowed(requestingClass)) {
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java
index 013acf8f22fae..ac4d4afdd97f8 100644
--- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java
+++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java
@@ -37,7 +37,8 @@ public class PolicyParser {
private static final Map<String, Class<? extends Entitlement>> EXTERNAL_ENTITLEMENTS = Stream.of(
FileEntitlement.class,
CreateClassLoaderEntitlement.class,
- SetHttpsConnectionPropertiesEntitlement.class
+ SetHttpsConnectionPropertiesEntitlement.class,
+ NetworkEntitlement.class
).collect(Collectors.toUnmodifiableMap(PolicyParser::getEntitlementTypeName, Function.identity()));
protected final XContentParser policyParser;
diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlementTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlementTests.java
new file mode 100644
index 0000000000000..91051d48c365f
--- /dev/null
+++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/NetworkEntitlementTests.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.runtime.policy;
+
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.List;
+
+import static org.hamcrest.Matchers.is;
+
+public class NetworkEntitlementTests extends ESTestCase {
+
+ public void testMatchesActions() {
+ var listenEntitlement = new NetworkEntitlement(List.of(NetworkEntitlement.LISTEN));
+ var emptyEntitlement = new NetworkEntitlement(List.of());
+ var connectAcceptEntitlement = new NetworkEntitlement(List.of(NetworkEntitlement.CONNECT, NetworkEntitlement.ACCEPT));
+
+ assertThat(listenEntitlement.matchActions(0), is(true));
+ assertThat(listenEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION), is(true));
+ assertThat(listenEntitlement.matchActions(NetworkEntitlement.ACCEPT_ACTION), is(false));
+ assertThat(listenEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION), is(false));
+ assertThat(listenEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false));
+ assertThat(listenEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION), is(false));
+ assertThat(listenEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false));
+
+ assertThat(connectAcceptEntitlement.matchActions(0), is(true));
+ assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION), is(false));
+ assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.ACCEPT_ACTION), is(true));
+ assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION), is(true));
+ assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false));
+ assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION), is(false));
+ assertThat(connectAcceptEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(true));
+
+ assertThat(emptyEntitlement.matchActions(0), is(true));
+ assertThat(emptyEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION), is(false));
+ assertThat(emptyEntitlement.matchActions(NetworkEntitlement.ACCEPT_ACTION), is(false));
+ assertThat(emptyEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION), is(false));
+ assertThat(emptyEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false));
+ assertThat(emptyEntitlement.matchActions(NetworkEntitlement.LISTEN_ACTION | NetworkEntitlement.CONNECT_ACTION), is(false));
+ assertThat(emptyEntitlement.matchActions(NetworkEntitlement.CONNECT_ACTION | NetworkEntitlement.ACCEPT_ACTION), is(false));
+ }
+}
diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java
index 4d17fc92e1578..1e0c31d2280b8 100644
--- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java
+++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java
@@ -52,6 +52,22 @@ public void testPolicyBuilderOnExternalPlugin() throws IOException {
assertEquals(expected, parsedPolicy);
}
+ public void testParseNetwork() throws IOException {
+ Policy parsedPolicy = new PolicyParser(new ByteArrayInputStream("""
+ entitlement-module-name:
+ - network:
+ actions:
+ - listen
+ - accept
+ - connect
+ """.getBytes(StandardCharsets.UTF_8)), "test-policy.yaml", false).parsePolicy();
+ Policy expected = new Policy(
+ "test-policy.yaml",
+ List.of(new Scope("entitlement-module-name", List.of(new NetworkEntitlement(List.of("listen", "accept", "connect")))))
+ );
+ assertEquals(expected, parsedPolicy);
+ }
+
public void testParseCreateClassloader() throws IOException {
Policy parsedPolicy = new PolicyParser(new ByteArrayInputStream("""
entitlement-module-name:
diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java
index 339a4ec24ca13..43447cfa21a62 100644
--- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java
+++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java
@@ -92,14 +92,7 @@ public List> getSettings() {
APMAgentSettings.TELEMETRY_TRACING_ENABLED_SETTING,
APMAgentSettings.TELEMETRY_TRACING_NAMES_INCLUDE_SETTING,
APMAgentSettings.TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING,
- APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES,
- // The settings below are deprecated and are currently kept as fallback.
- APMAgentSettings.TRACING_APM_SECRET_TOKEN_SETTING,
- APMAgentSettings.TRACING_APM_API_KEY_SETTING,
- APMAgentSettings.TRACING_APM_ENABLED_SETTING,
- APMAgentSettings.TRACING_APM_NAMES_INCLUDE_SETTING,
- APMAgentSettings.TRACING_APM_NAMES_EXCLUDE_SETTING,
- APMAgentSettings.TRACING_APM_SANITIZE_FIELD_NAMES
+ APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES
);
}
}
diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java
index f66683a787bc0..8647761e2defe 100644
--- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java
+++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java
@@ -25,9 +25,7 @@
import java.util.List;
import java.util.Objects;
import java.util.Set;
-import java.util.function.Function;
-import static org.elasticsearch.common.settings.Setting.Property.Deprecated;
import static org.elasticsearch.common.settings.Setting.Property.NodeScope;
import static org.elasticsearch.common.settings.Setting.Property.OperatorDynamic;
@@ -101,9 +99,6 @@ public void setAgentSetting(String key, String value) {
private static final String TELEMETRY_SETTING_PREFIX = "telemetry.";
- // The old legacy prefix
- private static final String LEGACY_TRACING_APM_SETTING_PREFIX = "tracing.apm.";
-
/**
* Allow-list of APM agent config keys users are permitted to configure.
* @see APM Java Agent Configuration
@@ -248,56 +243,24 @@ private static Setting concreteAgentSetting(String namespace, String qua
public static final Setting.AffixSetting APM_AGENT_SETTINGS = Setting.prefixKeySetting(
TELEMETRY_SETTING_PREFIX + "agent.",
- LEGACY_TRACING_APM_SETTING_PREFIX + "agent.",
- (namespace, qualifiedKey) -> qualifiedKey.startsWith(LEGACY_TRACING_APM_SETTING_PREFIX)
- ? concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic, Deprecated)
- : concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic)
+ null, // no fallback
+ (namespace, qualifiedKey) -> concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic)
);
- /**
- * @deprecated in favor of TELEMETRY_TRACING_NAMES_INCLUDE_SETTING.
- */
- @Deprecated
- public static final Setting> TRACING_APM_NAMES_INCLUDE_SETTING = Setting.stringListSetting(
- LEGACY_TRACING_APM_SETTING_PREFIX + "names.include",
- OperatorDynamic,
- NodeScope,
- Deprecated
- );
-
- public static final Setting> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.listSetting(
+ public static final Setting> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.stringListSetting(
TELEMETRY_SETTING_PREFIX + "tracing.names.include",
- TRACING_APM_NAMES_INCLUDE_SETTING,
- Function.identity(),
OperatorDynamic,
NodeScope
);
- /**
- * @deprecated in favor of TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING.
- */
- @Deprecated
- public static final Setting> TRACING_APM_NAMES_EXCLUDE_SETTING = Setting.stringListSetting(
- LEGACY_TRACING_APM_SETTING_PREFIX + "names.exclude",
- OperatorDynamic,
- NodeScope,
- Deprecated
- );
-
- public static final Setting> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.listSetting(
+ public static final Setting> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.stringListSetting(
TELEMETRY_SETTING_PREFIX + "tracing.names.exclude",
- TRACING_APM_NAMES_EXCLUDE_SETTING,
- Function.identity(),
OperatorDynamic,
NodeScope
);
- /**
- * @deprecated in favor of TELEMETRY_TRACING_SANITIZE_FIELD_NAMES.
- */
- @Deprecated
- public static final Setting> TRACING_APM_SANITIZE_FIELD_NAMES = Setting.stringListSetting(
- LEGACY_TRACING_APM_SETTING_PREFIX + "sanitize_field_names",
+ public static final Setting> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.stringListSetting(
+ TELEMETRY_SETTING_PREFIX + "tracing.sanitize_field_names",
List.of(
"password",
"passwd",
@@ -313,33 +276,12 @@ private static Setting concreteAgentSetting(String namespace, String qua
"set-cookie"
),
OperatorDynamic,
- NodeScope,
- Deprecated
- );
-
- public static final Setting> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.listSetting(
- TELEMETRY_SETTING_PREFIX + "tracing.sanitize_field_names",
- TRACING_APM_SANITIZE_FIELD_NAMES,
- Function.identity(),
- OperatorDynamic,
NodeScope
);
- /**
- * @deprecated in favor of TELEMETRY_TRACING_ENABLED_SETTING.
- */
- @Deprecated
- public static final Setting TRACING_APM_ENABLED_SETTING = Setting.boolSetting(
- LEGACY_TRACING_APM_SETTING_PREFIX + "enabled",
- false,
- OperatorDynamic,
- NodeScope,
- Deprecated
- );
-
public static final Setting TELEMETRY_TRACING_ENABLED_SETTING = Setting.boolSetting(
TELEMETRY_SETTING_PREFIX + "tracing.enabled",
- TRACING_APM_ENABLED_SETTING,
+ false,
OperatorDynamic,
NodeScope
);
@@ -351,33 +293,13 @@ private static Setting concreteAgentSetting(String namespace, String qua
NodeScope
);
- /**
- * @deprecated in favor of TELEMETRY_SECRET_TOKEN_SETTING.
- */
- @Deprecated
- public static final Setting TRACING_APM_SECRET_TOKEN_SETTING = SecureSetting.secureString(
- LEGACY_TRACING_APM_SETTING_PREFIX + "secret_token",
- null,
- Deprecated
- );
-
public static final Setting TELEMETRY_SECRET_TOKEN_SETTING = SecureSetting.secureString(
TELEMETRY_SETTING_PREFIX + "secret_token",
- TRACING_APM_SECRET_TOKEN_SETTING
- );
-
- /**
- * @deprecated in favor of TELEMETRY_API_KEY_SETTING.
- */
- @Deprecated
- public static final Setting TRACING_APM_API_KEY_SETTING = SecureSetting.secureString(
- LEGACY_TRACING_APM_SETTING_PREFIX + "api_key",
- null,
- Deprecated
+ null
);
public static final Setting TELEMETRY_API_KEY_SETTING = SecureSetting.secureString(
TELEMETRY_SETTING_PREFIX + "api_key",
- TRACING_APM_API_KEY_SETTING
+ null
);
}
diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java
index a60048c82a3c9..5516672420924 100644
--- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java
+++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettingsTests.java
@@ -11,8 +11,6 @@
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.ClusterSettings;
-import org.elasticsearch.common.settings.MockSecureSettings;
-import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
import org.mockito.Mockito;
@@ -21,21 +19,13 @@
import java.util.Set;
import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.APM_AGENT_SETTINGS;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_API_KEY_SETTING;
import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_SECRET_TOKEN_SETTING;
import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_ENABLED_SETTING;
import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING;
import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_NAMES_INCLUDE_SETTING;
import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TELEMETRY_TRACING_SANITIZE_FIELD_NAMES;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_API_KEY_SETTING;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_ENABLED_SETTING;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_NAMES_EXCLUDE_SETTING;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_NAMES_INCLUDE_SETTING;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_SANITIZE_FIELD_NAMES;
-import static org.elasticsearch.telemetry.apm.internal.APMAgentSettings.TRACING_APM_SECRET_TOKEN_SETTING;
-import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.hasItem;
import static org.mockito.Mockito.clearInvocations;
import static org.mockito.Mockito.mock;
@@ -70,14 +60,6 @@ public void testEnableTracing() {
}
}
- public void testEnableTracingUsingLegacySetting() {
- Settings settings = Settings.builder().put(TRACING_APM_ENABLED_SETTING.getKey(), true).build();
- apmAgentSettings.initAgentSystemProperties(settings);
-
- verify(apmAgentSettings).setAgentSetting("recording", "true");
- assertWarnings("[tracing.apm.enabled] setting was deprecated in Elasticsearch and will be removed in a future release.");
- }
-
public void testEnableMetrics() {
for (boolean tracingEnabled : List.of(true, false)) {
clearInvocations(apmAgentSettings, apmTelemetryProvider.getMeterService());
@@ -121,14 +103,6 @@ public void testDisableTracing() {
}
}
- public void testDisableTracingUsingLegacySetting() {
- Settings settings = Settings.builder().put(TRACING_APM_ENABLED_SETTING.getKey(), false).build();
- apmAgentSettings.initAgentSystemProperties(settings);
-
- verify(apmAgentSettings).setAgentSetting("recording", "false");
- assertWarnings("[tracing.apm.enabled] setting was deprecated in Elasticsearch and will be removed in a future release.");
- }
-
public void testDisableMetrics() {
for (boolean tracingEnabled : List.of(true, false)) {
clearInvocations(apmAgentSettings, apmTelemetryProvider.getMeterService());
@@ -181,70 +155,18 @@ public void testSetAgentSettings() {
verify(apmAgentSettings).setAgentSetting("span_compression_enabled", "true");
}
- public void testSetAgentsSettingsWithLegacyPrefix() {
- Settings settings = Settings.builder()
- .put(TELEMETRY_TRACING_ENABLED_SETTING.getKey(), true)
- .put("tracing.apm.agent.span_compression_enabled", "true")
- .build();
- apmAgentSettings.initAgentSystemProperties(settings);
-
- verify(apmAgentSettings).setAgentSetting("recording", "true");
- verify(apmAgentSettings).setAgentSetting("span_compression_enabled", "true");
- assertWarnings(
- "[tracing.apm.agent.span_compression_enabled] setting was deprecated in Elasticsearch and will be removed in a future release."
- );
- }
-
/**
* Check that invalid or forbidden APM agent settings are rejected.
*/
public void testRejectForbiddenOrUnknownAgentSettings() {
- List prefixes = List.of(APM_AGENT_SETTINGS.getKey(), "tracing.apm.agent.");
- for (String prefix : prefixes) {
- Settings settings = Settings.builder().put(prefix + "unknown", "true").build();
- Exception exception = expectThrows(IllegalArgumentException.class, () -> APM_AGENT_SETTINGS.getAsMap(settings));
- assertThat(exception.getMessage(), containsString("[" + prefix + "unknown]"));
- }
- // though, accept / ignore nested global_labels
- for (String prefix : prefixes) {
- Settings settings = Settings.builder().put(prefix + "global_labels.abc", "123").build();
- APMAgentSettings.APM_AGENT_SETTINGS.getAsMap(settings);
-
- if (prefix.startsWith("tracing.apm.agent.")) {
- assertWarnings(
- "[tracing.apm.agent.global_labels.abc] setting was deprecated in Elasticsearch and will be removed in a future release."
- );
- }
- }
- }
-
- public void testTelemetryTracingNamesIncludeFallback() {
- Settings settings = Settings.builder().put(TRACING_APM_NAMES_INCLUDE_SETTING.getKey(), "abc,xyz").build();
-
- List included = TELEMETRY_TRACING_NAMES_INCLUDE_SETTING.get(settings);
-
- assertThat(included, containsInAnyOrder("abc", "xyz"));
- assertWarnings("[tracing.apm.names.include] setting was deprecated in Elasticsearch and will be removed in a future release.");
- }
-
- public void testTelemetryTracingNamesExcludeFallback() {
- Settings settings = Settings.builder().put(TRACING_APM_NAMES_EXCLUDE_SETTING.getKey(), "abc,xyz").build();
-
- List included = TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING.get(settings);
-
- assertThat(included, containsInAnyOrder("abc", "xyz"));
- assertWarnings("[tracing.apm.names.exclude] setting was deprecated in Elasticsearch and will be removed in a future release.");
- }
-
- public void testTelemetryTracingSanitizeFieldNamesFallback() {
- Settings settings = Settings.builder().put(TRACING_APM_SANITIZE_FIELD_NAMES.getKey(), "abc,xyz").build();
-
- List included = TELEMETRY_TRACING_SANITIZE_FIELD_NAMES.get(settings);
+ String prefix = APM_AGENT_SETTINGS.getKey();
+ Settings settings = Settings.builder().put(prefix + "unknown", "true").build();
+ Exception exception = expectThrows(IllegalArgumentException.class, () -> APM_AGENT_SETTINGS.getAsMap(settings));
+ assertThat(exception.getMessage(), containsString("[" + prefix + "unknown]"));
- assertThat(included, containsInAnyOrder("abc", "xyz"));
- assertWarnings(
- "[tracing.apm.sanitize_field_names] setting was deprecated in Elasticsearch and will be removed in a future release."
- );
+ // though, accept / ignore nested global_labels
+ var map = APMAgentSettings.APM_AGENT_SETTINGS.getAsMap(Settings.builder().put(prefix + "global_labels.abc", "123").build());
+ assertThat(map, hasEntry("global_labels.abc", "123"));
}
public void testTelemetryTracingSanitizeFieldNamesFallbackDefault() {
@@ -252,28 +174,6 @@ public void testTelemetryTracingSanitizeFieldNamesFallbackDefault() {
assertThat(included, hasItem("password")); // and more defaults
}
- public void testTelemetrySecretTokenFallback() {
- MockSecureSettings secureSettings = new MockSecureSettings();
- secureSettings.setString(TRACING_APM_SECRET_TOKEN_SETTING.getKey(), "verysecret");
- Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
-
- try (SecureString secureString = TELEMETRY_SECRET_TOKEN_SETTING.get(settings)) {
- assertEquals("verysecret", secureString.toString());
- }
- assertWarnings("[tracing.apm.secret_token] setting was deprecated in Elasticsearch and will be removed in a future release.");
- }
-
- public void testTelemetryApiKeyFallback() {
- MockSecureSettings secureSettings = new MockSecureSettings();
- secureSettings.setString(TRACING_APM_API_KEY_SETTING.getKey(), "abc");
- Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
-
- try (SecureString secureString = TELEMETRY_API_KEY_SETTING.get(settings)) {
- assertEquals("abc", secureString.toString());
- }
- assertWarnings("[tracing.apm.api_key] setting was deprecated in Elasticsearch and will be removed in a future release.");
- }
-
/**
* Check that invalid or forbidden APM agent settings are rejected if their last part resembles an allowed setting.
*/
diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java
index 4a3dfac36d4ec..2739eb51376ea 100644
--- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java
+++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java
@@ -596,7 +596,7 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception {
verifyResolvability(dataStreamName, indicesAdmin().prepareGetFieldMappings(dataStreamName), false);
verifyResolvability(dataStreamName, indicesAdmin().preparePutMapping(dataStreamName).setSource("""
{"_doc":{"properties": {"my_field":{"type":"keyword"}}}}""", XContentType.JSON), false);
- verifyResolvability(dataStreamName, indicesAdmin().prepareGetMappings(dataStreamName), false);
+ verifyResolvability(dataStreamName, indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, dataStreamName), false);
verifyResolvability(
dataStreamName,
indicesAdmin().prepareUpdateSettings(dataStreamName).setSettings(Settings.builder().put("index.number_of_replicas", 0)),
@@ -643,7 +643,7 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception {
verifyResolvability(wildcardExpression, indicesAdmin().prepareGetFieldMappings(wildcardExpression), false);
verifyResolvability(wildcardExpression, indicesAdmin().preparePutMapping(wildcardExpression).setSource("""
{"_doc":{"properties": {"my_field":{"type":"keyword"}}}}""", XContentType.JSON), false);
- verifyResolvability(wildcardExpression, indicesAdmin().prepareGetMappings(wildcardExpression), false);
+ verifyResolvability(wildcardExpression, indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, wildcardExpression), false);
verifyResolvability(wildcardExpression, indicesAdmin().prepareGetSettings(wildcardExpression), false);
verifyResolvability(
wildcardExpression,
@@ -1180,7 +1180,7 @@ public void testUpdateMappingViaDataStream() throws Exception {
DataStreamTimestampFieldMapper.NAME,
Map.of("enabled", true)
);
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("logs-foobar").get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "logs-foobar").get();
assertThat(getMappingsResponse.getMappings().size(), equalTo(2));
assertThat(getMappingsResponse.getMappings().get(backingIndex1).getSourceAsMap(), equalTo(expectedMapping));
assertThat(getMappingsResponse.getMappings().get(backingIndex2).getSourceAsMap(), equalTo(expectedMapping));
@@ -1195,7 +1195,7 @@ public void testUpdateMappingViaDataStream() throws Exception {
.setSource("{\"properties\":{\"my_field\":{\"type\":\"keyword\"}}}", XContentType.JSON)
.get();
// The mappings of all backing indices should be updated:
- getMappingsResponse = indicesAdmin().prepareGetMappings("logs-foobar").get();
+ getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "logs-foobar").get();
assertThat(getMappingsResponse.getMappings().size(), equalTo(2));
assertThat(getMappingsResponse.getMappings().get(backingIndex1).getSourceAsMap(), equalTo(expectedMapping));
assertThat(getMappingsResponse.getMappings().get(backingIndex2).getSourceAsMap(), equalTo(expectedMapping));
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java
index f090186480b76..8026ec641d040 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java
@@ -9,10 +9,6 @@
package org.elasticsearch.datastreams;
-import org.elasticsearch.action.admin.indices.rollover.LazyRolloverAction;
-import org.elasticsearch.action.datastreams.autosharding.DataStreamAutoShardingService;
-import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention;
-import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthInfoPublisher;
import org.elasticsearch.features.FeatureSpecification;
import org.elasticsearch.features.NodeFeature;
@@ -27,12 +23,7 @@ public class DataStreamFeatures implements FeatureSpecification {
@Override
public Set getFeatures() {
- return Set.of(
- DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE, // Added in 8.12
- LazyRolloverAction.DATA_STREAM_LAZY_ROLLOVER, // Added in 8.13
- DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE,
- DataStreamGlobalRetention.GLOBAL_RETENTION // Added in 8.14
- );
+ return Set.of();
}
@Override
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java
index cb7445705537a..7d5f4bbee32be 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java
@@ -197,8 +197,7 @@ public Collection> createComponents(PluginServices services) {
settings,
services.client(),
services.clusterService(),
- errorStoreInitialisationService.get(),
- services.featureService()
+ errorStoreInitialisationService.get()
)
);
dataLifecycleInitialisationService.set(
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisher.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisher.java
index 642fa4923e074..71575ee88aa7d 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisher.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisher.java
@@ -19,8 +19,6 @@
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore;
-import org.elasticsearch.features.FeatureService;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo;
import org.elasticsearch.health.node.DslErrorInfo;
import org.elasticsearch.health.node.UpdateHealthInfoCacheAction;
@@ -45,12 +43,10 @@ public class DataStreamLifecycleHealthInfoPublisher {
Setting.Property.Dynamic,
Setting.Property.NodeScope
);
- public static final NodeFeature DSL_HEALTH_INFO_FEATURE = new NodeFeature("health.dsl.info", true);
private final Client client;
private final ClusterService clusterService;
private final DataStreamLifecycleErrorStore errorStore;
- private final FeatureService featureService;
private volatile int signallingErrorRetryInterval;
private volatile int maxNumberOfErrorsToPublish;
@@ -58,13 +54,11 @@ public DataStreamLifecycleHealthInfoPublisher(
Settings settings,
Client client,
ClusterService clusterService,
- DataStreamLifecycleErrorStore errorStore,
- FeatureService featureService
+ DataStreamLifecycleErrorStore errorStore
) {
this.client = client;
this.clusterService = clusterService;
this.errorStore = errorStore;
- this.featureService = featureService;
this.signallingErrorRetryInterval = DATA_STREAM_SIGNALLING_ERROR_RETRY_INTERVAL_SETTING.get(settings);
this.maxNumberOfErrorsToPublish = DATA_STREAM_LIFECYCLE_MAX_ERRORS_TO_PUBLISH_SETTING.get(settings);
}
@@ -89,9 +83,6 @@ private void updateNumberOfErrorsToPublish(int newValue) {
* {@link org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService#DATA_STREAM_SIGNALLING_ERROR_RETRY_INTERVAL_SETTING}
*/
public void publishDslErrorEntries(ActionListener actionListener) {
- if (featureService.clusterHasFeature(clusterService.state(), DSL_HEALTH_INFO_FEATURE) == false) {
- return;
- }
// fetching the entries that persist in the error store for more than the signalling retry interval
// note that we're reporting this view into the error store on every publishing iteration
List errorEntriesToSignal = errorStore.getErrorsInfo(
diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java
index ac7dabd868a3f..0bb990e544892 100644
--- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java
+++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java
@@ -67,9 +67,7 @@
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.core.Tuple;
-import org.elasticsearch.datastreams.DataStreamFeatures;
import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthInfoPublisher;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexMode;
import org.elasticsearch.index.IndexSettings;
@@ -183,13 +181,7 @@ public void setupServices() {
() -> now,
errorStore,
allocationService,
- new DataStreamLifecycleHealthInfoPublisher(
- Settings.EMPTY,
- client,
- clusterService,
- errorStore,
- new FeatureService(List.of(new DataStreamFeatures()))
- ),
+ new DataStreamLifecycleHealthInfoPublisher(Settings.EMPTY, client, clusterService, errorStore),
globalRetentionSettings
);
clientDelegate = null;
@@ -1465,13 +1457,7 @@ public void testTrackingTimeStats() {
() -> now.getAndAdd(delta),
errorStore,
mock(AllocationService.class),
- new DataStreamLifecycleHealthInfoPublisher(
- Settings.EMPTY,
- getTransportRequestsRecordingClient(),
- clusterService,
- errorStore,
- new FeatureService(List.of(new DataStreamFeatures()))
- ),
+ new DataStreamLifecycleHealthInfoPublisher(Settings.EMPTY, getTransportRequestsRecordingClient(), clusterService, errorStore),
globalRetentionSettings
);
assertThat(service.getLastRunDuration(), is(nullValue()));
diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisherTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisherTests.java
index cff6127e0729e..f8a2ac3c61029 100644
--- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisherTests.java
+++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthInfoPublisherTests.java
@@ -24,10 +24,8 @@
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.datastreams.DataStreamFeatures;
import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore;
import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo;
import org.elasticsearch.health.node.DslErrorInfo;
import org.elasticsearch.health.node.UpdateHealthInfoCacheAction;
@@ -40,7 +38,6 @@
import java.util.HashSet;
import java.util.List;
-import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
@@ -83,13 +80,7 @@ public void setupServices() {
final Client client = getTransportRequestsRecordingClient();
errorStore = new DataStreamLifecycleErrorStore(() -> now);
- dslHealthInfoPublisher = new DataStreamLifecycleHealthInfoPublisher(
- Settings.EMPTY,
- client,
- clusterService,
- errorStore,
- new FeatureService(List.of(new DataStreamFeatures()))
- );
+ dslHealthInfoPublisher = new DataStreamLifecycleHealthInfoPublisher(Settings.EMPTY, client, clusterService, errorStore);
}
@After
@@ -105,16 +96,6 @@ public void testPublishDslErrorEntries() {
}
errorStore.recordError("testIndex", new IllegalStateException("bad state"));
ClusterState stateWithHealthNode = ClusterStateCreationUtils.state(node1, node1, node1, allNodes);
- stateWithHealthNode = ClusterState.builder(stateWithHealthNode)
- .nodeFeatures(
- Map.of(
- node1.getId(),
- Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()),
- node2.getId(),
- Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id())
- )
- )
- .build();
ClusterServiceUtils.setState(clusterService, stateWithHealthNode);
dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() {
@Override
@@ -143,16 +124,6 @@ public void testPublishDslErrorEntriesNoHealthNode() {
errorStore.recordError("testIndex", new IllegalStateException("bad state"));
ClusterState stateNoHealthNode = ClusterStateCreationUtils.state(node1, node1, null, allNodes);
- stateNoHealthNode = ClusterState.builder(stateNoHealthNode)
- .nodeFeatures(
- Map.of(
- node1.getId(),
- Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()),
- node2.getId(),
- Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id())
- )
- )
- .build();
ClusterServiceUtils.setState(clusterService, stateNoHealthNode);
dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() {
@Override
@@ -170,16 +141,6 @@ public void onFailure(Exception e) {
public void testPublishDslErrorEntriesEmptyErrorStore() {
// publishes the empty error store (this is the "back to healthy" state where all errors have been fixed)
ClusterState state = ClusterStateCreationUtils.state(node1, node1, node1, allNodes);
- state = ClusterState.builder(state)
- .nodeFeatures(
- Map.of(
- node1.getId(),
- Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id()),
- node2.getId(),
- Set.of(DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE.id())
- )
- )
- .build();
ClusterServiceUtils.setState(clusterService, state);
dslHealthInfoPublisher.publishDslErrorEntries(new ActionListener<>() {
@Override
diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml
index 9ea3bfefabdf8..884adb5458102 100644
--- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml
+++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml
@@ -300,9 +300,6 @@ index without timestamp with pipeline:
---
dynamic templates:
- - requires:
- cluster_features: ["mapper.pass_through_priority"]
- reason: support for priority in passthrough objects
- do:
allowed_warnings:
- "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -450,9 +447,6 @@ dynamic templates:
---
dynamic templates - conflicting aliases:
- - requires:
- cluster_features: ["mapper.pass_through_priority"]
- reason: support for priority in passthrough objects
- do:
allowed_warnings:
- "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -549,9 +543,6 @@ dynamic templates - conflicting aliases:
---
dynamic templates - conflicting aliases with top-level field:
- - requires:
- cluster_features: ["mapper.pass_through_priority"]
- reason: support for priority in passthrough objects
- do:
allowed_warnings:
- "index template [my-dynamic-template] has index patterns [otel] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -632,9 +623,6 @@ dynamic templates - conflicting aliases with top-level field:
---
dynamic templates with nesting:
- - requires:
- cluster_features: ["mapper.pass_through_priority"]
- reason: support for priority in passthrough objects
- do:
allowed_warnings:
- "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -810,10 +798,6 @@ dynamic templates with nesting:
---
dynamic templates with incremental indexing:
- - requires:
- cluster_features: ["mapper.pass_through_priority"]
- reason: support for priority in passthrough objects
-
- do:
allowed_warnings:
- "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -1038,9 +1022,6 @@ dynamic templates with incremental indexing:
---
subobject in passthrough object auto flatten:
- - requires:
- cluster_features: ["mapper.pass_through_priority"]
- reason: support for priority in passthrough objects
- do:
allowed_warnings:
- "index template [my-passthrough-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-passthrough-template] will take precedence during new index creation"
@@ -1108,9 +1089,6 @@ enable subobjects in passthrough object:
---
passthrough objects with duplicate priority:
- - requires:
- cluster_features: ["mapper.pass_through_priority"]
- reason: support for priority in passthrough objects
- do:
catch: /has a conflicting param/
indices.put_index_template:
@@ -1135,9 +1113,6 @@ passthrough objects with duplicate priority:
---
dimensions with ignore_malformed and ignore_above:
- - requires:
- cluster_features: ["mapper.keyword_dimension_ignore_above"]
- reason: support for ignore_above on keyword dimensions
- do:
allowed_warnings:
- "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -1229,9 +1204,6 @@ dimensions with ignore_malformed and ignore_above:
---
non string dimension fields:
- - requires:
- cluster_features: ["mapper.pass_through_priority", "routing.boolean_routing_path", "mapper.boolean_dimension"]
- reason: support for priority in passthrough objects
- do:
allowed_warnings:
- "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
@@ -1339,10 +1311,6 @@ non string dimension fields:
---
multi value dimensions:
- - requires:
- cluster_features: ["routing.multi_value_routing_path"]
- reason: support for multi-value dimensions
-
- do:
allowed_warnings:
- "index template [my-dynamic-template] has index patterns [k9s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-dynamic-template] will take precedence during new index creation"
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java
index aa48c73cf1d73..08efe87e6fde5 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java
@@ -160,11 +160,6 @@ public void writeTo(StreamOutput out) throws IOException {
if (provider instanceof Maxmind maxmind) {
out.writeString(maxmind.accountId);
} else {
- /*
- * The existence of a non-Maxmind providers is gated on the feature get_database_configuration_action.multi_node, and
- * get_database_configuration_action.multi_node is only available on or after
- * TransportVersions.INGEST_GEO_DATABASE_PROVIDERS.
- */
assert false : "non-maxmind DatabaseConfiguration.Provider [" + provider.getWriteableName() + "]";
}
}
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java
index c83c40e56b749..a1faaf1bb0196 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java
@@ -17,7 +17,6 @@
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.regex.Regex;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.ingest.geoip.DatabaseNodeService;
import org.elasticsearch.ingest.geoip.GeoIpTaskState;
import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata;
@@ -41,8 +40,6 @@
import java.util.Set;
import java.util.stream.Collectors;
-import static org.elasticsearch.ingest.IngestGeoIpFeatures.GET_DATABASE_CONFIGURATION_ACTION_MULTI_NODE;
-
public class TransportGetDatabaseConfigurationAction extends TransportNodesAction<
GetDatabaseConfigurationAction.Request,
GetDatabaseConfigurationAction.Response,
@@ -50,7 +47,6 @@ public class TransportGetDatabaseConfigurationAction extends TransportNodesActio
GetDatabaseConfigurationAction.NodeResponse,
List> {
- private final FeatureService featureService;
private final DatabaseNodeService databaseNodeService;
@Inject
@@ -59,7 +55,6 @@ public TransportGetDatabaseConfigurationAction(
ClusterService clusterService,
ThreadPool threadPool,
ActionFilters actionFilters,
- FeatureService featureService,
DatabaseNodeService databaseNodeService
) {
super(
@@ -70,39 +65,9 @@ public TransportGetDatabaseConfigurationAction(
GetDatabaseConfigurationAction.NodeRequest::new,
threadPool.executor(ThreadPool.Names.MANAGEMENT)
);
- this.featureService = featureService;
this.databaseNodeService = databaseNodeService;
}
- @Override
- protected void doExecute(
- Task task,
- GetDatabaseConfigurationAction.Request request,
- ActionListener listener
- ) {
- if (featureService.clusterHasFeature(clusterService.state(), GET_DATABASE_CONFIGURATION_ACTION_MULTI_NODE) == false) {
- /*
- * TransportGetDatabaseConfigurationAction used to be a TransportMasterNodeAction, and not all nodes in the cluster have been
- * updated. So we don't want to send node requests to the other nodes because they will blow up. Instead, we just return
- * the information that we used to return from the master node (it doesn't make any difference that this might not be the master
- * node, because we're only reading the cluster state). Because older nodes only know about the Maxmind provider type, we filter
- * out all others here to avoid causing problems on those nodes.
- */
- newResponseAsync(
- task,
- request,
- createActionContext(task, request).stream()
- .filter(database -> database.database().provider() instanceof DatabaseConfiguration.Maxmind)
- .toList(),
- List.of(),
- List.of(),
- listener
- );
- } else {
- super.doExecute(task, request, listener);
- }
- }
-
protected List createActionContext(Task task, GetDatabaseConfigurationAction.Request request) {
final Set ids;
if (request.getDatabaseIds().length == 0) {
diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java
index dfb8fa78089d2..e68bb9d82e91b 100644
--- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java
+++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java
@@ -29,7 +29,6 @@
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.Strings;
import org.elasticsearch.core.Tuple;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.ingest.geoip.IngestGeoIpMetadata;
import org.elasticsearch.ingest.geoip.direct.PutDatabaseConfigurationAction.Request;
import org.elasticsearch.injection.guice.Inject;
@@ -42,8 +41,6 @@
import java.util.Map;
import java.util.Optional;
-import static org.elasticsearch.ingest.IngestGeoIpFeatures.PUT_DATABASE_CONFIGURATION_ACTION_IPINFO;
-
public class TransportPutDatabaseConfigurationAction extends TransportMasterNodeAction {
private static final Logger logger = LogManager.getLogger(TransportPutDatabaseConfigurationAction.class);
@@ -61,7 +58,6 @@ public void taskSucceeded(UpdateDatabaseConfigurationTask task, Void unused) {
}
};
- private final FeatureService featureService;
private final MasterServiceTaskQueue updateDatabaseConfigurationTaskQueue;
@Inject
@@ -70,8 +66,7 @@ public TransportPutDatabaseConfigurationAction(
ClusterService clusterService,
ThreadPool threadPool,
ActionFilters actionFilters,
- IndexNameExpressionResolver indexNameExpressionResolver,
- FeatureService featureService
+ IndexNameExpressionResolver indexNameExpressionResolver
) {
super(
PutDatabaseConfigurationAction.NAME,
@@ -84,7 +79,6 @@ public TransportPutDatabaseConfigurationAction(
AcknowledgedResponse::readFrom,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
- this.featureService = featureService;
this.updateDatabaseConfigurationTaskQueue = clusterService.createTaskQueue(
"update-geoip-database-configuration-state-update",
Priority.NORMAL,
@@ -96,18 +90,6 @@ public TransportPutDatabaseConfigurationAction(
protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) {
final String id = request.getDatabase().id();
- // if this is an ipinfo configuration, then make sure the whole cluster supports that feature
- if (request.getDatabase().provider() instanceof DatabaseConfiguration.Ipinfo
- && featureService.clusterHasFeature(clusterService.state(), PUT_DATABASE_CONFIGURATION_ACTION_IPINFO) == false) {
- listener.onFailure(
- new IllegalArgumentException(
- "Unable to use ipinfo database configurations in mixed-clusters with nodes that do not support feature "
- + PUT_DATABASE_CONFIGURATION_ACTION_IPINFO.id()
- )
- );
- return;
- }
-
updateDatabaseConfigurationTaskQueue.submitTask(
Strings.format("update-geoip-database-configuration-[%s]", id),
new UpdateDatabaseConfigurationTask(listener, request.getDatabase()),
diff --git a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml
index a1104505bc240..007c82db4c923 100644
--- a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml
+++ b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/40_geoip_databases.yml
@@ -1,9 +1,3 @@
----
-setup:
- - requires:
- cluster_features: ["geoip.downloader.database.configuration", "get_database_configuration_action.multi_node"]
- reason: "geoip downloader database configuration APIs added in 8.15, and updated in 8.16 to return more results"
-
---
teardown:
- do:
diff --git a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/50_ip_lookup_processor.yml b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/50_ip_lookup_processor.yml
index fd73c715a5ac5..0947984769529 100644
--- a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/50_ip_lookup_processor.yml
+++ b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/50_ip_lookup_processor.yml
@@ -1,9 +1,3 @@
-setup:
- - requires:
- cluster_features:
- - "put_database_configuration_action.ipinfo"
- reason: "ipinfo support added in 8.16"
-
---
"Test ip_location processor with defaults":
- do:
diff --git a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/60_ip_location_databases.yml b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/60_ip_location_databases.yml
index e2e9a1fdb5e28..47f09392df60e 100644
--- a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/60_ip_location_databases.yml
+++ b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/60_ip_location_databases.yml
@@ -1,10 +1,3 @@
----
-setup:
- - requires:
- cluster_features:
- - "put_database_configuration_action.ipinfo"
- reason: "ip location downloader database configuration APIs added in 8.16 to support more types"
-
---
teardown:
- do:
diff --git a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java
index 553e4696af316..a9ab0c02612f6 100644
--- a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java
+++ b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java
@@ -16,6 +16,8 @@
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.internal.Client;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;
@@ -23,7 +25,6 @@
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPoolStats;
@@ -49,10 +50,6 @@
* threads that wait on a phaser. This lets us verify that operations on system indices
* are being directed to other thread pools.
*/
-@TestLogging(
- reason = "investigate",
- value = "org.elasticsearch.kibana.KibanaThreadPoolIT:DEBUG,org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor:TRACE"
-)
public class KibanaThreadPoolIT extends ESIntegTestCase {
private static final Logger logger = LogManager.getLogger(KibanaThreadPoolIT.class);
@@ -68,6 +65,8 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
.put("thread_pool.write.queue_size", 1)
.put("thread_pool.get.size", 1)
.put("thread_pool.get.queue_size", 1)
+ // a rejected GET may retry on an INITIALIZING shard (the target of a relocation) and unexpectedly succeed, so block rebalancing
+ .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
.build();
}
@@ -112,7 +111,12 @@ public void testKibanaThreadPoolByPassesBlockedThreadPools() throws Exception {
}
public void testBlockedThreadPoolsRejectUserRequests() throws Exception {
- assertAcked(client().admin().indices().prepareCreate(USER_INDEX));
+ assertAcked(
+ client().admin()
+ .indices()
+ .prepareCreate(USER_INDEX)
+ .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) // avoid retrying rejected actions
+ );
runWithBlockedThreadPools(this::assertThreadPoolsBlocked);
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml
index 25088f51e2b59..1434450b65a6a 100644
--- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml
@@ -221,9 +221,6 @@ setup:
- close_to: {hits.hits.2._score: {value: 186.34454, error: 0.01}}
---
"Test hamming distance fails on float":
- - requires:
- cluster_features: ["script.hamming"]
- reason: "support for hamming distance added in 8.15"
- do:
headers:
Content-Type: application/json
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml
index cdd65ca0eb296..05a10ffdbccdb 100644
--- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/146_dense_vector_bit_basic.yml
@@ -1,7 +1,5 @@
setup:
- requires:
- cluster_features: ["mapper.vectors.bit_vectors"]
- reason: "support for bit vectors added in 8.15"
test_runner_features: headers
- do:
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/151_dense_vector_byte_hamming.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/151_dense_vector_byte_hamming.yml
index 373f048e7be78..a6c111be681f9 100644
--- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/151_dense_vector_byte_hamming.yml
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/151_dense_vector_byte_hamming.yml
@@ -1,7 +1,5 @@
setup:
- requires:
- cluster_features: ["script.hamming"]
- reason: "support for hamming distance added in 8.15"
test_runner_features: headers
- do:
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml
index f82b844f01588..3a869640993f4 100644
--- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/190_term_statistics_script_score.yml
@@ -1,8 +1,4 @@
setup:
- - requires:
- cluster_features: ["script.term_stats"]
- reason: "support for term stats has been added in 8.16"
-
- do:
indices.create:
index: test-index
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml
index de4d6530f4a92..3a9c71e3c2bab 100644
--- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/191_term_statistics_function_score.yml
@@ -1,8 +1,4 @@
setup:
- - requires:
- cluster_features: ["script.term_stats"]
- reason: "support for term stats has been added in 8.16"
-
- do:
indices.create:
index: test-index
diff --git a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml
index 968e93cf9fc55..175abe183106b 100644
--- a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml
+++ b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml
@@ -251,11 +251,6 @@ setup:
---
"Usage stats":
- - requires:
- cluster_features:
- - repositories.supports_usage_stats
- reason: requires this feature
-
- do:
cluster.stats: {}
diff --git a/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml b/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml
index e8c34a4b6a20b..d2370919297a3 100644
--- a/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml
+++ b/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml
@@ -234,11 +234,6 @@ setup:
---
"Usage stats":
- - requires:
- cluster_features:
- - repositories.supports_usage_stats
- reason: requires this feature
-
- do:
cluster.stats: {}
diff --git a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioBasicCredentialsRestIT.java b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioBasicCredentialsRestIT.java
index 93915e8491d5b..3d7c8dd150610 100644
--- a/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioBasicCredentialsRestIT.java
+++ b/modules/repository-s3/src/javaRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioBasicCredentialsRestIT.java
@@ -19,13 +19,15 @@
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
+import java.util.Locale;
+
@ThreadLeakFilters(filters = { TestContainersThreadFilter.class })
@ThreadLeakScope(ThreadLeakScope.Scope.NONE) // https://github.com/elastic/elasticsearch/issues/102482
public class RepositoryS3MinioBasicCredentialsRestIT extends AbstractRepositoryS3RestTestCase {
- private static final String PREFIX = getIdentifierPrefix("RepositoryS3MinioBasicCredentialsRestIT");
+ private static final String PREFIX = getIdentifierPrefix("RepositoryS3MinioBasicCredentialsRestIT").toLowerCase(Locale.ROOT);
private static final String BUCKET = PREFIX + "bucket";
- private static final String BASE_PATH = PREFIX + "base_path";
+ private static final String BASE_PATH = PREFIX + "base-path";
private static final String ACCESS_KEY = PREFIX + "access-key";
private static final String SECRET_KEY = PREFIX + "secret-key";
private static final String CLIENT = "minio_client";
diff --git a/muted-tests.yml b/muted-tests.yml
index 2f652f87ab283..9766d3ed35f18 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -52,9 +52,6 @@ tests:
- class: org.elasticsearch.xpack.transform.integration.TransformIT
method: testStopWaitForCheckpoint
issue: https://github.com/elastic/elasticsearch/issues/106113
-- class: org.elasticsearch.kibana.KibanaThreadPoolIT
- method: testBlockedThreadPoolsRejectUserRequests
- issue: https://github.com/elastic/elasticsearch/issues/113939
- class: org.elasticsearch.xpack.inference.TextEmbeddingCrudIT
method: testPutE5Small_withPlatformAgnosticVariant
issue: https://github.com/elastic/elasticsearch/issues/113983
@@ -227,50 +224,45 @@ tests:
- class: org.elasticsearch.search.profile.dfs.DfsProfilerIT
method: testProfileDfs
issue: https://github.com/elastic/elasticsearch/issues/119711
-- class: org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizerTests
- method: testSingleMatchFunctionFilterPushdownWithStringValues {default}
- issue: https://github.com/elastic/elasticsearch/issues/119720
-- class: org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizerTests
- method: testSingleMatchFunctionPushdownWithCasting {default}
- issue: https://github.com/elastic/elasticsearch/issues/119722
-- class: org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizerTests
- method: testSingleMatchOperatorFilterPushdownWithStringValues {default}
- issue: https://github.com/elastic/elasticsearch/issues/119721
-- class: org.elasticsearch.xpack.inference.action.filter.ShardBulkInferenceActionFilterIT
- method: testBulkOperations {p0=false}
- issue: https://github.com/elastic/elasticsearch/issues/119901
- class: org.elasticsearch.xpack.inference.InferenceCrudIT
method: testGetServicesWithCompletionTaskType
issue: https://github.com/elastic/elasticsearch/issues/119959
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
- method: testSearchableSnapshotUpgrade {p0=[9.0.0, 8.18.0, 8.18.0]}
- issue: https://github.com/elastic/elasticsearch/issues/119978
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
- method: testSearchableSnapshotUpgrade {p0=[9.0.0, 9.0.0, 8.18.0]}
- issue: https://github.com/elastic/elasticsearch/issues/119979
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
- method: testMountSearchableSnapshot {p0=[9.0.0, 8.18.0, 8.18.0]}
- issue: https://github.com/elastic/elasticsearch/issues/119550
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
- method: testMountSearchableSnapshot {p0=[9.0.0, 9.0.0, 8.18.0]}
- issue: https://github.com/elastic/elasticsearch/issues/119980
-- class: org.elasticsearch.index.codec.vectors.es816.ES816HnswBinaryQuantizedVectorsFormatTests
- method: testRandomExceptions
- issue: https://github.com/elastic/elasticsearch/issues/119981
- class: org.elasticsearch.multi_cluster.MultiClusterYamlTestSuiteIT
issue: https://github.com/elastic/elasticsearch/issues/119983
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
- method: testMountSearchableSnapshot {p0=[9.0.0, 9.0.0, 9.0.0]}
- issue: https://github.com/elastic/elasticsearch/issues/119989
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
- method: testSearchableSnapshotUpgrade {p0=[9.0.0, 9.0.0, 9.0.0]}
- issue: https://github.com/elastic/elasticsearch/issues/119990
- class: org.elasticsearch.xpack.test.rest.XPackRestIT
method: test {p0=transform/transforms_unattended/Test unattended put and start}
issue: https://github.com/elastic/elasticsearch/issues/120019
- class: org.elasticsearch.index.mapper.IntervalThrottlerTests
method: testThrottling
issue: https://github.com/elastic/elasticsearch/issues/120023
+- class: org.elasticsearch.entitlement.qa.EntitlementsDeniedIT
+ method: testCheckThrows {pathPrefix=denied actionName=sslSessionImpl_getSessionContext}
+ issue: https://github.com/elastic/elasticsearch/issues/120053
+- class: org.elasticsearch.entitlement.qa.EntitlementsDeniedIT
+ method: testCheckThrows {pathPrefix=denied_nonmodular actionName=sslSessionImpl_getSessionContext}
+ issue: https://github.com/elastic/elasticsearch/issues/120054
+- class: org.elasticsearch.xpack.ilm.actions.SearchableSnapshotActionIT
+ method: testUpdatePolicyToAddPhasesYieldsInvalidActionsToBeSkipped
+ issue: https://github.com/elastic/elasticsearch/issues/118406
+- class: org.elasticsearch.xpack.ml.integration.DatafeedJobsIT
+ issue: https://github.com/elastic/elasticsearch/issues/120088
+- class: org.elasticsearch.xpack.searchablesnapshots.minio.MinioSearchableSnapshotsIT
+ issue: https://github.com/elastic/elasticsearch/issues/120101
+- class: org.elasticsearch.repositories.s3.S3RepositoryThirdPartyTests
+ issue: https://github.com/elastic/elasticsearch/issues/120115
+- class: org.elasticsearch.repositories.s3.RepositoryS3MinioBasicCredentialsRestIT
+ issue: https://github.com/elastic/elasticsearch/issues/120117
+- class: org.elasticsearch.repositories.blobstore.testkit.analyze.MinioRepositoryAnalysisRestIT
+ issue: https://github.com/elastic/elasticsearch/issues/118548
+- class: org.elasticsearch.xpack.security.QueryableReservedRolesIT
+ method: testConfiguredReservedRolesAfterClosingAndOpeningIndex
+ issue: https://github.com/elastic/elasticsearch/issues/120127
+- class: org.elasticsearch.oldrepos.OldRepositoryAccessIT
+ method: testOldRepoAccess
+ issue: https://github.com/elastic/elasticsearch/issues/120148
+- class: org.elasticsearch.oldrepos.OldRepositoryAccessIT
+ method: testOldSourceOnlyRepoAccess
+ issue: https://github.com/elastic/elasticsearch/issues/120080
# Examples:
#
diff --git a/plugins/mapper-annotated-text/src/main/java/module-info.java b/plugins/mapper-annotated-text/src/main/java/module-info.java
index 13f2bd66418be..58aca0d2857fe 100644
--- a/plugins/mapper-annotated-text/src/main/java/module-info.java
+++ b/plugins/mapper-annotated-text/src/main/java/module-info.java
@@ -15,6 +15,4 @@
requires org.apache.lucene.highlighter;
// exports nothing
-
- provides org.elasticsearch.features.FeatureSpecification with org.elasticsearch.index.mapper.annotatedtext.Features;
}
diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java
index 33b5db1c4662d..4b2006430b89e 100644
--- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java
+++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java
@@ -22,7 +22,6 @@
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexOptions;
import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.analysis.AnalyzerScope;
import org.elasticsearch.index.analysis.IndexAnalyzers;
@@ -64,8 +63,6 @@
**/
public class AnnotatedTextFieldMapper extends FieldMapper {
- public static final NodeFeature SYNTHETIC_SOURCE_SUPPORT = new NodeFeature("mapper.annotated_text.synthetic_source", true);
-
public static final String CONTENT_TYPE = "annotated_text";
private static Builder builder(FieldMapper in) {
diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/Features.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/Features.java
deleted file mode 100644
index 51a2d2bbe1d40..0000000000000
--- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/Features.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.index.mapper.annotatedtext;
-
-import org.elasticsearch.features.FeatureSpecification;
-import org.elasticsearch.features.NodeFeature;
-
-import java.util.Set;
-
-/**
- * Provides features for annotated text mapper.
- */
-public class Features implements FeatureSpecification {
- @Override
- public Set getFeatures() {
- return Set.of(
- AnnotatedTextFieldMapper.SYNTHETIC_SOURCE_SUPPORT // Added in 8.15
- );
- }
-}
diff --git a/plugins/mapper-annotated-text/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/plugins/mapper-annotated-text/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification
deleted file mode 100644
index 1fc11da18fc3c..0000000000000
--- a/plugins/mapper-annotated-text/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification
+++ /dev/null
@@ -1,10 +0,0 @@
-#
- # Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- # or more contributor license agreements. Licensed under the "Elastic License
- # 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- # Public License v 1"; you may not use this file except in compliance with, at
- # your election, the "Elastic License 2.0", the "GNU Affero General Public
- # License v3.0 only", or the "Server Side Public License, v 1".
-#
-
-org.elasticsearch.index.mapper.annotatedtext.Features
diff --git a/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java
index c2251910c3122..435849821691e 100644
--- a/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java
+++ b/plugins/mapper-size/src/internalClusterTest/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java
@@ -91,7 +91,7 @@ private void assertSizeMappingEnabled(String index, boolean enabled) throws IOEx
"Expected size field mapping to be " + (enabled ? "enabled" : "disabled") + " for %s",
index
);
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(index).get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, index).get();
Map mappingSource = getMappingsResponse.getMappings().get(index).getSourceAsMap();
assertThat(errMsg, mappingSource, hasKey("_size"));
String sizeAsString = mappingSource.get("_size").toString();
diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java
index d9adec47ff483..30367bf55d8cc 100644
--- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java
+++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java
@@ -11,7 +11,6 @@
import com.carrotsearch.randomizedtesting.annotations.Name;
-import org.elasticsearch.Build;
import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest;
import org.elasticsearch.client.Request;
import org.elasticsearch.cluster.metadata.DesiredNode;
@@ -84,7 +83,7 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve
randomDoubleProcessorCount(),
ByteSizeValue.ofGb(randomIntBetween(10, 24)),
ByteSizeValue.ofGb(randomIntBetween(128, 256)),
- clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version()
+ null
)
)
.toList();
@@ -96,7 +95,7 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve
new DesiredNode.ProcessorsRange(minProcessors, minProcessors + randomIntBetween(10, 20)),
ByteSizeValue.ofGb(randomIntBetween(10, 24)),
ByteSizeValue.ofGb(randomIntBetween(128, 256)),
- clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version()
+ null
);
}).toList();
}
diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java
index ce514c5f1b1e7..c48ae9ba1843b 100644
--- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java
+++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java
@@ -12,12 +12,15 @@
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction;
+import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsAction;
import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction;
import org.elasticsearch.action.admin.indices.recovery.RecoveryAction;
import org.elasticsearch.action.admin.indices.template.get.GetComponentTemplateAction;
import org.elasticsearch.action.admin.indices.template.get.GetComposableIndexTemplateAction;
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction;
+import org.elasticsearch.action.admin.indices.template.post.SimulateIndexTemplateAction;
+import org.elasticsearch.action.admin.indices.template.post.SimulateTemplateAction;
import org.elasticsearch.action.support.CancellableActionTestPlugin;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.RefCountingListener;
@@ -81,6 +84,25 @@ public void testGetComposableTemplateCancellation() {
runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_index_template"), GetComposableIndexTemplateAction.NAME);
}
+ public void testSimulateTemplateCancellation() {
+ runRestActionCancellationTest(
+ new Request(HttpPost.METHOD_NAME, "/_index_template/_simulate/random_index_template"),
+ SimulateTemplateAction.NAME
+ );
+ }
+
+ public void testSimulateIndexTemplateCancellation() {
+ createIndex("test");
+ runRestActionCancellationTest(
+ new Request(HttpPost.METHOD_NAME, "/_index_template/_simulate_index/test"),
+ SimulateIndexTemplateAction.NAME
+ );
+ }
+
+ public void testClusterGetSettingsCancellation() {
+ runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_cluster/settings"), ClusterGetSettingsAction.NAME);
+ }
+
private void runRestActionCancellationTest(Request request, String actionName) {
final var node = usually() ? internalCluster().getRandomNodeName() : internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml
index d4843fb152888..4a5ceeb66f661 100644
--- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml
+++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml
@@ -222,10 +222,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.mapping.validation.templates"]
- reason: "ingest simulate index mapping validation added in 8.16"
-
- do:
headers:
Content-Type: application/json
@@ -313,10 +309,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.component.template.substitutions"]
- reason: "ingest simulate component template substitutions added in 8.16"
-
- do:
headers:
Content-Type: application/json
@@ -494,10 +486,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.component.template.substitutions"]
- reason: "ingest simulate component template substitutions added in 8.16"
-
- do:
headers:
Content-Type: application/json
@@ -617,10 +605,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.component.template.substitutions"]
- reason: "ingest simulate component template substitutions added in 8.16"
-
- do:
headers:
Content-Type: application/json
@@ -816,10 +800,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.index.template.substitutions"]
- reason: "ingest simulate index template substitutions added in 8.16"
-
- do:
headers:
Content-Type: application/json
@@ -1010,10 +990,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.index.template.substitutions"]
- reason: "ingest simulate component template substitutions added in 8.16"
-
- do:
headers:
Content-Type: application/json
@@ -1227,10 +1203,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.mapping.addition"]
- reason: "ingest simulate mapping addition added in 8.17"
-
- do:
headers:
Content-Type: application/json
@@ -1463,10 +1435,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.mapping.addition"]
- reason: "ingest simulate mapping addition added in 8.17"
-
- do:
indices.put_template:
name: my-legacy-template
@@ -1584,10 +1552,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.support.non.template.mapping"]
- reason: "ingest simulate support for indices with mappings that didn't come from templates added in 8.17"
-
# A global match-everything legacy template is added to the cluster sometimes (rarely). We have to get rid of this template if it exists
# because this test is making sure we get correct behavior when an index matches *no* template:
- do:
diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle
index 1da8e906582b1..e4b46b98cedda 100644
--- a/rest-api-spec/build.gradle
+++ b/rest-api-spec/build.gradle
@@ -85,4 +85,6 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task ->
task.skipTest("search.vectors/110_knn_query_with_filter/PRE_FILTER: pre-filter across multiple aliases", "waiting for #118774 backport")
task.skipTest("search.vectors/160_knn_query_missing_params/kNN search in a dis_max query - missing num_candidates", "waiting for #118774 backport")
task.skipTest("search.highlight/30_max_analyzed_offset/Plain highlighter with max_analyzed_offset < 0 should FAIL", "semantics of test has changed")
+ task.skipTest("indices.create/10_basic/Create lookup index", "default auto_expand_replicas was removed")
+ task.skipTest("indices.create/10_basic/Create lookup index with one shard", "default auto_expand_replicas was removed")
})
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json
index 5862804257c67..5004ab8de697d 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json
@@ -26,7 +26,7 @@
},
"master_timeout":{
"type":"time",
- "description":"Explicit operation timeout for connection to master node"
+ "description":"Timeout for waiting for new cluster state in case it is blocked"
},
"timeout":{
"type":"time",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.unfreeze.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.unfreeze.json
deleted file mode 100644
index 2327519ff2816..0000000000000
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.unfreeze.json
+++ /dev/null
@@ -1,67 +0,0 @@
-{
- "indices.unfreeze":{
- "documentation":{
- "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/unfreeze-index-api.html",
- "description":"Unfreezes an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again."
- },
- "stability":"stable",
- "visibility":"public",
- "headers":{
- "accept": [ "application/json"]
- },
- "url":{
- "paths":[
- {
- "path":"/{index}/_unfreeze",
- "methods":[
- "POST"
- ],
- "parts":{
- "index":{
- "type":"string",
- "description":"The name of the index to unfreeze"
- }
- },
- "deprecated":{
- "version":"7.14.0",
- "description":"Frozen indices are deprecated because they provide no benefit given improvements in heap memory utilization. They will be removed in a future release."
- }
- }
- ]
- },
- "params":{
- "timeout":{
- "type":"time",
- "description":"Explicit operation timeout"
- },
- "master_timeout":{
- "type":"time",
- "description":"Specify timeout for connection to master"
- },
- "ignore_unavailable":{
- "type":"boolean",
- "description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)"
- },
- "allow_no_indices":{
- "type":"boolean",
- "description":"Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
- },
- "expand_wildcards":{
- "type":"enum",
- "options":[
- "open",
- "closed",
- "hidden",
- "none",
- "all"
- ],
- "default":"closed",
- "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both."
- },
- "wait_for_active_shards":{
- "type":"string",
- "description":"Sets the number of active shards to wait for before the operation returns."
- }
- }
- }
-}
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml
index 9da6d2c5f086e..ce3f7f0198399 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/field_caps/70_index_mode.yml
@@ -1,9 +1,5 @@
---
setup:
- - requires:
- cluster_features: "mapper.query_index_mode"
- reason: "require index_mode"
-
- do:
indices.create:
index: test_metrics
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml
index 13f6ca58ea295..a0061272a2c23 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml
@@ -1014,10 +1014,6 @@ flattened field:
---
flattened field with ignore_above:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -1070,10 +1066,6 @@ flattened field with ignore_above:
---
flattened field with ignore_above and arrays:
- - requires:
- cluster_features: ["mapper.flattened.ignore_above_with_arrays_support"]
- reason: requires support of ignore_above synthetic source with arrays
-
- do:
indices.create:
index: test
@@ -1127,10 +1119,6 @@ flattened field with ignore_above and arrays:
---
completion:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_fallback"]
- reason: introduced in 8.15.0
-
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml
index 414c24cfffd7d..7b8f785a2cb93 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/92_metrics_auto_subobjects.yml
@@ -2,7 +2,6 @@
"Metrics object indexing":
- requires:
test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ]
- cluster_features: ["mapper.subobjects_auto"]
reason: requires supporting subobjects auto setting
- do:
@@ -69,7 +68,6 @@
"Root with metrics":
- requires:
test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ]
- cluster_features: ["mapper.subobjects_auto"]
reason: requires supporting subobjects auto setting
- do:
@@ -131,7 +129,6 @@
"Metrics object indexing with synthetic source":
- requires:
test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ]
- cluster_features: ["mapper.subobjects_auto"]
reason: added in 8.4.0
- do:
@@ -201,7 +198,6 @@
"Root without subobjects with synthetic source":
- requires:
test_runner_features: [ "allowed_warnings", "allowed_warnings_regex" ]
- cluster_features: ["mapper.subobjects_auto"]
reason: added in 8.4.0
- do:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml
index d0e1759073e1b..8645c91a51ad3 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml
@@ -171,7 +171,6 @@
index: test_lookup
- match: { test_lookup.settings.index.number_of_shards: "1"}
- - match: { test_lookup.settings.index.auto_expand_replicas: "0-all"}
---
"Create lookup index with one shard":
@@ -196,7 +195,6 @@
index: test_lookup
- match: { test_lookup.settings.index.number_of_shards: "1"}
- - match: { test_lookup.settings.index.auto_expand_replicas: "0-all"}
---
"Create lookup index with two shards":
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml
index 5003f6df79a14..72dddcf8052cc 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml
@@ -6,7 +6,7 @@ setup:
---
object with unmapped fields:
- requires:
- cluster_features: ["mapper.track_ignored_source", "mapper.bwc_workaround_9_0"]
+ cluster_features: ["mapper.bwc_workaround_9_0"]
reason: requires tracking ignored source
- do:
@@ -56,10 +56,6 @@ object with unmapped fields:
---
unmapped arrays:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -103,7 +99,7 @@ unmapped arrays:
---
nested object with unmapped fields:
- requires:
- cluster_features: ["mapper.track_ignored_source", "mapper.bwc_workaround_9_0"]
+ cluster_features: ["mapper.bwc_workaround_9_0"]
reason: requires tracking ignored source
- do:
@@ -154,7 +150,7 @@ nested object with unmapped fields:
---
empty object with unmapped fields:
- requires:
- cluster_features: ["mapper.track_ignored_source", "mapper.bwc_workaround_9_0"]
+ cluster_features: ["mapper.bwc_workaround_9_0"]
reason: requires tracking ignored source
- do:
@@ -314,10 +310,6 @@ disabled object contains array:
---
disabled subobject:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -353,10 +345,6 @@ disabled subobject:
---
disabled subobject with array:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -393,10 +381,6 @@ disabled subobject with array:
---
mixed disabled and enabled objects:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -488,10 +472,6 @@ object with dynamic override:
---
subobject with dynamic override:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -537,10 +517,6 @@ subobject with dynamic override:
---
object array in object with dynamic override:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -592,10 +568,6 @@ object array in object with dynamic override:
---
value array in object with dynamic override:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -636,10 +608,6 @@ value array in object with dynamic override:
---
nested object:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -682,10 +650,6 @@ nested object:
---
nested object next to regular:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -729,10 +693,6 @@ nested object next to regular:
---
nested object with disabled:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -818,10 +778,6 @@ nested object with disabled:
---
doubly nested object:
- - requires:
- cluster_features: ["mapper.track_ignored_source"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -915,7 +871,7 @@ doubly nested object:
---
subobjects auto:
- requires:
- cluster_features: ["mapper.subobjects_auto", "mapper.bwc_workaround_9_0"]
+ cluster_features: ["mapper.bwc_workaround_9_0"]
reason: requires tracking ignored source and supporting subobjects auto setting
- do:
@@ -1003,10 +959,6 @@ subobjects auto:
---
synthetic_source with copy_to:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1140,10 +1092,6 @@ synthetic_source with copy_to:
---
synthetic_source with disabled doc_values:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"]
- reason: requires disabled doc_values support in synthetic source
-
- do:
indices.create:
index: test
@@ -1224,10 +1172,6 @@ synthetic_source with disabled doc_values:
---
fallback synthetic_source for text field:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"]
- reason: requires disabled doc_values support in synthetic source
-
- do:
indices.create:
index: test
@@ -1259,10 +1203,6 @@ fallback synthetic_source for text field:
---
synthetic_source with copy_to and ignored values:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1328,10 +1268,6 @@ synthetic_source with copy_to and ignored values:
---
synthetic_source with copy_to field having values in source:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1392,10 +1328,6 @@ synthetic_source with copy_to field having values in source:
---
synthetic_source with ignored source field using copy_to:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1457,10 +1389,6 @@ synthetic_source with ignored source field using copy_to:
---
synthetic_source with copy_to field from dynamic template having values in source:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1555,7 +1483,6 @@ synthetic_source with copy_to field from dynamic template having values in sourc
---
synthetic_source with copy_to and invalid values for copy:
- requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_fix"]
reason: requires copy_to support in synthetic source
test_runner_features: "contains"
@@ -1592,10 +1519,6 @@ synthetic_source with copy_to and invalid values for copy:
---
synthetic_source with copy_to pointing inside object:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1697,10 +1620,6 @@ synthetic_source with copy_to pointing inside object:
---
synthetic_source with copy_to pointing to ambiguous field:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1745,10 +1664,6 @@ synthetic_source with copy_to pointing to ambiguous field:
---
synthetic_source with copy_to pointing to ambiguous field and subobjects false:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1794,10 +1709,6 @@ synthetic_source with copy_to pointing to ambiguous field and subobjects false:
---
synthetic_source with copy_to pointing to ambiguous field and subobjects auto:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
@@ -1845,7 +1756,6 @@ synthetic_source with copy_to pointing to ambiguous field and subobjects auto:
synthetic_source with copy_to pointing at dynamic field:
- requires:
test_runner_features: contains
- cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"]
reason: requires copy_to support in synthetic source
- do:
@@ -1931,10 +1841,6 @@ synthetic_source with copy_to pointing at dynamic field:
---
synthetic_source with copy_to pointing inside dynamic object:
- - requires:
- cluster_features: ["mapper.source.synthetic_source_copy_to_inside_objects_fix"]
- reason: requires copy_to support in synthetic source
-
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml
index 095665e9337b1..803b8a7d0062f 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml
@@ -6,7 +6,7 @@ setup:
---
object param - store complex object:
- requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
+ cluster_features: ["mapper.bwc_workaround_9_0"]
reason: requires tracking ignored source
- do:
@@ -71,7 +71,7 @@ object param - store complex object:
---
object param - object array:
- requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
+ cluster_features: ["mapper.bwc_workaround_9_0"]
reason: requires tracking ignored source
- do:
@@ -136,7 +136,7 @@ object param - object array:
---
object param - object array within array:
- requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
+ cluster_features: ["mapper.bwc_workaround_9_0"]
reason: requires tracking ignored source
- do:
@@ -180,7 +180,7 @@ object param - object array within array:
---
object param - no object array:
- requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
+ cluster_features: ["mapper.bwc_workaround_9_0"]
reason: requires tracking ignored source
- do:
@@ -223,7 +223,7 @@ object param - no object array:
---
object param - field ordering in object array:
- requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
+ cluster_features: ["mapper.bwc_workaround_9_0"]
reason: requires tracking ignored source
- do:
@@ -273,7 +273,7 @@ object param - field ordering in object array:
---
object param - nested object array next to other fields:
- requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
+ cluster_features: ["mapper.bwc_workaround_9_0"]
reason: requires tracking ignored source
- do:
@@ -380,7 +380,7 @@ object param - nested object with stored array:
---
index param - nested array within array:
- requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
+ cluster_features: ["mapper.bwc_workaround_9_0"]
reason: requires tracking ignored source
- do:
@@ -428,7 +428,7 @@ index param - nested array within array:
# 112156
stored field under object with store_array_source:
- requires:
- cluster_features: ["mapper.source.synthetic_source_stored_fields_advance_fix", "mapper.bwc_workaround_9_0"]
+ cluster_features: ["mapper.bwc_workaround_9_0"]
reason: requires bug fix to be implemented
- do:
@@ -477,10 +477,6 @@ stored field under object with store_array_source:
---
field param - keep root array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -535,10 +531,6 @@ field param - keep root array:
---
field param - keep nested array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -605,7 +597,6 @@ field param - keep nested array:
field param - keep root singleton fields:
- requires:
test_runner_features: close_to
- cluster_features: ["mapper.synthetic_source_keep"]
reason: requires keeping singleton source
- do:
@@ -695,7 +686,6 @@ field param - keep root singleton fields:
field param - keep nested singleton fields:
- requires:
test_runner_features: close_to
- cluster_features: ["mapper.synthetic_source_keep"]
reason: requires keeping singleton source
- do:
@@ -776,10 +766,6 @@ field param - keep nested singleton fields:
---
field param - nested array within array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires tracking ignored source
-
- do:
indices.create:
index: test
@@ -821,7 +807,7 @@ field param - nested array within array:
---
index param - root arrays:
- requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
+ cluster_features: ["mapper.bwc_workaround_9_0"]
reason: requires keeping array source
- do:
@@ -900,10 +886,6 @@ index param - root arrays:
---
index param - dynamic root arrays:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -952,10 +934,6 @@ index param - dynamic root arrays:
---
index param - object array within array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -1001,10 +979,6 @@ index param - object array within array:
---
index param - no object array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -1045,10 +1019,6 @@ index param - no object array:
---
index param - field ordering:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -1095,10 +1065,6 @@ index param - field ordering:
---
index param - nested arrays:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -1162,10 +1128,6 @@ index param - nested arrays:
---
index param - nested object with stored array:
- - requires:
- cluster_features: ["mapper.synthetic_source_keep"]
- reason: requires keeping array source
-
- do:
indices.create:
index: test
@@ -1214,7 +1176,7 @@ index param - nested object with stored array:
---
index param - flattened fields:
- requires:
- cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"]
+ cluster_features: ["mapper.bwc_workaround_9_0"]
reason: requires keeping array source
- do:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml
index 3d82539944a97..89816be5ca8e7 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml
@@ -453,8 +453,6 @@
---
"Composable index templates that include subobjects: auto at root":
- requires:
- cluster_features: ["mapper.subobjects_auto"]
- reason: "https://github.com/elastic/elasticsearch/issues/96768 fixed at 8.11.0"
test_runner_features: "allowed_warnings"
- do:
@@ -504,8 +502,6 @@
---
"Composable index templates that include subobjects: auto on arbitrary field":
- requires:
- cluster_features: ["mapper.subobjects_auto"]
- reason: "https://github.com/elastic/elasticsearch/issues/96768 fixed at 8.11.0"
test_runner_features: "allowed_warnings"
- do:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml
index c88d638199dba..d07d03cb7146c 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/20_nested.yml
@@ -1,8 +1,5 @@
---
sort doc with nested object:
- - requires:
- cluster_features: ["mapper.index_sorting_on_nested"]
- reason: uses index sorting on nested fields
- do:
indices.create:
index: test
@@ -66,9 +63,6 @@ sort doc with nested object:
---
sort doc on nested field:
- - requires:
- cluster_features: [ "mapper.index_sorting_on_nested" ]
- reason: uses index sorting on nested fields
- do:
catch: /cannot apply index sort to field \[nested_field\.foo\] under nested object \[nested_field\]/
indices.create:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml
index 07af3fb52b92f..2a31b3bd387c4 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/logsdb/10_settings.yml
@@ -312,7 +312,6 @@ override sort mode settings:
---
override sort field using nested field type in sorting:
- requires:
- cluster_features: ["mapper.index_sorting_on_nested"]
test_runner_features: [ capabilities ]
capabilities:
- method: PUT
@@ -358,9 +357,6 @@ override sort field using nested field type in sorting:
---
override sort field using nested field type:
- - requires:
- cluster_features: ["mapper.index_sorting_on_nested"]
- reason: "Support for index sorting on indexes with nested objects required"
- do:
indices.create:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml
index 084f104932d99..8485aba0ecc6a 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml
@@ -55,9 +55,6 @@ keyword:
---
keyword with normalizer:
- - requires:
- cluster_features: [ "mapper.keyword_normalizer_synthetic_source" ]
- reason: support for normalizer on keyword fields
- do:
indices.create:
index: test-keyword-with-normalizer
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml
index 9d6e8da8c1e1e..2a14c291d5d31 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml
@@ -417,7 +417,6 @@
- requires:
test_runner_features: [arbitrary_key]
- cluster_features: ["mapper.query_index_mode"]
reason: "_ignored_source added to mappings"
- do:
@@ -511,10 +510,6 @@
---
"Lucene segment level fields stats":
- - requires:
- cluster_features: ["mapper.segment_level_fields_stats"]
- reason: "segment level fields stats"
-
- do:
indices.create:
index: index1
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/90_fs_watermark_stats.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/90_fs_watermark_stats.yml
index 3ec854e93d82c..20e9d92a36088 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/90_fs_watermark_stats.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/90_fs_watermark_stats.yml
@@ -1,8 +1,6 @@
---
"Allocation stats":
- requires:
- cluster_features: ["stats.include_disk_thresholds"]
- reason: "fs watermark stats was added in 8.15.0"
test_runner_features: [arbitrary_key]
- do:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml
index 3432a1e34c018..6ca17cc9cdce9 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml
@@ -520,10 +520,6 @@ setup:
---
"Null bounds":
- - requires:
- cluster_features: ["mapper.range.null_values_off_by_one_fix"]
- reason: fixed in 8.15.0
-
- do:
index:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/60_unified_matched_fields.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/60_unified_matched_fields.yml
index bd14fb182ac5a..94db54d152941 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/60_unified_matched_fields.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/60_unified_matched_fields.yml
@@ -1,8 +1,4 @@
setup:
- - requires:
- cluster_features: 'unified_highlighter_matched_fields'
- reason: 'test requires unified highlighter to support matched_fields'
-
- do:
indices.create:
index: index1
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml
index a3d920d903ae8..bc4e262ea53c6 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/101_knn_nested_search_bits.yml
@@ -1,8 +1,6 @@
setup:
- requires:
- cluster_features: "mapper.vectors.bit_vectors"
test_runner_features: close_to
- reason: 'bit vectors added in 8.15'
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml
index 3d4841a16d82d..cffc12a8d24ae 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/110_knn_query_with_filter.yml
@@ -59,9 +59,6 @@ setup:
---
"Simple knn query":
- - requires:
- cluster_features: "search.vectors.k_param_supported"
- reason: 'k param for knn as query is required'
- do:
search:
index: my_index
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml
index f6538b573809a..c92c88df91641 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/190_knn_query-with-k-param.yml
@@ -1,8 +1,6 @@
# test how knn query interact with other queries
setup:
- requires:
- cluster_features: "search.vectors.k_param_supported"
- reason: 'k param for knn as query is required'
test_runner_features: close_to
- do:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml
index 3f81c0044d170..abde3e86dd05b 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_bbq_hnsw.yml
@@ -1,7 +1,4 @@
setup:
- - requires:
- cluster_features: "mapper.vectors.bbq"
- reason: 'kNN float to better-binary quantization is required'
- do:
indices.create:
index: bbq_hnsw
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml
index baf568762dd17..9b27aea4b1db7 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_half_byte_quantized.yml
@@ -1,7 +1,4 @@
setup:
- - requires:
- cluster_features: "mapper.vectors.int4_quantization"
- reason: 'kNN float to half-byte quantization is required'
- do:
indices.create:
index: hnsw_byte_quantized
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml
index 0bc111576c2a9..2541de7023bf0 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_bbq_flat.yml
@@ -1,7 +1,4 @@
setup:
- - requires:
- cluster_features: "mapper.vectors.bbq"
- reason: 'kNN float to better-binary quantization is required'
- do:
indices.create:
index: bbq_flat
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml
index 0e0180e58fd96..f9f8d56e1d9c9 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/42_knn_search_int4_flat.yml
@@ -1,7 +1,4 @@
setup:
- - requires:
- cluster_features: "mapper.vectors.int4_quantization"
- reason: 'kNN float to half-byte quantization is required'
- do:
indices.create:
index: int4_flat
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml
index 680433a5945fd..ef2ae3ba7ee0a 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit.yml
@@ -1,8 +1,4 @@
setup:
- - requires:
- cluster_features: "mapper.vectors.bit_vectors"
- reason: 'mapper.vectors.bit_vectors'
-
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml
index 783f08a5d4ff4..07261e6a30c77 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_bit_flat.yml
@@ -1,8 +1,4 @@
setup:
- - requires:
- cluster_features: "mapper.vectors.bit_vectors"
- reason: 'mapper.vectors.bit_vectors'
-
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml
index 44d966b76f34e..8915325c3a67b 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml
@@ -1128,10 +1128,6 @@ fetch geo_point:
---
"Test with subobjects: auto":
- - requires:
- cluster_features: "mapper.subobjects_auto"
- reason: requires support for subobjects auto setting
-
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/530_ignore_above_stored_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/530_ignore_above_stored_source.yml
index 1730a49f743d9..7e00cbb01c589 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/530_ignore_above_stored_source.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/530_ignore_above_stored_source.yml
@@ -1,8 +1,5 @@
---
ignore_above mapping level setting:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
indices.create:
index: test
@@ -42,9 +39,6 @@ ignore_above mapping level setting:
---
ignore_above mapping level setting on arrays:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
indices.create:
index: test
@@ -84,9 +78,6 @@ ignore_above mapping level setting on arrays:
---
ignore_above mapping overrides setting:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
indices.create:
index: test
@@ -128,9 +119,6 @@ ignore_above mapping overrides setting:
---
ignore_above mapping overrides setting on arrays:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
indices.create:
index: test
@@ -172,9 +160,6 @@ ignore_above mapping overrides setting on arrays:
---
date ignore_above index level setting:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml
index 772c3c24170cd..045f757b08302 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/540_ignore_above_synthetic_source.yml
@@ -5,9 +5,6 @@ setup:
---
ignore_above mapping level setting:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
indices.create:
index: test
@@ -47,9 +44,6 @@ ignore_above mapping level setting:
---
ignore_above mapping level setting on arrays:
- - requires:
- cluster_features: [ "mapper.flattened.ignore_above_with_arrays_support" ]
- reason: requires support of ignore_above with arrays for flattened fields
- do:
indices.create:
index: test
@@ -90,9 +84,6 @@ ignore_above mapping level setting on arrays:
---
ignore_above mapping overrides setting:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
indices.create:
index: test
@@ -135,9 +126,6 @@ ignore_above mapping overrides setting:
---
ignore_above mapping overrides setting on arrays:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/550_ignore_above_invalid.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/550_ignore_above_invalid.yml
index 3c29845871fe7..6e711ee143b06 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/550_ignore_above_invalid.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/550_ignore_above_invalid.yml
@@ -16,9 +16,6 @@ ignore_above index setting negative value:
---
keyword ignore_above mapping setting negative value:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
catch: bad_request
indices.create:
@@ -32,9 +29,6 @@ keyword ignore_above mapping setting negative value:
---
flattened ignore_above mapping setting negative value:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
catch: bad_request
indices.create:
@@ -48,9 +42,6 @@ flattened ignore_above mapping setting negative value:
---
wildcard ignore_above mapping setting negative value:
- - requires:
- cluster_features: [ "mapper.ignore_above_index_level_setting" ]
- reason: introduce ignore_above index level setting
- do:
catch: bad_request
indices.create:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/600_flattened_ignore_above.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/600_flattened_ignore_above.yml
index a4a9b1aaecb22..71e0c2d147c1e 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/600_flattened_ignore_above.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/600_flattened_ignore_above.yml
@@ -1,8 +1,5 @@
---
flattened ignore_above single-value field:
- - requires:
- cluster_features: [ "flattened.ignore_above_support" ]
- reason: introduce ignore_above support in flattened fields
- do:
indices.create:
index: test
@@ -65,9 +62,6 @@ flattened ignore_above single-value field:
---
flattened ignore_above multi-value field:
- - requires:
- cluster_features: [ "flattened.ignore_above_support" ]
- reason: introduce ignore_above support in flattened fields
- do:
indices.create:
index: test
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml
index da0f00d960534..70a3b0253c78f 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml
@@ -119,10 +119,6 @@ setup:
- skip:
features: headers
- - requires:
- cluster_features: ["simulate.mapping.validation"]
- reason: "ingest simulate index mapping validation added in 8.16"
-
- do:
headers:
Content-Type: application/json
@@ -265,10 +261,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.mapping.validation.templates"]
- reason: "ingest simulate index mapping validation added in 8.16"
-
- do:
indices.put_template:
name: v1_template
@@ -401,10 +393,6 @@ setup:
- headers
- allowed_warnings
- - requires:
- cluster_features: ["simulate.mapping.validation.templates"]
- reason: "ingest simulate index mapping validation added in 8.16"
-
- do:
allowed_warnings:
- "index template [my-template1] has index patterns [simple-data-stream1] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation"
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/140_routing_path.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/140_routing_path.yml
index 616afd3cf67ad..1e841c8893fc6 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/140_routing_path.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/140_routing_path.yml
@@ -122,8 +122,6 @@ missing dimension on routing path field:
multi-value routing path field succeeds:
- requires:
test_runner_features: close_to
- cluster_features: ["routing.multi_value_routing_path"]
- reason: support for multi-value dimensions
- do:
indices.create:
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml
index beba6f2752a11..5a5ae03ab938f 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml
@@ -65,9 +65,6 @@ setup:
---
generates a consistent id:
- - requires:
- cluster_features: "tsdb.ts_routing_hash_doc_value_parse_byte_ref"
- reason: _tsid routing hash doc value parsing has been fixed
- do:
bulk:
refresh: true
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml
index dae50704dd0d0..a8d256bbc097e 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml
@@ -340,9 +340,6 @@ sort by tsid:
---
aggs by index_mode:
- - requires:
- cluster_features: ["mapper.query_index_mode"]
- reason: require _index_mode metadata field
- do:
search:
index: test
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java
index 420f6427a55e1..68e65b16aa3a2 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java
@@ -519,7 +519,7 @@ public void testDeleteIndex() {
public void testGetMappings() {
interceptTransportActions(GetMappingsAction.NAME);
- GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices(randomIndicesOrAliases());
+ GetMappingsRequest getMappingsRequest = new GetMappingsRequest(TEST_REQUEST_TIMEOUT).indices(randomIndicesOrAliases());
internalCluster().coordOnlyNodeClient().admin().indices().getMappings(getMappingsRequest).actionGet();
clearInterceptedActions();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
index 4c25b3262d559..b14bf38f3cbcc 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java
@@ -120,7 +120,7 @@ public void testNonNestedMappings() throws Exception {
)
);
- GetMappingsResponse response = indicesAdmin().prepareGetMappings("test").get();
+ GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get();
MappingMetadata mappings = response.mappings().get("test");
assertNotNull(mappings);
@@ -130,7 +130,7 @@ public void testNonNestedMappings() throws Exception {
public void testEmptyNestedMappings() throws Exception {
assertAcked(prepareCreate("test").setMapping(XContentFactory.jsonBuilder().startObject().endObject()));
- GetMappingsResponse response = indicesAdmin().prepareGetMappings("test").get();
+ GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get();
MappingMetadata mappings = response.mappings().get("test");
assertNotNull(mappings);
@@ -150,7 +150,7 @@ public void testEmptyMappings() throws Exception {
prepareCreate("test").setMapping(XContentFactory.jsonBuilder().startObject().startObject("_doc").endObject().endObject())
);
- GetMappingsResponse response = indicesAdmin().prepareGetMappings("test").get();
+ GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get();
MappingMetadata mappings = response.mappings().get("test");
assertNotNull(mappings);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java
index 867ca89f9e7f3..b9dadf86c3345 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java
@@ -357,8 +357,9 @@ private void assertHasAliases(Set aliasNames, String name, String primar
* Fetch the mappings and settings for {@link TestSystemIndexDescriptor#INDEX_NAME} and verify that they match the expected values.
*/
private void assertMappingsAndSettings(String expectedMappings, String concreteIndex) {
- final GetMappingsResponse getMappingsResponse = indicesAdmin().getMappings(new GetMappingsRequest().indices(INDEX_NAME))
- .actionGet();
+ final GetMappingsResponse getMappingsResponse = indicesAdmin().getMappings(
+ new GetMappingsRequest(TEST_REQUEST_TIMEOUT).indices(INDEX_NAME)
+ ).actionGet();
final Map mappings = getMappingsResponse.getMappings();
assertThat(
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java
index e45555b1dec19..2cd319d148321 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java
@@ -55,7 +55,7 @@ public void testBulkIndexCreatesMapping() throws Exception {
bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON);
bulkBuilder.get();
assertBusy(() -> {
- GetMappingsResponse mappingsResponse = indicesAdmin().prepareGetMappings().get();
+ GetMappingsResponse mappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT).get();
assertTrue(mappingsResponse.getMappings().containsKey("logstash-2014.03.30"));
});
}
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java
index 7777dd778a6c1..9eed1f757b5b1 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java
@@ -258,11 +258,14 @@ public void testLargeClusterStatePublishing() throws Exception {
.setTimeout(TimeValue.timeValueMinutes(1))
);
ensureGreen(); // wait for green state, so its both green, and there are no more pending events
- MappingMetadata masterMappingMetadata = indicesAdmin().prepareGetMappings("test").get().getMappings().get("test");
+ MappingMetadata masterMappingMetadata = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test")
+ .get()
+ .getMappings()
+ .get("test");
for (Client client : clients()) {
MappingMetadata mappingMetadata = client.admin()
.indices()
- .prepareGetMappings("test")
+ .prepareGetMappings(TEST_REQUEST_TIMEOUT, "test")
.setLocal(true)
.get()
.getMappings()
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java
index d6ccdf3dc0399..256566045c59a 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java
@@ -115,7 +115,7 @@ public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception {
)
.get();
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(index).get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, index).get();
assertNotNull(
((Map) (getMappingsResponse.getMappings().get(index).getSourceAsMap().get("properties"))).get("integer_field")
);
@@ -146,7 +146,7 @@ public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception {
)
.get();
- getMappingsResponse = indicesAdmin().prepareGetMappings(index).get();
+ getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, index).get();
assertNotNull(
((Map) (getMappingsResponse.getMappings().get(index).getSourceAsMap().get("properties"))).get("float_field")
);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java
index 802ba04375c48..24af560f608d3 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java
@@ -97,7 +97,7 @@ public void testGlobalTemplatesDoNotApply() {
assertAcked(indicesAdmin().prepareCreate("a_hidden_index").setSettings(Settings.builder().put("index.hidden", true).build()));
- GetMappingsResponse mappingsResponse = indicesAdmin().prepareGetMappings("a_hidden_index").get();
+ GetMappingsResponse mappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "a_hidden_index").get();
assertThat(mappingsResponse.mappings().size(), is(1));
MappingMetadata mappingMetadata = mappingsResponse.mappings().get("a_hidden_index");
assertNotNull(mappingMetadata);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java
index 960ee2fd7ca60..6bca87ebd6e3d 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java
@@ -49,7 +49,7 @@ public void testBasic() {
assertAcked(client().admin().indices().execute(TransportCreateIndexAction.TYPE, createRequest));
Settings settings = client().admin().indices().prepareGetSettings("hosts").get().getIndexToSettings().get("hosts");
assertThat(settings.get("index.mode"), equalTo("lookup"));
- assertThat(settings.get("index.auto_expand_replicas"), equalTo("0-all"));
+ assertNull(settings.get("index.auto_expand_replicas"));
Map allHosts = Map.of(
"192.168.1.2",
"Windows",
@@ -141,7 +141,6 @@ public void testResizeLookupIndex() {
Settings settings = client().admin().indices().prepareGetSettings("lookup-2").get().getIndexToSettings().get("lookup-2");
assertThat(settings.get("index.mode"), equalTo("lookup"));
assertThat(settings.get("index.number_of_shards"), equalTo("1"));
- assertThat(settings.get("index.auto_expand_replicas"), equalTo("0-all"));
ResizeRequest split = new ResizeRequest("lookup-3", "lookup-1");
split.setResizeType(ResizeType.SPLIT);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java
index a37fb25052ac5..06561bc6d4c97 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java
@@ -96,7 +96,11 @@ public void testSimpleDynamicMappingsSuccessful() {
client().prepareIndex("index").setId("1").setSource("a.x", 1).get();
client().prepareIndex("index").setId("2").setSource("a.y", 2).get();
- Map mappings = indicesAdmin().prepareGetMappings("index").get().mappings().get("index").sourceAsMap();
+ Map mappings = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "index")
+ .get()
+ .mappings()
+ .get("index")
+ .sourceAsMap();
assertTrue(new WriteField("properties.a", () -> mappings).exists());
assertTrue(new WriteField("properties.a.properties.x", () -> mappings).exists());
}
@@ -183,7 +187,7 @@ private Map indexConcurrently(int numberOfFieldsToCreate, Settin
for (int i = 0; i < numberOfFieldsToCreate; ++i) {
assertTrue(client().prepareGet("index", Integer.toString(i)).get().isExists());
}
- GetMappingsResponse mappings = indicesAdmin().prepareGetMappings("index").get();
+ GetMappingsResponse mappings = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "index").get();
MappingMetadata indexMappings = mappings.getMappings().get("index");
assertNotNull(indexMappings);
Map typeMappingsMap = indexMappings.getSourceAsMap();
@@ -214,7 +218,11 @@ public void testConcurrentDynamicMappingsWithConflictingType() throws Throwable
for (int i = 0; i < numberOfDocsToCreate; ++i) {
assertTrue(client().prepareGet("index", Integer.toString(i)).get().isExists());
}
- Map index = indicesAdmin().prepareGetMappings("index").get().getMappings().get("index").getSourceAsMap();
+ Map index = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "index")
+ .get()
+ .getMappings()
+ .get("index")
+ .getSourceAsMap();
for (int i = 0, j = 1; i < numberOfDocsToCreate; i++, j++) {
assertThat(new WriteField("properties.field" + i + ".type", () -> index).get(null), is(oneOf("long", "float")));
assertThat(new WriteField("properties.field" + j + ".type", () -> index).get(null), is(oneOf("long", "float")));
@@ -806,7 +814,11 @@ public void testSubobjectsFalseAtRoot() throws Exception {
assertEquals(RestStatus.CREATED, indexResponse.status());
assertBusy(() -> {
- Map mappings = indicesAdmin().prepareGetMappings("test").get().mappings().get("test").sourceAsMap();
+ Map mappings = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test")
+ .get()
+ .mappings()
+ .get("test")
+ .sourceAsMap();
@SuppressWarnings("unchecked")
Map properties = (Map) mappings.get("properties");
assertEquals(4, properties.size());
@@ -851,7 +863,11 @@ public void testSubobjectsFalse() throws Exception {
assertEquals(RestStatus.CREATED, indexResponse.status());
assertBusy(() -> {
- Map mappings = indicesAdmin().prepareGetMappings("test").get().mappings().get("test").sourceAsMap();
+ Map mappings = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test")
+ .get()
+ .mappings()
+ .get("test")
+ .sourceAsMap();
Map properties = (Map) mappings.get("properties");
Map foo = (Map) properties.get("foo");
properties = (Map) foo.get("properties");
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java
index 4a7de4b0ebc23..1a51fc12fed8e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/MultiFieldsIntegrationIT.java
@@ -37,7 +37,7 @@ public class MultiFieldsIntegrationIT extends ESIntegTestCase {
public void testMultiFields() throws Exception {
assertAcked(indicesAdmin().prepareCreate("my-index").setMapping(createTypeSource()));
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("my-index").get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "my-index").get();
MappingMetadata mappingMetadata = getMappingsResponse.mappings().get("my-index");
assertThat(mappingMetadata, not(nullValue()));
Map mappingSource = mappingMetadata.sourceAsMap();
@@ -53,7 +53,7 @@ public void testMultiFields() throws Exception {
assertAcked(indicesAdmin().preparePutMapping("my-index").setSource(createPutMappingSource()));
- getMappingsResponse = indicesAdmin().prepareGetMappings("my-index").get();
+ getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "my-index").get();
mappingMetadata = getMappingsResponse.mappings().get("my-index");
assertThat(mappingMetadata, not(nullValue()));
mappingSource = mappingMetadata.sourceAsMap();
@@ -74,7 +74,7 @@ public void testMultiFields() throws Exception {
public void testGeoPointMultiField() throws Exception {
assertAcked(indicesAdmin().prepareCreate("my-index").setMapping(createMappingSource("geo_point")));
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("my-index").get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "my-index").get();
MappingMetadata mappingMetadata = getMappingsResponse.mappings().get("my-index");
assertThat(mappingMetadata, not(nullValue()));
Map mappingSource = mappingMetadata.sourceAsMap();
@@ -102,7 +102,7 @@ public void testGeoPointMultiField() throws Exception {
public void testCompletionMultiField() throws Exception {
assertAcked(indicesAdmin().prepareCreate("my-index").setMapping(createMappingSource("completion")));
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("my-index").get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "my-index").get();
MappingMetadata mappingMetadata = getMappingsResponse.mappings().get("my-index");
assertThat(mappingMetadata, not(nullValue()));
Map mappingSource = mappingMetadata.sourceAsMap();
@@ -123,7 +123,7 @@ public void testCompletionMultiField() throws Exception {
public void testIpMultiField() throws Exception {
assertAcked(indicesAdmin().prepareCreate("my-index").setMapping(createMappingSource("ip")));
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("my-index").get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "my-index").get();
MappingMetadata mappingMetadata = getMappingsResponse.mappings().get("my-index");
assertThat(mappingMetadata, not(nullValue()));
Map mappingSource = mappingMetadata.sourceAsMap();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java
index 652f4e02ffbce..c53cf3b56f65a 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java
@@ -537,26 +537,26 @@ public void testPutMapping() throws Exception {
}
verify(indicesAdmin().preparePutMapping("foo").setSource("field", "type=text"), false);
- assertThat(indicesAdmin().prepareGetMappings("foo").get().mappings().get("foo"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foo").get().mappings().get("foo"), notNullValue());
verify(indicesAdmin().preparePutMapping("b*").setSource("field", "type=text"), false);
- assertThat(indicesAdmin().prepareGetMappings("bar").get().mappings().get("bar"), notNullValue());
- assertThat(indicesAdmin().prepareGetMappings("barbaz").get().mappings().get("barbaz"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "bar").get().mappings().get("bar"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "barbaz").get().mappings().get("barbaz"), notNullValue());
verify(indicesAdmin().preparePutMapping("_all").setSource("field", "type=text"), false);
- assertThat(indicesAdmin().prepareGetMappings("foo").get().mappings().get("foo"), notNullValue());
- assertThat(indicesAdmin().prepareGetMappings("foobar").get().mappings().get("foobar"), notNullValue());
- assertThat(indicesAdmin().prepareGetMappings("bar").get().mappings().get("bar"), notNullValue());
- assertThat(indicesAdmin().prepareGetMappings("barbaz").get().mappings().get("barbaz"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foo").get().mappings().get("foo"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foobar").get().mappings().get("foobar"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "bar").get().mappings().get("bar"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "barbaz").get().mappings().get("barbaz"), notNullValue());
verify(indicesAdmin().preparePutMapping().setSource("field", "type=text"), false);
- assertThat(indicesAdmin().prepareGetMappings("foo").get().mappings().get("foo"), notNullValue());
- assertThat(indicesAdmin().prepareGetMappings("foobar").get().mappings().get("foobar"), notNullValue());
- assertThat(indicesAdmin().prepareGetMappings("bar").get().mappings().get("bar"), notNullValue());
- assertThat(indicesAdmin().prepareGetMappings("barbaz").get().mappings().get("barbaz"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foo").get().mappings().get("foo"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "foobar").get().mappings().get("foobar"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "bar").get().mappings().get("bar"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "barbaz").get().mappings().get("barbaz"), notNullValue());
verify(indicesAdmin().preparePutMapping("c*").setSource("field", "type=text"), true);
assertAcked(indicesAdmin().prepareClose("barbaz").get());
verify(indicesAdmin().preparePutMapping("barbaz").setSource("field", "type=text"), false);
- assertThat(indicesAdmin().prepareGetMappings("barbaz").get().mappings().get("barbaz"), notNullValue());
+ assertThat(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "barbaz").get().mappings().get("barbaz"), notNullValue());
}
public static final class TestPlugin extends Plugin {
@@ -664,7 +664,7 @@ static GetFieldMappingsRequestBuilder getFieldMapping(String... indices) {
}
static GetMappingsRequestBuilder getMapping(String... indices) {
- return indicesAdmin().prepareGetMappings(indices);
+ return indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, indices);
}
static GetSettingsRequestBuilder getSettings(String... indices) {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java
index de565605ff58a..7264585337fc7 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceIT.java
@@ -123,8 +123,9 @@ private void triggerClusterStateUpdates() {
* Fetch the mappings and settings for {@link TestSystemIndexDescriptor#INDEX_NAME} and verify that they match the expected values.
*/
private void assertMappingsAndSettings(String expectedMappings) {
- final GetMappingsResponse getMappingsResponse = indicesAdmin().getMappings(new GetMappingsRequest().indices(INDEX_NAME))
- .actionGet();
+ final GetMappingsResponse getMappingsResponse = indicesAdmin().getMappings(
+ new GetMappingsRequest(TEST_REQUEST_TIMEOUT).indices(INDEX_NAME)
+ ).actionGet();
final Map<String, MappingMetadata> mappings = getMappingsResponse.getMappings();
assertThat(
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java
index 71fcb25c2e0b2..e3092bda185fe 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java
@@ -198,7 +198,7 @@ public void testGetFieldMappingsWithBlocks() throws Exception {
try {
enableIndexBlock("test", SETTING_BLOCKS_METADATA);
- assertBlocked(indicesAdmin().prepareGetMappings(), INDEX_METADATA_BLOCK);
+ assertBlocked(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT), INDEX_METADATA_BLOCK);
} finally {
disableIndexBlock("test", SETTING_BLOCKS_METADATA);
}
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java
index 20e59fab3bd0c..023aa402b7337 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java
@@ -42,7 +42,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
public void testGetMappingsWhereThereAreNone() {
createIndex("index");
- GetMappingsResponse response = indicesAdmin().prepareGetMappings().get();
+ GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT).get();
assertThat(response.mappings().containsKey("index"), equalTo(true));
assertEquals(MappingMetadata.EMPTY_MAPPINGS, response.mappings().get("index"));
}
@@ -70,19 +70,19 @@ public void testSimpleGetMappings() throws Exception {
assertThat(clusterHealth.isTimedOut(), equalTo(false));
// Get all mappings
- GetMappingsResponse response = indicesAdmin().prepareGetMappings().get();
+ GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT).get();
assertThat(response.mappings().size(), equalTo(2));
assertThat(response.mappings().get("indexa"), notNullValue());
assertThat(response.mappings().get("indexb"), notNullValue());
// Get all mappings, via wildcard support
- response = indicesAdmin().prepareGetMappings("*").get();
+ response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "*").get();
assertThat(response.mappings().size(), equalTo(2));
assertThat(response.mappings().get("indexa"), notNullValue());
assertThat(response.mappings().get("indexb"), notNullValue());
// Get mappings in indexa
- response = indicesAdmin().prepareGetMappings("indexa").get();
+ response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "indexa").get();
assertThat(response.mappings().size(), equalTo(1));
assertThat(response.mappings().get("indexa"), notNullValue());
}
@@ -94,7 +94,7 @@ public void testGetMappingsWithBlocks() throws IOException {
for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
try {
enableIndexBlock("test", block);
- GetMappingsResponse response = indicesAdmin().prepareGetMappings().get();
+ GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT).get();
assertThat(response.mappings().size(), equalTo(1));
assertNotNull(response.mappings().get("test"));
} finally {
@@ -104,7 +104,7 @@ public void testGetMappingsWithBlocks() throws IOException {
try {
enableIndexBlock("test", SETTING_BLOCKS_METADATA);
- assertBlocked(indicesAdmin().prepareGetMappings(), INDEX_METADATA_BLOCK);
+ assertBlocked(indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT), INDEX_METADATA_BLOCK);
} finally {
disableIndexBlock("test", SETTING_BLOCKS_METADATA);
}
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
index 6f6e488d46b23..fa2598348a1ce 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
@@ -109,7 +109,7 @@ public void testUpdateMappingWithoutType() {
assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("test").get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get();
assertThat(getMappingsResponse.mappings().get("test").source().toString(), equalTo("""
{"_doc":{"properties":{"body":{"type":"text"},"date":{"type":"integer"}}}}"""));
}
@@ -123,7 +123,7 @@ public void testUpdateMappingWithoutTypeMultiObjects() {
assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("test").get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get();
assertThat(getMappingsResponse.mappings().get("test").source().toString(), equalTo("""
{"_doc":{"properties":{"date":{"type":"integer"}}}}"""));
}
@@ -215,7 +215,10 @@ public void testUpdateMappingConcurrently() throws Throwable {
.get();
assertThat(response.isAcknowledged(), equalTo(true));
- GetMappingsResponse getMappingResponse = client2.admin().indices().prepareGetMappings(indexName).get();
+ GetMappingsResponse getMappingResponse = client2.admin()
+ .indices()
+ .prepareGetMappings(TEST_REQUEST_TIMEOUT, indexName)
+ .get();
MappingMetadata mappings = getMappingResponse.getMappings().get(indexName);
@SuppressWarnings("unchecked")
Map properties = (Map) mappings.getSourceAsMap().get("properties");
@@ -284,7 +287,7 @@ private void assertConcreteMappingsOnAll(final String index, final String... fie
* Waits for the given mapping type to exists on the master node.
*/
private void assertMappingOnMaster(final String index, final String... fieldNames) {
- GetMappingsResponse response = indicesAdmin().prepareGetMappings(index).get();
+ GetMappingsResponse response = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, index).get();
MappingMetadata mappings = response.getMappings().get(index);
assertThat(mappings, notNullValue());
diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java
index 4112290fa4e04..2a68b65bcdccb 100644
--- a/server/src/main/java/module-info.java
+++ b/server/src/main/java/module-info.java
@@ -424,22 +424,14 @@
provides org.elasticsearch.features.FeatureSpecification
with
- org.elasticsearch.action.admin.indices.stats.IndicesStatsFeatures,
org.elasticsearch.action.bulk.BulkFeatures,
org.elasticsearch.features.FeatureInfrastructureFeatures,
- org.elasticsearch.health.HealthFeatures,
- org.elasticsearch.cluster.metadata.MetadataFeatures,
- org.elasticsearch.rest.RestFeatures,
- org.elasticsearch.repositories.RepositoriesFeatures,
- org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures,
org.elasticsearch.rest.action.admin.cluster.ClusterRerouteFeatures,
org.elasticsearch.index.mapper.MapperFeatures,
org.elasticsearch.index.IndexFeatures,
- org.elasticsearch.ingest.IngestGeoIpFeatures,
org.elasticsearch.search.SearchFeatures,
org.elasticsearch.script.ScriptFeatures,
org.elasticsearch.search.retriever.RetrieversFeatures,
- org.elasticsearch.reservedstate.service.FileSettingsFeatures,
org.elasticsearch.action.admin.cluster.stats.ClusterStatsFeatures;
uses org.elasticsearch.plugins.internal.SettingsExtension;
diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java
index 98d6284fd91d2..ec393b7af5cdf 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -1004,7 +1004,7 @@ public void initRestHandlers(Supplier nodesInCluster, Predicate<
// Desired nodes
registerHandler.accept(new RestGetDesiredNodesAction());
- registerHandler.accept(new RestUpdateDesiredNodesAction(clusterSupportsFeature));
+ registerHandler.accept(new RestUpdateDesiredNodesAction());
registerHandler.accept(new RestDeleteDesiredNodesAction());
for (ActionPlugin plugin : actionPlugins) {
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/AllocationStatsFeatures.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/AllocationStatsFeatures.java
deleted file mode 100644
index 164fc816ad367..0000000000000
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/AllocationStatsFeatures.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.action.admin.cluster.allocation;
-
-import org.elasticsearch.features.FeatureSpecification;
-import org.elasticsearch.features.NodeFeature;
-
-import java.util.Set;
-
-public class AllocationStatsFeatures implements FeatureSpecification {
- public static final NodeFeature INCLUDE_DISK_THRESHOLD_SETTINGS = new NodeFeature("stats.include_disk_thresholds", true);
-
- @Override
- public Set<NodeFeature> getFeatures() {
- return Set.of(INCLUDE_DISK_THRESHOLD_SETTINGS);
- }
-}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java
index d929fb457d5d1..23bf22e08985e 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java
@@ -30,7 +30,6 @@
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
@@ -49,7 +48,6 @@ public class TransportGetAllocationStatsAction extends TransportMasterNodeReadAc
private final AllocationStatsService allocationStatsService;
private final DiskThresholdSettings diskThresholdSettings;
- private final FeatureService featureService;
@Inject
public TransportGetAllocationStatsAction(
@@ -58,8 +56,7 @@ public TransportGetAllocationStatsAction(
ThreadPool threadPool,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
- AllocationStatsService allocationStatsService,
- FeatureService featureService
+ AllocationStatsService allocationStatsService
) {
super(
TYPE.name(),
@@ -74,7 +71,6 @@ public TransportGetAllocationStatsAction(
);
this.allocationStatsService = allocationStatsService;
this.diskThresholdSettings = new DiskThresholdSettings(clusterService.getSettings(), clusterService.getClusterSettings());
- this.featureService = featureService;
}
@Override
@@ -92,10 +88,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A
listener.onResponse(
new Response(
request.metrics().contains(Metric.ALLOCATIONS) ? allocationStatsService.stats() : Map.of(),
- request.metrics().contains(Metric.FS)
- && featureService.clusterHasFeature(clusterService.state(), AllocationStatsFeatures.INCLUDE_DISK_THRESHOLD_SETTINGS)
- ? diskThresholdSettings
- : null
+ request.metrics().contains(Metric.FS) ? diskThresholdSettings : null
)
);
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/TransportNodesCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/TransportNodesCapabilitiesAction.java
index 9fede2ebb5be6..beb0e1f927de2 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/TransportNodesCapabilitiesAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/capabilities/TransportNodesCapabilitiesAction.java
@@ -9,7 +9,6 @@
package org.elasticsearch.action.admin.cluster.node.capabilities;
-import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.ActionFilters;
@@ -20,11 +19,9 @@
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.RestApiVersion;
import org.elasticsearch.core.UpdateForV9;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
-import org.elasticsearch.rest.action.admin.cluster.RestNodesCapabilitiesAction;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportRequest;
@@ -32,7 +29,6 @@
import java.io.IOException;
import java.util.List;
-import java.util.Optional;
import java.util.Set;
public class TransportNodesCapabilitiesAction extends TransportNodesAction<
@@ -45,7 +41,6 @@ public class TransportNodesCapabilitiesAction extends TransportNodesAction<
public static final ActionType<NodesCapabilitiesResponse> TYPE = new ActionType<>("cluster:monitor/nodes/capabilities");
private final RestController restController;
- private final FeatureService featureService;
@Inject
public TransportNodesCapabilitiesAction(
@@ -53,8 +48,7 @@ public TransportNodesCapabilitiesAction(
ClusterService clusterService,
TransportService transportService,
ActionFilters actionFilters,
- RestController restController,
- FeatureService featureService
+ RestController restController
) {
super(
TYPE.name(),
@@ -65,23 +59,6 @@ public TransportNodesCapabilitiesAction(
threadPool.executor(ThreadPool.Names.MANAGEMENT)
);
this.restController = restController;
- this.featureService = featureService;
- }
-
- @Override
- protected void doExecute(Task task, NodesCapabilitiesRequest request, ActionListener<NodesCapabilitiesResponse> listener) {
- if (featureService.clusterHasFeature(clusterService.state(), RestNodesCapabilitiesAction.CAPABILITIES_ACTION) == false) {
- // not everything in the cluster supports capabilities.
- // Therefore we don't support whatever it is we're being asked for
- listener.onResponse(new NodesCapabilitiesResponse(clusterService.getClusterName(), List.of(), List.of()) {
- @Override
- public Optional<Boolean> isSupported() {
- return Optional.of(false);
- }
- });
- } else {
- super.doExecute(task, request, listener);
- }
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java
index 7e3c38c735091..ca02d19749ae7 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java
@@ -13,13 +13,18 @@
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ActionType;
-import org.elasticsearch.action.support.master.MasterNodeReadRequest;
+import org.elasticsearch.action.support.local.LocalClusterStateRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.core.UpdateForV10;
+import org.elasticsearch.tasks.CancellableTask;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
import java.io.IOException;
+import java.util.Map;
import java.util.Objects;
public class ClusterGetSettingsAction extends ActionType<ClusterGetSettingsAction.Response> {
@@ -34,25 +39,29 @@ public ClusterGetSettingsAction() {
/**
* Request to retrieve the cluster settings
*/
- public static class Request extends MasterNodeReadRequest<Request> {
+ public static class Request extends LocalClusterStateRequest {
public Request(TimeValue masterNodeTimeout) {
super(masterNodeTimeout);
}
+ /**
+ * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to read these requests until
+ * we no longer need to support calling this action remotely.
+ */
+ @UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA)
public Request(StreamInput in) throws IOException {
super(in);
assert in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0);
}
@Override
- public void writeTo(StreamOutput out) throws IOException {
- assert out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0);
- super.writeTo(out);
+ public ActionRequestValidationException validate() {
+ return null;
}
@Override
- public ActionRequestValidationException validate() {
- return null;
+ public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
+ return new CancellableTask(id, type, action, "", parentTaskId, headers);
}
}
@@ -79,20 +88,17 @@ public int hashCode() {
return Objects.hash(persistentSettings, transientSettings, settings);
}
- public Response(StreamInput in) throws IOException {
- super(in);
- assert in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0);
- persistentSettings = Settings.readSettingsFromStream(in);
- transientSettings = Settings.readSettingsFromStream(in);
- settings = Settings.readSettingsFromStream(in);
- }
-
public Response(Settings persistentSettings, Settings transientSettings, Settings settings) {
this.persistentSettings = Objects.requireNonNullElse(persistentSettings, Settings.EMPTY);
this.transientSettings = Objects.requireNonNullElse(transientSettings, Settings.EMPTY);
this.settings = Objects.requireNonNullElse(settings, Settings.EMPTY);
}
+ /**
+ * NB prior to 9.0 cluster-get-settings was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until
+ * we no longer need to support calling this action remotely.
+ */
+ @UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA)
@Override
public void writeTo(StreamOutput out) throws IOException {
assert out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterGetSettingsAction.java
index dce6a38001392..71b976e012aad 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterGetSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterGetSettingsAction.java
@@ -11,57 +11,66 @@
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
+import org.elasticsearch.action.support.ChannelActionListener;
+import org.elasticsearch.action.support.local.TransportLocalClusterStateAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
-import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.injection.guice.Inject;
+import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
-import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
-public class TransportClusterGetSettingsAction extends TransportMasterNodeReadAction<
+public class TransportClusterGetSettingsAction extends TransportLocalClusterStateAction<
ClusterGetSettingsAction.Request,
ClusterGetSettingsAction.Response> {
private final SettingsFilter settingsFilter;
+ /**
+ * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService until
+ * we no longer need to support calling this action remotely.
+ */
+ @UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA)
+ @SuppressWarnings("this-escape")
@Inject
public TransportClusterGetSettingsAction(
TransportService transportService,
ClusterService clusterService,
- ThreadPool threadPool,
SettingsFilter settingsFilter,
- ActionFilters actionFilters,
- IndexNameExpressionResolver indexNameExpressionResolver
+ ActionFilters actionFilters
) {
super(
ClusterGetSettingsAction.NAME,
- false,
- transportService,
- clusterService,
- threadPool,
actionFilters,
- ClusterGetSettingsAction.Request::new,
- indexNameExpressionResolver,
- ClusterGetSettingsAction.Response::new,
+ transportService.getTaskManager(),
+ clusterService,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
-
this.settingsFilter = settingsFilter;
+
+ transportService.registerRequestHandler(
+ actionName,
+ executor,
+ false,
+ true,
+ ClusterGetSettingsAction.Request::new,
+ (request, channel, task) -> executeDirect(task, request, new ChannelActionListener<>(channel))
+ );
}
@Override
- protected void masterOperation(
+ protected void localClusterStateOperation(
Task task,
ClusterGetSettingsAction.Request request,
ClusterState state,
ActionListener listener
) throws Exception {
+ ((CancellableTask) task).ensureNotCancelled();
Metadata metadata = state.metadata();
listener.onResponse(
new ClusterGetSettingsAction.Response(
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java
index 1bc2e1d13c864..29a124b3d0b20 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java
@@ -21,7 +21,7 @@
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.features.NodeFeature;
-import org.elasticsearch.index.mapper.SourceFieldMapper;
+import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.xcontent.ToXContentFragment;
import org.elasticsearch.xcontent.XContentBuilder;
@@ -71,7 +71,7 @@ public static MappingStats of(Metadata metadata, Runnable ensureNotCancelled) {
}
AnalysisStats.countMapping(mappingCounts, indexMetadata);
- var sourceMode = SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.get(indexMetadata.getSettings());
+ var sourceMode = IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.get(indexMetadata.getSettings());
sourceModeUsageCount.merge(sourceMode.toString().toLowerCase(Locale.ENGLISH), 1, Integer::sum);
}
final AtomicLong totalFieldCount = new AtomicLong();
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java
index dd4114c947174..84789d8a2acfb 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequest.java
@@ -12,6 +12,7 @@
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.info.ClusterInfoRequest;
import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.core.TimeValue;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
@@ -21,7 +22,9 @@
public class GetMappingsRequest extends ClusterInfoRequest<GetMappingsRequest> {
- public GetMappingsRequest() {}
+ public GetMappingsRequest(TimeValue masterTimeout) {
+ super(masterTimeout);
+ }
public GetMappingsRequest(StreamInput in) throws IOException {
super(in);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
index 3f54138581398..a12ba4f60c26a 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
@@ -11,13 +11,14 @@
import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder;
import org.elasticsearch.client.internal.ElasticsearchClient;
+import org.elasticsearch.core.TimeValue;
public class GetMappingsRequestBuilder extends ClusterInfoRequestBuilder<
GetMappingsRequest,
GetMappingsResponse,
GetMappingsRequestBuilder> {
- public GetMappingsRequestBuilder(ElasticsearchClient client, String... indices) {
- super(client, GetMappingsAction.INSTANCE, new GetMappingsRequest().indices(indices));
+ public GetMappingsRequestBuilder(ElasticsearchClient client, TimeValue masterTimeout, String... indices) {
+ super(client, GetMappingsAction.INSTANCE, new GetMappingsRequest(masterTimeout).indices(indices));
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java
index be08293fe90db..c845d1a3854c9 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java
@@ -33,7 +33,6 @@
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Iterators;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
@@ -55,8 +54,6 @@ public final class LazyRolloverAction extends ActionType<RolloverResponse> {
private static final Logger logger = LogManager.getLogger(LazyRolloverAction.class);
- public static final NodeFeature DATA_STREAM_LAZY_ROLLOVER = new NodeFeature("data_stream.rollover.lazy", true);
-
public static final LazyRolloverAction INSTANCE = new LazyRolloverAction();
public static final String NAME = "indices:admin/data_stream/lazy_rollover";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
index 67b6df150c458..6106e620521f7 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
@@ -12,7 +12,6 @@
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.core.Nullable;
-import org.elasticsearch.features.NodeFeature;
import java.util.ArrayList;
import java.util.HashMap;
@@ -22,9 +21,6 @@
public class IndexStats implements Iterable<IndexShardStats> {
- // feature was effectively reverted but we still need to keep this constant around
- public static final NodeFeature REVERTED_TIER_CREATION_DATE = new NodeFeature("stats.tier_creation_date", true);
-
private final String index;
private final String uuid;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java
deleted file mode 100644
index 558343db1023a..0000000000000
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsFeatures.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.action.admin.indices.stats;
-
-import org.elasticsearch.features.FeatureSpecification;
-import org.elasticsearch.features.NodeFeature;
-
-import java.util.Set;
-
-public class IndicesStatsFeatures implements FeatureSpecification {
-
- @Override
- public Set<NodeFeature> getFeatures() {
- return Set.of(IndexStats.REVERTED_TIER_CREATION_DATE);
- }
-}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java
index ce29d65ececf9..003be58d19554 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java
@@ -12,16 +12,20 @@
import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
-import org.elasticsearch.action.support.master.MasterNodeReadRequest;
+import org.elasticsearch.action.support.local.LocalClusterStateRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.Nullable;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.tasks.CancellableTask;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
import java.io.IOException;
+import java.util.Map;
import java.util.Objects;
-public class SimulateIndexTemplateRequest extends MasterNodeReadRequest<SimulateIndexTemplateRequest> {
+public class SimulateIndexTemplateRequest extends LocalClusterStateRequest {
private String indexName;
@@ -30,14 +34,18 @@ public class SimulateIndexTemplateRequest extends MasterNodeReadRequest<
+ @Override
+ public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
+ return new CancellableTask(id, type, action, "", parentTaskId, headers);
+ }
+
public String getIndexName() {
return indexName;
}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
index a521dac60e96a..1a04b6e4d7633 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
@@ -12,13 +12,11 @@
import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration;
-import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention;
import org.elasticsearch.cluster.metadata.ResettableValue;
import org.elasticsearch.cluster.metadata.Template;
-import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.util.Maps;
import org.elasticsearch.core.Nullable;
+import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.ToXContentObject;
import org.elasticsearch.xcontent.XContentBuilder;
@@ -67,27 +65,11 @@ public RolloverConfiguration getRolloverConfiguration() {
return rolloverConfiguration;
}
- public SimulateIndexTemplateResponse(StreamInput in) throws IOException {
- super(in);
- resolvedTemplate = in.readOptionalWriteable(Template::new);
- if (in.readBoolean()) {
- int overlappingTemplatesCount = in.readInt();
- overlappingTemplates = Maps.newMapWithExpectedSize(overlappingTemplatesCount);
- for (int i = 0; i < overlappingTemplatesCount; i++) {
- String templateName = in.readString();
- overlappingTemplates.put(templateName, in.readStringCollectionAsList());
- }
- } else {
- this.overlappingTemplates = null;
- }
- rolloverConfiguration = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)
- ? in.readOptionalWriteable(RolloverConfiguration::new)
- : null;
- if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) {
- in.readOptionalWriteable(DataStreamGlobalRetention::read);
- }
- }
-
+ /**
+ * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until
+ * we no longer need to support calling this action remotely.
+ */
+ @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT)
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalWriteable(resolvedTemplate);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java
index 75cc72416a854..15015b910767e 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java
@@ -14,12 +14,16 @@
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
-import org.elasticsearch.action.support.master.MasterNodeReadRequest;
+import org.elasticsearch.action.support.local.LocalClusterStateRequest;
import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.Nullable;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.tasks.CancellableTask;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
import java.io.IOException;
+import java.util.Map;
import java.util.Objects;
/**
@@ -35,7 +39,7 @@ private SimulateTemplateAction() {
super(NAME);
}
- public static class Request extends MasterNodeReadRequest<Request> {
+ public static class Request extends LocalClusterStateRequest {
@Nullable
private String templateName;
@@ -44,26 +48,15 @@ public static class Request extends MasterNodeReadRequest<Request> {
private TransportPutComposableIndexTemplateAction.Request indexTemplateRequest;
private boolean includeDefaults = false;
- public Request() {
- super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
- }
-
- public Request(String templateName) {
- super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
- if (templateName == null) {
- throw new IllegalArgumentException("template name cannot be null");
- }
+ public Request(TimeValue masterTimeout, String templateName) {
+ super(masterTimeout);
this.templateName = templateName;
}
- public Request(TransportPutComposableIndexTemplateAction.Request indexTemplateRequest) {
- super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
- if (indexTemplateRequest == null) {
- throw new IllegalArgumentException("index template body must be present");
- }
- this.indexTemplateRequest = indexTemplateRequest;
- }
-
+ /**
+ * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC we must remain able to read these requests until
+ * we no longer need to support calling this action remotely.
+ */
public Request(StreamInput in) throws IOException {
super(in);
templateName = in.readOptionalString();
@@ -73,16 +66,6 @@ public Request(StreamInput in) throws IOException {
}
}
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- super.writeTo(out);
- out.writeOptionalString(templateName);
- out.writeOptionalWriteable(indexTemplateRequest);
- if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
- out.writeBoolean(includeDefaults);
- }
- }
-
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
@@ -98,6 +81,11 @@ public ActionRequestValidationException validate() {
return validationException;
}
+ @Override
+ public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
+ return new CancellableTask(id, type, action, "", parentTaskId, headers);
+ }
+
@Nullable
public String getTemplateName() {
return templateName;
@@ -112,11 +100,6 @@ public TransportPutComposableIndexTemplateAction.Request getIndexTemplateRequest
return indexTemplateRequest;
}
- public Request templateName(String templateName) {
- this.templateName = templateName;
- return this;
- }
-
public Request indexTemplateRequest(TransportPutComposableIndexTemplateAction.Request indexTemplateRequest) {
this.indexTemplateRequest = indexTemplateRequest;
return this;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java
index d3d557b598b3a..74936128caa25 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java
@@ -11,7 +11,8 @@
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
+import org.elasticsearch.action.support.ChannelActionListener;
+import org.elasticsearch.action.support.local.TransportLocalClusterStateAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
@@ -20,7 +21,6 @@
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
import org.elasticsearch.cluster.metadata.IndexMetadata;
-import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.metadata.MetadataCreateIndexService;
import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService;
@@ -31,6 +31,7 @@
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettingProvider;
import org.elasticsearch.index.IndexSettingProviders;
@@ -42,7 +43,6 @@
import org.elasticsearch.indices.SystemIndices;
import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.tasks.Task;
-import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xcontent.NamedXContentRegistry;
@@ -65,7 +65,7 @@
import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.resolveLifecycle;
import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.resolveSettings;
-public class TransportSimulateIndexTemplateAction extends TransportMasterNodeReadAction<
+public class TransportSimulateIndexTemplateAction extends TransportLocalClusterStateAction<
SimulateIndexTemplateRequest,
SimulateIndexTemplateResponse> {
@@ -77,14 +77,18 @@ public class TransportSimulateIndexTemplateAction extends TransportMasterNodeRea
private final ClusterSettings clusterSettings;
private final boolean isDslOnlyMode;
+ /**
+ * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService until
+ * we no longer need to support calling this action remotely.
+ */
+ @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT)
+ @SuppressWarnings("this-escape")
@Inject
public TransportSimulateIndexTemplateAction(
TransportService transportService,
ClusterService clusterService,
- ThreadPool threadPool,
MetadataIndexTemplateService indexTemplateService,
ActionFilters actionFilters,
- IndexNameExpressionResolver indexNameExpressionResolver,
NamedXContentRegistry xContentRegistry,
IndicesService indicesService,
SystemIndices systemIndices,
@@ -92,13 +96,9 @@ public TransportSimulateIndexTemplateAction(
) {
super(
SimulateIndexTemplateAction.NAME,
- transportService,
- clusterService,
- threadPool,
actionFilters,
- SimulateIndexTemplateRequest::new,
- indexNameExpressionResolver,
- SimulateIndexTemplateResponse::new,
+ transportService.getTaskManager(),
+ clusterService,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.indexTemplateService = indexTemplateService;
@@ -108,10 +108,19 @@ public TransportSimulateIndexTemplateAction(
this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders();
this.clusterSettings = clusterService.getClusterSettings();
this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings());
+
+ transportService.registerRequestHandler(
+ actionName,
+ executor,
+ false,
+ true,
+ SimulateIndexTemplateRequest::new,
+ (request, channel, task) -> executeDirect(task, request, new ChannelActionListener<>(channel))
+ );
}
@Override
- protected void masterOperation(
+ protected void localClusterStateOperation(
Task task,
SimulateIndexTemplateRequest request,
ClusterState state,
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java
index 30bbad0b57df0..692f027b23f9e 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java
@@ -11,26 +11,26 @@
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
+import org.elasticsearch.action.support.ChannelActionListener;
+import org.elasticsearch.action.support.local.TransportLocalClusterStateAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
-import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService;
import org.elasticsearch.cluster.metadata.Template;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.index.IndexSettingProvider;
import org.elasticsearch.index.IndexSettingProviders;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.SystemIndices;
import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.tasks.Task;
-import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xcontent.NamedXContentRegistry;
@@ -48,7 +48,7 @@
* Handles simulating an index template either by name (looking it up in the
* cluster state), or by a provided template configuration
*/
-public class TransportSimulateTemplateAction extends TransportMasterNodeReadAction<
+public class TransportSimulateTemplateAction extends TransportLocalClusterStateAction<
SimulateTemplateAction.Request,
SimulateIndexTemplateResponse> {
@@ -60,14 +60,18 @@ public class TransportSimulateTemplateAction extends TransportMasterNodeReadActi
private final ClusterSettings clusterSettings;
private final boolean isDslOnlyMode;
+ /**
+ * NB prior to 9.0 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService until
+ * we no longer need to support calling this action remotely.
+ */
+ @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT)
+ @SuppressWarnings("this-escape")
@Inject
public TransportSimulateTemplateAction(
TransportService transportService,
ClusterService clusterService,
- ThreadPool threadPool,
MetadataIndexTemplateService indexTemplateService,
ActionFilters actionFilters,
- IndexNameExpressionResolver indexNameExpressionResolver,
NamedXContentRegistry xContentRegistry,
IndicesService indicesService,
SystemIndices systemIndices,
@@ -75,13 +79,9 @@ public TransportSimulateTemplateAction(
) {
super(
SimulateTemplateAction.NAME,
- transportService,
- clusterService,
- threadPool,
actionFilters,
- SimulateTemplateAction.Request::new,
- indexNameExpressionResolver,
- SimulateIndexTemplateResponse::new,
+ transportService.getTaskManager(),
+ clusterService,
EsExecutors.DIRECT_EXECUTOR_SERVICE
);
this.indexTemplateService = indexTemplateService;
@@ -91,10 +91,19 @@ public TransportSimulateTemplateAction(
this.indexSettingProviders = indexSettingProviders.getIndexSettingProviders();
this.clusterSettings = clusterService.getClusterSettings();
this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings());
+
+ transportService.registerRequestHandler(
+ actionName,
+ executor,
+ false,
+ true,
+ SimulateTemplateAction.Request::new,
+ (request, channel, task) -> executeDirect(task, request, new ChannelActionListener<>(channel))
+ );
}
@Override
- protected void masterOperation(
+ protected void localClusterStateOperation(
Task task,
SimulateTemplateAction.Request request,
ClusterState state,
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java
index 998a3ada5d157..5851549977eab 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java
@@ -14,24 +14,10 @@
import java.util.Set;
-import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_COMPONENT_TEMPLATE_SUBSTITUTIONS;
import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_IGNORED_FIELDS;
-import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS;
-import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_ADDITION;
-import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_VALIDATION;
-import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_VALIDATION_TEMPLATES;
-import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING;
public class BulkFeatures implements FeatureSpecification {
public Set<NodeFeature> getFeatures() {
- return Set.of(
- SIMULATE_MAPPING_VALIDATION,
- SIMULATE_MAPPING_VALIDATION_TEMPLATES,
- SIMULATE_COMPONENT_TEMPLATE_SUBSTITUTIONS,
- SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS,
- SIMULATE_MAPPING_ADDITION,
- SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING,
- SIMULATE_IGNORED_FIELDS
- );
+ return Set.of(SIMULATE_IGNORED_FIELDS);
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
index 2a6a789d9d312..523381321ada7 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
@@ -44,7 +44,6 @@
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.core.Nullable;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.index.IndexingPressure;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.indices.SystemIndices;
@@ -80,7 +79,6 @@ public class TransportBulkAction extends TransportAbstractBulkAction {
private static final Logger logger = LogManager.getLogger(TransportBulkAction.class);
public static final String LAZY_ROLLOVER_ORIGIN = "lazy_rollover";
- private final FeatureService featureService;
private final NodeClient client;
private final IndexNameExpressionResolver indexNameExpressionResolver;
private final OriginSettingClient rolloverClient;
@@ -93,7 +91,6 @@ public TransportBulkAction(
TransportService transportService,
ClusterService clusterService,
IngestService ingestService,
- FeatureService featureService,
NodeClient client,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
@@ -107,7 +104,6 @@ public TransportBulkAction(
transportService,
clusterService,
ingestService,
- featureService,
client,
actionFilters,
indexNameExpressionResolver,
@@ -124,7 +120,6 @@ public TransportBulkAction(
TransportService transportService,
ClusterService clusterService,
IngestService ingestService,
- FeatureService featureService,
NodeClient client,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
@@ -141,7 +136,6 @@ public TransportBulkAction(
transportService,
clusterService,
ingestService,
- featureService,
client,
actionFilters,
indexNameExpressionResolver,
@@ -160,7 +154,6 @@ public TransportBulkAction(
TransportService transportService,
ClusterService clusterService,
IngestService ingestService,
- FeatureService featureService,
NodeClient client,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
@@ -184,7 +177,6 @@ public TransportBulkAction(
);
this.dataStreamFailureStoreSettings = dataStreamFailureStoreSettings;
Objects.requireNonNull(relativeTimeProvider);
- this.featureService = featureService;
this.client = client;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.rolloverClient = new OriginSettingClient(client, LAZY_ROLLOVER_ORIGIN);
@@ -283,7 +275,6 @@ private void populateMissingTargets(
// A map for memorizing which indices exist.
Map<String, Boolean> indexExistence = new HashMap<>();
Function<String, Boolean> indexExistenceComputation = (index) -> indexNameExpressionResolver.hasIndexAbstraction(index, state);
- boolean lazyRolloverFeature = featureService.clusterHasFeature(state, LazyRolloverAction.DATA_STREAM_LAZY_ROLLOVER);
boolean lazyRolloverFailureStoreFeature = DataStream.isFailureStoreFeatureFlagEnabled();
Set indicesThatRequireAlias = new HashSet<>();
@@ -328,18 +319,15 @@ private void populateMissingTargets(
}
}
// Determine which data streams and failure stores need to be rolled over.
- if (lazyRolloverFeature) {
- DataStream dataStream = state.metadata().dataStreams().get(request.index());
- if (dataStream != null) {
- if (writeToFailureStore == false && dataStream.getBackingIndices().isRolloverOnWrite()) {
- dataStreamsToBeRolledOver.add(request.index());
- } else if (lazyRolloverFailureStoreFeature
- && writeToFailureStore
- && dataStream.getFailureIndices().isRolloverOnWrite()) {
- failureStoresToBeRolledOver.add(request.index());
- }
+ DataStream dataStream = state.metadata().dataStreams().get(request.index());
+ if (dataStream != null) {
+ if (writeToFailureStore == false && dataStream.getBackingIndices().isRolloverOnWrite()) {
+ dataStreamsToBeRolledOver.add(request.index());
+ } else if (lazyRolloverFailureStoreFeature && writeToFailureStore && dataStream.getFailureIndices().isRolloverOnWrite()) {
+ failureStoresToBeRolledOver.add(request.index());
}
}
+
}
}
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
index 2d65bea4ac5c2..18c420d99f525 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java
@@ -83,15 +83,6 @@
* shards are not actually modified).
*/
public class TransportSimulateBulkAction extends TransportAbstractBulkAction {
- public static final NodeFeature SIMULATE_MAPPING_VALIDATION = new NodeFeature("simulate.mapping.validation", true);
- public static final NodeFeature SIMULATE_MAPPING_VALIDATION_TEMPLATES = new NodeFeature("simulate.mapping.validation.templates", true);
- public static final NodeFeature SIMULATE_COMPONENT_TEMPLATE_SUBSTITUTIONS = new NodeFeature(
- "simulate.component.template.substitutions",
- true
- );
- public static final NodeFeature SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS = new NodeFeature("simulate.index.template.substitutions", true);
- public static final NodeFeature SIMULATE_MAPPING_ADDITION = new NodeFeature("simulate.mapping.addition", true);
- public static final NodeFeature SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING = new NodeFeature("simulate.support.non.template.mapping", true);
public static final NodeFeature SIMULATE_IGNORED_FIELDS = new NodeFeature("simulate.ignored.fields");
private final IndicesService indicesService;
diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java
index 578b0a61aafde..2eed45e5afa6d 100644
--- a/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java
+++ b/server/src/main/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingService.java
@@ -23,8 +23,6 @@
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.FeatureService;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.Index;
import java.util.List;
@@ -43,8 +41,6 @@ public class DataStreamAutoShardingService {
private static final Logger logger = LogManager.getLogger(DataStreamAutoShardingService.class);
public static final String DATA_STREAMS_AUTO_SHARDING_ENABLED = "data_streams.auto_sharding.enabled";
- public static final NodeFeature DATA_STREAM_AUTO_SHARDING_FEATURE = new NodeFeature("data_stream.auto_sharding", true);
-
public static final Setting> DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING = Setting.listSetting(
"data_streams.auto_sharding.excludes",
List.of(),
@@ -101,7 +97,6 @@ public class DataStreamAutoShardingService {
);
private final ClusterService clusterService;
private final boolean isAutoShardingEnabled;
- private final FeatureService featureService;
private final LongSupplier nowSupplier;
private volatile TimeValue increaseShardsCooldown;
private volatile TimeValue reduceShardsCooldown;
@@ -109,12 +104,7 @@ public class DataStreamAutoShardingService {
private volatile int maxWriteThreads;
private volatile List dataStreamExcludePatterns;
- public DataStreamAutoShardingService(
- Settings settings,
- ClusterService clusterService,
- FeatureService featureService,
- LongSupplier nowSupplier
- ) {
+ public DataStreamAutoShardingService(Settings settings, ClusterService clusterService, LongSupplier nowSupplier) {
this.clusterService = clusterService;
this.isAutoShardingEnabled = settings.getAsBoolean(DATA_STREAMS_AUTO_SHARDING_ENABLED, false);
this.increaseShardsCooldown = DATA_STREAMS_AUTO_SHARDING_INCREASE_SHARDS_COOLDOWN.get(settings);
@@ -122,7 +112,6 @@ public DataStreamAutoShardingService(
this.minWriteThreads = CLUSTER_AUTO_SHARDING_MIN_WRITE_THREADS.get(settings);
this.maxWriteThreads = CLUSTER_AUTO_SHARDING_MAX_WRITE_THREADS.get(settings);
this.dataStreamExcludePatterns = DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.get(settings);
- this.featureService = featureService;
this.nowSupplier = nowSupplier;
}
@@ -168,15 +157,6 @@ public AutoShardingResult calculate(ClusterState state, DataStream dataStream, @
return NOT_APPLICABLE_RESULT;
}
- if (featureService.clusterHasFeature(state, DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE) == false) {
- logger.debug(
- "Data stream auto sharding service cannot compute the optimal number of shards for data stream [{}] because the cluster "
- + "doesn't have the auto sharding feature",
- dataStream.getName()
- );
- return NOT_APPLICABLE_RESULT;
- }
-
if (dataStreamExcludePatterns.stream().anyMatch(pattern -> Regex.simpleMatch(pattern, dataStream.getName()))) {
logger.debug(
"Data stream [{}] is excluded from auto sharding via the [{}] setting",
diff --git a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java
index 943b03588d4ba..634a103e9754a 100644
--- a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java
@@ -16,6 +16,7 @@
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.core.TimeValue;
import java.io.IOException;
@@ -27,11 +28,22 @@ public abstract class ClusterInfoRequest getMappings(GetMappingsRequest request)
return execute(GetMappingsAction.INSTANCE, request);
}
- public GetMappingsRequestBuilder prepareGetMappings(String... indices) {
- return new GetMappingsRequestBuilder(this, indices);
+ public GetMappingsRequestBuilder prepareGetMappings(TimeValue masterTimeout, String... indices) {
+ return new GetMappingsRequestBuilder(this, masterTimeout, indices);
}
public void getFieldMappings(GetFieldMappingsRequest request, ActionListener listener) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java
index 673960c713391..17267525d4bdf 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamGlobalRetention.java
@@ -14,7 +14,6 @@
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.NodeFeature;
import java.io.IOException;
@@ -23,7 +22,6 @@
*/
public record DataStreamGlobalRetention(@Nullable TimeValue defaultRetention, @Nullable TimeValue maxRetention) implements Writeable {
- public static final NodeFeature GLOBAL_RETENTION = new NodeFeature("data_stream.lifecycle.global_retention", true);
public static final TimeValue MIN_RETENTION_VALUE = TimeValue.timeValueSeconds(10);
/**
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java
index 353f17fe0e00c..8366083b1907e 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java
@@ -23,7 +23,6 @@
import org.elasticsearch.common.unit.Processors;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.UpdateForV9;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.xcontent.ConstructingObjectParser;
import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.ParseField;
@@ -46,9 +45,6 @@
public final class DesiredNode implements Writeable, ToXContentObject, Comparable {
- public static final NodeFeature RANGE_FLOAT_PROCESSORS_SUPPORTED = new NodeFeature("desired_node.range_float_processors");
- public static final NodeFeature DESIRED_NODE_VERSION_DEPRECATED = new NodeFeature("desired_node.version_deprecated", true);
-
public static final TransportVersion RANGE_FLOAT_PROCESSORS_SUPPORT_TRANSPORT_VERSION = TransportVersions.V_8_3_0;
private static final ParseField SETTINGS_FIELD = new ParseField("settings");
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java
index 7fc8a8693dcdc..e24d921ba78fd 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java
@@ -69,7 +69,6 @@
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
-import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.query.SearchExecutionContext;
import org.elasticsearch.indices.IndexCreationException;
import org.elasticsearch.indices.IndicesService;
@@ -237,6 +236,9 @@ public SystemIndices getSystemIndices() {
* Validate the name for an index or alias against some static rules.
*/
public static void validateIndexOrAliasName(String index, BiFunction exceptionCtor) {
+ if (index == null || index.isEmpty()) {
+ throw exceptionCtor.apply(index, "must not be empty");
+ }
if (Strings.validFileName(index) == false) {
throw exceptionCtor.apply(index, "must not contain the following characters " + Strings.INVALID_FILENAME_CHARS);
}
@@ -1583,7 +1585,7 @@ static void validateCloneIndex(
private static final Set UNMODIFIABLE_SETTINGS_DURING_RESIZE = Set.of(
IndexSettings.MODE.getKey(),
- SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(),
+ IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(),
IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(),
IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(),
IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(),
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java
deleted file mode 100644
index 49bd38330e3af..0000000000000
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataFeatures.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.cluster.metadata;
-
-import org.elasticsearch.features.FeatureSpecification;
-import org.elasticsearch.features.NodeFeature;
-
-import java.util.Set;
-
-public class MetadataFeatures implements FeatureSpecification {
- @Override
- public Set getFeatures() {
- return Set.of(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED);
- }
-}
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java
index e38cd677991f3..24b14a46c8782 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java
@@ -54,8 +54,6 @@
*/
public abstract class IndexRouting {
- static final NodeFeature BOOLEAN_ROUTING_PATH = new NodeFeature("routing.boolean_routing_path", true);
- static final NodeFeature MULTI_VALUE_ROUTING_PATH = new NodeFeature("routing.multi_value_routing_path", true);
static final NodeFeature LOGSB_ROUTE_ON_SORT_FIELDS = new NodeFeature("routing.logsb_route_on_sort_fields");
/**
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingFeatures.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingFeatures.java
index 1545fdf90d111..461ac50e1efc8 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingFeatures.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingFeatures.java
@@ -18,7 +18,7 @@ public class RoutingFeatures implements FeatureSpecification {
@Override
public Set getFeatures() {
- return Set.of(IndexRouting.BOOLEAN_ROUTING_PATH, IndexRouting.MULTI_VALUE_ROUTING_PATH);
+ return Set.of();
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
index 5d1e6741c5e22..2f2fd4ef453f6 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
@@ -158,9 +158,7 @@ public ClusterState applyStartedShards(ClusterState clusterState, List ass
* The placeholder value for {@link DesiredBalance} when the node stands down as master.
*/
public static final DesiredBalance NOT_MASTER = new DesiredBalance(-2, Map.of());
+
/**
* The starting value for {@link DesiredBalance} when the node becomes the master.
*/
@@ -57,6 +65,10 @@ public static boolean hasChanges(DesiredBalance a, DesiredBalance b) {
return Objects.equals(a.assignments, b.assignments) == false;
}
+ /**
+ * Returns the sum of shard movements needed to reach the new desired balance. Doesn't count new shard copies as a move, nor removal or
+ * unassignment of a shard copy.
+ */
public static int shardMovements(DesiredBalance old, DesiredBalance updated) {
var intersection = Sets.intersection(old.assignments().keySet(), updated.assignments().keySet());
int movements = 0;
@@ -70,8 +82,15 @@ public static int shardMovements(DesiredBalance old, DesiredBalance updated) {
return movements;
}
+ /**
+ * Returns the number of shard movements needed to reach the new shard assignment. Doesn't count new shard copies as a move, nor removal
+ * or unassignment of a shard copy.
+ */
private static int shardMovements(ShardAssignment old, ShardAssignment updated) {
- var movements = Math.min(0, old.assigned() - updated.assigned());// compensate newly started shards
+ // A shard move should retain the same number of assigned nodes, just swap out one node for another. We will compensate for newly
+ // started shards -- adding a shard copy is not a move -- by initializing the count with a negative value so that incrementing later
+ // for a new node zeros out.
+ var movements = Math.min(0, old.assigned() - updated.assigned());
for (String nodeId : updated.nodeIds()) {
if (old.nodeIds().contains(nodeId) == false) {
movements++;
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java
index 3b22221ea7db4..03630c284fa30 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java
@@ -415,11 +415,14 @@ boolean hasEnoughIterations(int currentIteration) {
}
private static Map collectShardAssignments(RoutingNodes routingNodes) {
- final var entries = routingNodes.getAssignedShards().entrySet();
- assert entries.stream().flatMap(t -> t.getValue().stream()).allMatch(ShardRouting::started) : routingNodes;
- final Map res = Maps.newHashMapWithExpectedSize(entries.size());
- for (var shardAndAssignments : entries) {
- res.put(shardAndAssignments.getKey(), ShardAssignment.ofAssignedShards(shardAndAssignments.getValue()));
+ final var allAssignedShards = routingNodes.getAssignedShards().entrySet();
+ assert allAssignedShards.stream().flatMap(t -> t.getValue().stream()).allMatch(ShardRouting::started) : routingNodes;
+ final Map res = Maps.newHashMapWithExpectedSize(allAssignedShards.size());
+ for (var shardIdAndShardRoutings : allAssignedShards) {
+ res.put(
+ shardIdAndShardRoutings.getKey(),
+ ShardAssignment.createFromAssignedShardRoutingsList(shardIdAndShardRoutings.getValue())
+ );
}
return res;
}
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java
index 83b370c1a7928..909a7a7a99a61 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java
@@ -54,6 +54,10 @@ public class DesiredBalanceReconciler {
private static final Logger logger = LogManager.getLogger(DesiredBalanceReconciler.class);
+ /**
+ * The minimum interval that log messages will be written if the number of undesired shard allocations reaches the percentage of total
+ * shards set by {@link #UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING}.
+ */
public static final Setting UNDESIRED_ALLOCATIONS_LOG_INTERVAL_SETTING = Setting.timeSetting(
"cluster.routing.allocation.desired_balance.undesired_allocations.log_interval",
TimeValue.timeValueHours(1),
@@ -62,6 +66,10 @@ public class DesiredBalanceReconciler {
Setting.Property.NodeScope
);
+ /**
+ * Warning log messages may be periodically written if the number of shards that are on undesired nodes reaches this percentage setting.
+ * Works together with {@link #UNDESIRED_ALLOCATIONS_LOG_INTERVAL_SETTING} to log on a periodic basis.
+ */
public static final Setting UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING = Setting.doubleSetting(
"cluster.routing.allocation.desired_balance.undesired_allocations.threshold",
0.1,
@@ -96,6 +104,13 @@ public DesiredBalanceReconciler(
this.nodeAllocationStatsAndWeightsCalculator = nodeAllocationStatsAndWeightsCalculator;
}
+ /**
+ * Applies a desired shard allocation to the routing table by initializing and relocating shards in the cluster state.
+ *
+ * @param desiredBalance The new desired cluster shard allocation
+ * @param allocation Cluster state information with which to make decisions, contains routing table metadata that will be modified to
+ * reach the given desired balance.
+ */
public void reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) {
var nodeIds = allocation.routingNodes().getAllNodeIds();
allocationOrdering.retainNodes(nodeIds);
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java
index 5be26f0b3e8c7..2c73a27ad3418 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java
@@ -17,6 +17,7 @@
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata;
import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
@@ -56,11 +57,30 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
private final ShardsAllocator delegateAllocator;
private final ThreadPool threadPool;
+ /**
+ * This is a callback to run {@link AllocationService#executeWithRoutingAllocation(ClusterState, String, RerouteStrategy)}, which
+ * produces a new ClusterState with the changes made by {@link DesiredBalanceReconciler#reconcile}. The {@link RerouteStrategy} provided
+ * to the callback calls into {@link #desiredBalanceReconciler} for the changes. The {@link #masterServiceTaskQueue} will publish the
+ * new cluster state after the cluster state is constructed by the {@link ReconcileDesiredBalanceExecutor}.
+ */
private final DesiredBalanceReconcilerAction reconciler;
private final DesiredBalanceComputer desiredBalanceComputer;
+ /**
+ * Reconciliation ({@link DesiredBalanceReconciler#reconcile(DesiredBalance, RoutingAllocation)}) takes the {@link DesiredBalance}
+ * output of {@link DesiredBalanceComputer#compute} and identifies how shards need to be added, moved or removed to go from the current
+ * cluster shard allocation to the new desired allocation.
+ */
private final DesiredBalanceReconciler desiredBalanceReconciler;
private final ContinuousComputation desiredBalanceComputation;
- private final PendingListenersQueue queue;
+ /**
+ * Saves and runs listeners after DesiredBalance computations complete.
+ */
+ private final PendingListenersQueue pendingListenersQueue;
+ /**
+ * Each reroute request gets assigned a monotonically increasing sequence number. Many reroute requests may arrive before the balancer
+ * asynchronously runs a computation. The balancer will use the latest request and save this sequence number to track back to the
+ * request.
+ */
private final AtomicLong indexGenerator = new AtomicLong(-1);
private final ConcurrentLinkedQueue> pendingDesiredBalanceMoves = new ConcurrentLinkedQueue<>();
private final MasterServiceTaskQueue masterServiceTaskQueue;
@@ -199,7 +219,7 @@ public String toString() {
return "DesiredBalanceShardsAllocator#allocate";
}
};
- this.queue = new PendingListenersQueue();
+ this.pendingListenersQueue = new PendingListenersQueue();
this.masterServiceTaskQueue = clusterService.createTaskQueue(
"reconcile-desired-balance",
Priority.URGENT,
@@ -235,7 +255,7 @@ public void allocate(RoutingAllocation allocation, ActionListener listener
var index = indexGenerator.incrementAndGet();
logger.debug("Executing allocate for [{}]", index);
- queue.add(index, listener);
+ pendingListenersQueue.add(index, listener);
// This can only run on master, so unset not-master if exists
if (currentDesiredBalanceRef.compareAndSet(DesiredBalance.NOT_MASTER, DesiredBalance.BECOME_MASTER_INITIAL)) {
logger.debug("initialized desired balance for becoming master");
@@ -378,7 +398,7 @@ public DesiredBalanceStats getStats() {
private void onNoLongerMaster() {
if (indexGenerator.getAndSet(-1) != -1) {
currentDesiredBalanceRef.set(DesiredBalance.NOT_MASTER);
- queue.completeAllAsNotMaster();
+ pendingListenersQueue.completeAllAsNotMaster();
pendingDesiredBalanceMoves.clear();
desiredBalanceReconciler.clear();
desiredBalanceMetrics.zeroAllMetrics();
@@ -428,7 +448,7 @@ private ClusterState applyBalance(
batchExecutionContext.initialState(),
createReconcileAllocationAction(latest.getTask().desiredBalance)
);
- latest.success(() -> queue.complete(latest.getTask().desiredBalance.lastConvergedIndex()));
+ latest.success(() -> pendingListenersQueue.complete(latest.getTask().desiredBalance.lastConvergedIndex()));
return newState;
}
}
@@ -447,7 +467,7 @@ private static void discardSupersededTasks(
// only for tests - in production, this happens after reconciliation
protected final void completeToLastConvergedIndex() {
- queue.complete(currentDesiredBalanceRef.get().lastConvergedIndex());
+ pendingListenersQueue.complete(currentDesiredBalanceRef.get().lastConvergedIndex());
}
private void recordTime(CounterMetric metric, Runnable action) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/PendingListenersQueue.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/PendingListenersQueue.java
index e1b58cf79ac09..5b14277f2c651 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/PendingListenersQueue.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/PendingListenersQueue.java
@@ -24,6 +24,10 @@
import static org.elasticsearch.cluster.service.ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME;
import static org.elasticsearch.cluster.service.MasterService.MASTER_UPDATE_THREAD_NAME;
+/**
+ * Registers listeners with an `index` number ({@link #add(long, ActionListener)}) and then completes them whenever the latest index number
+ * is greater or equal to a listener's index value ({@link #complete(long)}).
+ */
public class PendingListenersQueue {
private static final Logger logger = LogManager.getLogger(PendingListenersQueue.class);
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardAssignment.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardAssignment.java
index 4fb9137cb4544..2bd1b9bb2bb64 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardAssignment.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardAssignment.java
@@ -17,6 +17,14 @@
import static java.util.Collections.unmodifiableSet;
+/**
+ * Simple shard assignment summary of shard copies for a particular index shard.
+ *
+ * @param nodeIds The node IDs of nodes holding a shard copy.
+ * @param total The total number of shard copies.
+ * @param unassigned The number of unassigned shard copies.
+ * @param ignored The number of ignored shard copies.
+ */
public record ShardAssignment(Set nodeIds, int total, int unassigned, int ignored) {
public ShardAssignment {
@@ -28,9 +36,13 @@ public int assigned() {
return nodeIds.size();
}
- public static ShardAssignment ofAssignedShards(List routings) {
+ /**
+ * Helper method to instantiate a new ShardAssignment from a given list of ShardRouting instances. Assumes all shards are assigned.
+ */
+ public static ShardAssignment createFromAssignedShardRoutingsList(List routings) {
var nodeIds = new LinkedHashSet();
for (ShardRouting routing : routings) {
+ assert routing.unassignedInfo() == null : "Expected assigned shard copies only, unassigned info: " + routing.unassignedInfo();
nodeIds.add(routing.currentNodeId());
}
return new ShardAssignment(unmodifiableSet(nodeIds), routings.size(), 0, 0);
diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
index c34d0d19988c8..05d4b29f8f28f 100644
--- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
@@ -158,6 +158,31 @@ public void run() {
}
}
+ private record TimedListener(ActionListener listener, Recorder recorder) implements ActionListener {
+
+ @Override
+ public void onResponse(Void response) {
+ try (Releasable ignored = recorder.record("listener.onResponse")) {
+ listener.onResponse(null);
+ } catch (Exception e) {
+ assert false : e;
+ logger.error("exception thrown by listener.onResponse", e);
+ }
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ assert e != null;
+ try (Releasable ignored = recorder.record("listener.onFailure")) {
+ listener.onFailure(e);
+ } catch (Exception inner) {
+ e.addSuppressed(inner);
+ assert false : e;
+ logger.error(() -> "exception thrown by listener.onFailure", e);
+ }
+ }
+ }
+
@Override
protected synchronized void doStop() {
for (Map.Entry onGoingTimeout : timeoutClusterStateListeners.entrySet()) {
@@ -394,12 +419,14 @@ private void runTask(String source, Function updateF
final long startTimeMillis = threadPool.relativeTimeInMillis();
final Recorder stopWatch = new Recorder(threadPool, slowTaskThreadDumpTimeout);
+ final TimedListener timedListener = new TimedListener(clusterApplyListener, stopWatch);
final ClusterState newClusterState;
try {
try (Releasable ignored = stopWatch.record("running task [" + source + ']')) {
newClusterState = updateFunction.apply(previousClusterState);
}
} catch (Exception e) {
+ timedListener.onFailure(e);
TimeValue executionTime = getTimeSince(startTimeMillis);
logger.trace(
() -> format(
@@ -412,15 +439,14 @@ private void runTask(String source, Function updateF
e
);
warnAboutSlowTaskIfNeeded(executionTime, source, stopWatch);
- clusterApplyListener.onFailure(e);
return;
}
if (previousClusterState == newClusterState) {
+ timedListener.onResponse(null);
TimeValue executionTime = getTimeSince(startTimeMillis);
logger.debug("processing [{}]: took [{}] no change in cluster state", source, executionTime);
warnAboutSlowTaskIfNeeded(executionTime, source, stopWatch);
- clusterApplyListener.onResponse(null);
} else {
if (logger.isTraceEnabled()) {
logger.debug("cluster state updated, version [{}], source [{}]\n{}", newClusterState.version(), source, newClusterState);
@@ -430,6 +456,7 @@ private void runTask(String source, Function updateF
try {
setIsApplyingClusterState();
applyChanges(previousClusterState, newClusterState, source, stopWatch);
+ timedListener.onResponse(null);
TimeValue executionTime = getTimeSince(startTimeMillis);
logger.debug(
"processing [{}]: took [{}] done applying updated cluster state (version: {}, uuid: {})",
@@ -439,8 +466,11 @@ private void runTask(String source, Function updateF
newClusterState.stateUUID()
);
warnAboutSlowTaskIfNeeded(executionTime, source, stopWatch);
- clusterApplyListener.onResponse(null);
} catch (Exception e) {
+ // failing to apply a cluster state with an exception indicates a bug in validation or in one of the appliers; if we
+ // continue we will retry with the same cluster state but that might not help.
+ assert applicationMayFail();
+ timedListener.onFailure(e);
TimeValue executionTime = getTimeSince(startTimeMillis);
if (logger.isTraceEnabled()) {
logger.warn(() -> format("""
@@ -460,10 +490,6 @@ private void runTask(String source, Function updateF
e
);
}
- // failing to apply a cluster state with an exception indicates a bug in validation or in one of the appliers; if we
- // continue we will retry with the same cluster state but that might not help.
- assert applicationMayFail();
- clusterApplyListener.onFailure(e);
} finally {
clearIsApplyingClusterState();
}
diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
index afee2491672ef..3c1f53ca4a2c9 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
@@ -36,7 +36,6 @@
import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper;
import org.elasticsearch.index.mapper.InferenceMetadataFieldsMapper;
import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.FsDirectoryFactory;
import org.elasticsearch.index.store.Store;
@@ -191,7 +190,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
FieldMapper.SYNTHETIC_SOURCE_KEEP_INDEX_SETTING,
IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_WRITE_SETTING,
IgnoredSourceFieldMapper.SKIP_IGNORED_SOURCE_READ_SETTING,
- SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING,
+ IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING,
IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING,
InferenceMetadataFieldsMapper.USE_LEGACY_SEMANTIC_TEXT_FORMAT,
diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java
index aec9c108d898d..16c6844f46402 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java
@@ -1727,7 +1727,7 @@ public static > Setting enumSetting(
*
* @param key the key for the setting
* @param defaultValue the default value for this setting
- * @param properties properties properties for this setting like scope, filtering...
+ * @param properties properties for this setting like scope, filtering...
* @return the setting object
*/
public static Setting memorySizeSetting(String key, ByteSizeValue defaultValue, Property... properties) {
diff --git a/server/src/main/java/org/elasticsearch/health/HealthFeatures.java b/server/src/main/java/org/elasticsearch/health/HealthFeatures.java
deleted file mode 100644
index 72fc955320b94..0000000000000
--- a/server/src/main/java/org/elasticsearch/health/HealthFeatures.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.health;
-
-import org.elasticsearch.features.FeatureSpecification;
-import org.elasticsearch.features.NodeFeature;
-
-import java.util.Set;
-
-public class HealthFeatures implements FeatureSpecification {
-
- public static final NodeFeature SUPPORTS_EXTENDED_REPOSITORY_INDICATOR = new NodeFeature("health.extended_repository_indicator", true);
-
- @Override
- public Set getFeatures() {
- return Set.of(SUPPORTS_EXTENDED_REPOSITORY_INDICATOR);
- }
-}
diff --git a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java
index aab9e972cba73..113e789727f0a 100644
--- a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java
+++ b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java
@@ -24,7 +24,6 @@
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.RunOnce;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.health.metadata.HealthMetadata;
import org.elasticsearch.health.node.action.HealthNodeNotDiscoveredException;
import org.elasticsearch.health.node.selection.HealthNode;
@@ -62,7 +61,6 @@ public class LocalHealthMonitor implements ClusterStateListener {
private final ClusterService clusterService;
private final ThreadPool threadPool;
private final Client client;
- private final FeatureService featureService;
private volatile TimeValue monitorInterval;
private volatile boolean enabled;
@@ -88,7 +86,6 @@ private LocalHealthMonitor(
ClusterService clusterService,
ThreadPool threadPool,
Client client,
- FeatureService featureService,
List> healthTrackers
) {
this.threadPool = threadPool;
@@ -96,7 +93,6 @@ private LocalHealthMonitor(
this.enabled = HealthNodeTaskExecutor.ENABLED_SETTING.get(settings);
this.clusterService = clusterService;
this.client = client;
- this.featureService = featureService;
this.healthTrackers = healthTrackers;
}
@@ -105,17 +101,9 @@ public static LocalHealthMonitor create(
ClusterService clusterService,
ThreadPool threadPool,
Client client,
- FeatureService featureService,
List> healthTrackers
) {
- LocalHealthMonitor localHealthMonitor = new LocalHealthMonitor(
- settings,
- clusterService,
- threadPool,
- client,
- featureService,
- healthTrackers
- );
+ LocalHealthMonitor localHealthMonitor = new LocalHealthMonitor(settings, clusterService, threadPool, client, healthTrackers);
localHealthMonitor.registerListeners();
return localHealthMonitor;
}
diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java
index f0814577bd203..7287a0bf307b9 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexMode.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java
@@ -623,10 +623,7 @@ public Settings getAdditionalIndexSettings(
}
}
if (indexMode == LOOKUP) {
- return Settings.builder()
- .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
- .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-all")
- .build();
+ return Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build();
} else {
return Settings.EMPTY;
}
diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java
index 68f334b10ea52..284140460a437 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java
@@ -25,7 +25,6 @@
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.SourceFieldMapper;
@@ -52,7 +51,6 @@
import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING;
import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING;
import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING;
-import static org.elasticsearch.index.mapper.SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING;
/**
* This class encapsulates all index level settings and handles settings updates.
@@ -655,48 +653,6 @@ public Iterator> settings() {
Property.Final
);
- public static final Setting RECOVERY_USE_SYNTHETIC_SOURCE_SETTING = Setting.boolSetting(
- "index.recovery.use_synthetic_source",
- false,
- new Setting.Validator<>() {
- @Override
- public void validate(Boolean value) {}
-
- @Override
- public void validate(Boolean enabled, Map, Object> settings) {
- if (enabled == false) {
- return;
- }
-
- // Verify if synthetic source is enabled on the index; fail if it is not
- var indexMode = (IndexMode) settings.get(MODE);
- if (indexMode.defaultSourceMode() != SourceFieldMapper.Mode.SYNTHETIC) {
- var sourceMode = (SourceFieldMapper.Mode) settings.get(INDEX_MAPPER_SOURCE_MODE_SETTING);
- if (sourceMode != SourceFieldMapper.Mode.SYNTHETIC) {
- throw new IllegalArgumentException(
- String.format(
- Locale.ROOT,
- "The setting [%s] is only permitted when [%s] is set to [%s]. Current mode: [%s].",
- RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(),
- INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(),
- SourceFieldMapper.Mode.SYNTHETIC.name(),
- sourceMode.name()
- )
- );
- }
- }
- }
-
- @Override
- public Iterator> settings() {
- List> res = List.of(INDEX_MAPPER_SOURCE_MODE_SETTING, MODE);
- return res.iterator();
- }
- },
- Property.IndexScope,
- Property.Final
- );
-
/**
* Returns true if TSDB encoding is enabled. The default is true
*/
@@ -753,6 +709,60 @@ public Iterator> settings() {
Property.ServerlessPublic
);
+ public static final Setting INDEX_MAPPER_SOURCE_MODE_SETTING = Setting.enumSetting(
+ SourceFieldMapper.Mode.class,
+ settings -> {
+ final IndexMode indexMode = IndexSettings.MODE.get(settings);
+ return indexMode.defaultSourceMode().name();
+ },
+ "index.mapping.source.mode",
+ value -> {},
+ Setting.Property.Final,
+ Setting.Property.IndexScope
+ );
+
+ public static final Setting RECOVERY_USE_SYNTHETIC_SOURCE_SETTING = Setting.boolSetting(
+ "index.recovery.use_synthetic_source",
+ false,
+ new Setting.Validator<>() {
+ @Override
+ public void validate(Boolean value) {}
+
+ @Override
+ public void validate(Boolean enabled, Map, Object> settings) {
+ if (enabled == false) {
+ return;
+ }
+
+ // Verify if synthetic source is enabled on the index; fail if it is not
+ var indexMode = (IndexMode) settings.get(MODE);
+ if (indexMode.defaultSourceMode() != SourceFieldMapper.Mode.SYNTHETIC) {
+ var sourceMode = (SourceFieldMapper.Mode) settings.get(INDEX_MAPPER_SOURCE_MODE_SETTING);
+ if (sourceMode != SourceFieldMapper.Mode.SYNTHETIC) {
+ throw new IllegalArgumentException(
+ String.format(
+ Locale.ROOT,
+ "The setting [%s] is only permitted when [%s] is set to [%s]. Current mode: [%s].",
+ RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(),
+ INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(),
+ SourceFieldMapper.Mode.SYNTHETIC.name(),
+ sourceMode.name()
+ )
+ );
+ }
+ }
+ }
+
+ @Override
+ public Iterator> settings() {
+ List> res = List.of(INDEX_MAPPER_SOURCE_MODE_SETTING, MODE);
+ return res.iterator();
+ }
+ },
+ Property.IndexScope,
+ Property.Final
+ );
+
/**
* Legacy index setting, kept for 7.x BWC compatibility. This setting has no effect in 8.x. Do not use.
* TODO: Remove in 9.0
@@ -806,8 +816,6 @@ private static String getIgnoreAboveDefaultValue(final Settings settings) {
}
}
- public static final NodeFeature IGNORE_ABOVE_INDEX_LEVEL_SETTING = new NodeFeature("mapper.ignore_above_index_level_setting", true);
-
private final Index index;
private final IndexVersion version;
private final Logger logger;
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java
index 9de463ec5f6f6..77de1654cf4ba 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java
@@ -26,7 +26,6 @@
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.core.Booleans;
import org.elasticsearch.core.Nullable;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.fielddata.FieldDataContext;
@@ -62,8 +61,6 @@ public class BooleanFieldMapper extends FieldMapper {
public static final String CONTENT_TYPE = "boolean";
- static final NodeFeature BOOLEAN_DIMENSION = new NodeFeature("mapper.boolean_dimension", true);
-
public static class Values {
public static final BytesRef TRUE = new BytesRef("T");
public static final BytesRef FALSE = new BytesRef("F");
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
index 068a9828809dd..cf0c355a22e65 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
@@ -12,7 +12,6 @@
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.IndexMode;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexSortConfig;
@@ -22,8 +21,6 @@
import java.util.List;
public class DocumentMapper {
- static final NodeFeature INDEX_SORTING_ON_NESTED = new NodeFeature("mapper.index_sorting_on_nested", true);
-
private final String type;
private final CompressedXContent mappingSource;
private final MappingLookup mappingLookup;
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java
index e03494dcb5926..bdb3d97d4c187 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java
@@ -56,7 +56,6 @@ public class IgnoredSourceFieldMapper extends MetadataFieldMapper {
public static final TypeParser PARSER = new FixedTypeParser(context -> new IgnoredSourceFieldMapper(context.getIndexSettings()));
- static final NodeFeature TRACK_IGNORED_SOURCE = new NodeFeature("mapper.track_ignored_source", true);
static final NodeFeature DONT_EXPAND_DOTS_IN_IGNORED_SOURCE = new NodeFeature("mapper.ignored_source.dont_expand_dots");
static final NodeFeature IGNORED_SOURCE_AS_TOP_LEVEL_METADATA_ARRAY_FIELD = new NodeFeature(
"mapper.ignored_source_as_top_level_metadata_array_field"
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IndexModeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IndexModeFieldMapper.java
index e539c07caef61..9708753926e1d 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/IndexModeFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/IndexModeFieldMapper.java
@@ -13,7 +13,6 @@
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.regex.Regex;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.fielddata.FieldData;
import org.elasticsearch.index.fielddata.FieldDataContext;
import org.elasticsearch.index.fielddata.IndexFieldData;
@@ -31,8 +30,6 @@
public class IndexModeFieldMapper extends MetadataFieldMapper {
- static final NodeFeature QUERYING_INDEX_MODE = new NodeFeature("mapper.query_index_mode", true);
-
public static final String NAME = "_index_mode";
public static final String CONTENT_TYPE = "_index_mode";
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java
index a26a4bb80d50e..bdcf9bf98279f 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java
@@ -38,7 +38,6 @@
import org.elasticsearch.common.lucene.search.AutomatonQueries;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.core.Nullable;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
@@ -89,9 +88,6 @@ public final class KeywordFieldMapper extends FieldMapper {
public static final String CONTENT_TYPE = "keyword";
- static final NodeFeature KEYWORD_DIMENSION_IGNORE_ABOVE = new NodeFeature("mapper.keyword_dimension_ignore_above", true);
- static final NodeFeature KEYWORD_NORMALIZER_SYNTHETIC_SOURCE = new NodeFeature("mapper.keyword_normalizer_synthetic_source", true);
-
public static class Defaults {
public static final FieldType FIELD_TYPE;
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java
index f293ced122d23..bafa74b662f00 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java
@@ -13,7 +13,6 @@
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.util.StringLiteralDeduplicator;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.IndexMode;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexVersion;
@@ -30,8 +29,6 @@
public abstract class Mapper implements ToXContentFragment, Iterable {
- public static final NodeFeature SYNTHETIC_SOURCE_KEEP_FEATURE = new NodeFeature("mapper.synthetic_source_keep", true);
-
public static final String SYNTHETIC_SOURCE_KEEP_PARAM = "synthetic_source_keep";
// Only relevant for synthetic source mode.
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java
index 5dbaf0e0f40ad..8e669a91fd9ea 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java
@@ -11,9 +11,6 @@
import org.elasticsearch.features.FeatureSpecification;
import org.elasticsearch.features.NodeFeature;
-import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.mapper.flattened.FlattenedFieldMapper;
-import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper;
import java.util.Set;
@@ -28,33 +25,7 @@ public class MapperFeatures implements FeatureSpecification {
@Override
public Set getFeatures() {
- return Set.of(
- BWC_WORKAROUND_9_0,
- IgnoredSourceFieldMapper.TRACK_IGNORED_SOURCE,
- PassThroughObjectMapper.PASS_THROUGH_PRIORITY,
- RangeFieldMapper.NULL_VALUES_OFF_BY_ONE_FIX,
- SourceFieldMapper.SYNTHETIC_SOURCE_FALLBACK,
- DenseVectorFieldMapper.INT4_QUANTIZATION,
- DenseVectorFieldMapper.BIT_VECTORS,
- DocumentMapper.INDEX_SORTING_ON_NESTED,
- KeywordFieldMapper.KEYWORD_DIMENSION_IGNORE_ABOVE,
- IndexModeFieldMapper.QUERYING_INDEX_MODE,
- NodeMappingStats.SEGMENT_LEVEL_FIELDS_STATS,
- BooleanFieldMapper.BOOLEAN_DIMENSION,
- ObjectMapper.SUBOBJECTS_AUTO,
- ObjectMapper.SUBOBJECTS_AUTO_FIXES,
- KeywordFieldMapper.KEYWORD_NORMALIZER_SYNTHETIC_SOURCE,
- SourceFieldMapper.SYNTHETIC_SOURCE_STORED_FIELDS_ADVANCE_FIX,
- Mapper.SYNTHETIC_SOURCE_KEEP_FEATURE,
- SourceFieldMapper.SYNTHETIC_SOURCE_WITH_COPY_TO_AND_DOC_VALUES_FALSE_SUPPORT,
- SourceFieldMapper.SYNTHETIC_SOURCE_COPY_TO_FIX,
- FlattenedFieldMapper.IGNORE_ABOVE_SUPPORT,
- IndexSettings.IGNORE_ABOVE_INDEX_LEVEL_SETTING,
- SourceFieldMapper.SYNTHETIC_SOURCE_COPY_TO_INSIDE_OBJECTS_FIX,
- TimeSeriesRoutingHashFieldMapper.TS_ROUTING_HASH_FIELD_PARSES_BYTES_REF,
- FlattenedFieldMapper.IGNORE_ABOVE_WITH_ARRAYS_SUPPORT,
- DenseVectorFieldMapper.BBQ_FORMAT
- );
+ return Set.of(BWC_WORKAROUND_9_0);
}
public static final NodeFeature CONSTANT_KEYWORD_SYNTHETIC_SOURCE_WRITE_FIX = new NodeFeature(
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NodeMappingStats.java b/server/src/main/java/org/elasticsearch/index/mapper/NodeMappingStats.java
index e7ca7367832b6..0987c6dfb8c8b 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/NodeMappingStats.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/NodeMappingStats.java
@@ -15,7 +15,6 @@
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.Nullable;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.xcontent.ToXContentFragment;
import org.elasticsearch.xcontent.XContentBuilder;
@@ -28,8 +27,6 @@
*/
public class NodeMappingStats implements Writeable, ToXContentFragment {
- public static final NodeFeature SEGMENT_LEVEL_FIELDS_STATS = new NodeFeature("mapper.segment_level_fields_stats", true);
-
private static final class Fields {
static final String MAPPINGS = "mappings";
static final String TOTAL_COUNT = "total_count";
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java
index e734a8e5b4377..86ce4fbb74837 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java
@@ -20,7 +20,6 @@
import org.elasticsearch.common.util.FeatureFlag;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.core.Nullable;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.IndexVersions;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
@@ -52,9 +51,6 @@ public class ObjectMapper extends Mapper {
public static final String CONTENT_TYPE = "object";
static final String STORE_ARRAY_SOURCE_PARAM = "store_array_source";
- static final NodeFeature SUBOBJECTS_AUTO = new NodeFeature("mapper.subobjects_auto", true);
- // No-op. All uses of this feature were reverted but node features can't be removed.
- static final NodeFeature SUBOBJECTS_AUTO_FIXES = new NodeFeature("mapper.subobjects_auto_fixes", true);
/**
* Enhances the previously boolean option for subobjects support with an intermediate mode `auto` that uses
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java
index d16acab11a508..fbf8dd4538037 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/PassThroughObjectMapper.java
@@ -10,7 +10,6 @@
package org.elasticsearch.index.mapper;
import org.elasticsearch.common.Explicit;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.xcontent.XContentBuilder;
@@ -39,8 +38,6 @@ public class PassThroughObjectMapper extends ObjectMapper {
public static final String CONTENT_TYPE = "passthrough";
public static final String PRIORITY_PARAM_NAME = "priority";
- static final NodeFeature PASS_THROUGH_PRIORITY = new NodeFeature("mapper.pass_through_priority", true);
-
public static class Builder extends ObjectMapper.Builder {
// Controls whether subfields are configured as time-series dimensions.
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java
index 36f61311ddfc7..461ad74a9434d 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java
@@ -51,7 +51,6 @@
/** A {@link FieldMapper} for indexing numeric and date ranges, and creating queries */
public class RangeFieldMapper extends FieldMapper {
- public static final NodeFeature NULL_VALUES_OFF_BY_ONE_FIX = new NodeFeature("mapper.range.null_values_off_by_one_fix", true);
public static final NodeFeature DATE_RANGE_INDEXING_FIX = new NodeFeature("mapper.range.date_range_indexing_fix");
public static final boolean DEFAULT_INCLUDE_UPPER = true;
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java
index 29acdf200692d..44c0193206efc 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java
@@ -18,7 +18,6 @@
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.core.Nullable;
@@ -41,20 +40,6 @@
import java.util.Locale;
public class SourceFieldMapper extends MetadataFieldMapper {
- public static final NodeFeature SYNTHETIC_SOURCE_FALLBACK = new NodeFeature("mapper.source.synthetic_source_fallback", true);
- public static final NodeFeature SYNTHETIC_SOURCE_STORED_FIELDS_ADVANCE_FIX = new NodeFeature(
- "mapper.source.synthetic_source_stored_fields_advance_fix",
- true
- );
- public static final NodeFeature SYNTHETIC_SOURCE_WITH_COPY_TO_AND_DOC_VALUES_FALSE_SUPPORT = new NodeFeature(
- "mapper.source.synthetic_source_with_copy_to_and_doc_values_false",
- true
- );
- public static final NodeFeature SYNTHETIC_SOURCE_COPY_TO_FIX = new NodeFeature("mapper.source.synthetic_source_copy_to_fix", true);
- public static final NodeFeature SYNTHETIC_SOURCE_COPY_TO_INSIDE_OBJECTS_FIX = new NodeFeature(
- "mapper.source.synthetic_source_copy_to_inside_objects_fix",
- true
- );
public static final NodeFeature REMOVE_SYNTHETIC_SOURCE_ONLY_VALIDATION = new NodeFeature(
"mapper.source.remove_synthetic_source_only_validation"
);
@@ -70,11 +55,6 @@ public class SourceFieldMapper extends MetadataFieldMapper {
public static final String LOSSY_PARAMETERS_ALLOWED_SETTING_NAME = "index.lossy.source-mapping-parameters";
- public static final Setting INDEX_MAPPER_SOURCE_MODE_SETTING = Setting.enumSetting(SourceFieldMapper.Mode.class, settings -> {
- final IndexMode indexMode = IndexSettings.MODE.get(settings);
- return indexMode.defaultSourceMode().name();
- }, "index.mapping.source.mode", value -> {}, Setting.Property.Final, Setting.Property.IndexScope);
-
public static final String DEPRECATION_WARNING = "Configuring source mode in mappings is deprecated and will be removed "
+ "in future versions. Use [index.mapping.source.mode] index setting instead.";
@@ -264,8 +244,8 @@ public SourceFieldMapper build() {
private Mode resolveSourceMode() {
// If the `index.mapping.source.mode` exists it takes precedence to determine the source mode for `_source`
// otherwise the mode is determined according to `_source.mode`.
- if (INDEX_MAPPER_SOURCE_MODE_SETTING.exists(settings)) {
- return INDEX_MAPPER_SOURCE_MODE_SETTING.get(settings);
+ if (IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.exists(settings)) {
+ return IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.get(settings);
}
// If `_source.mode` is not set we need to apply a default according to index mode.
@@ -297,7 +277,7 @@ private static SourceFieldMapper resolveStaticInstance(final Mode sourceMode) {
return DEFAULT;
}
- final Mode settingSourceMode = INDEX_MAPPER_SOURCE_MODE_SETTING.get(c.getSettings());
+ final Mode settingSourceMode = IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.get(c.getSettings());
// Needed for bwc so that "mode" is not serialized in case of standard index with stored source.
if (indexMode == IndexMode.STANDARD && settingSourceMode == Mode.STORED) {
return DEFAULT;
@@ -482,11 +462,11 @@ public boolean isSynthetic() {
}
public static boolean isSynthetic(IndexSettings indexSettings) {
- return INDEX_MAPPER_SOURCE_MODE_SETTING.get(indexSettings.getSettings()) == SourceFieldMapper.Mode.SYNTHETIC;
+ return IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.get(indexSettings.getSettings()) == SourceFieldMapper.Mode.SYNTHETIC;
}
public static boolean isStored(IndexSettings indexSettings) {
- return INDEX_MAPPER_SOURCE_MODE_SETTING.get(indexSettings.getSettings()) == Mode.STORED;
+ return IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.get(indexSettings.getSettings()) == Mode.STORED;
}
public boolean isDisabled() {
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java
index 2a7069c5a52e3..4a6ba5d1fa800 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesRoutingHashFieldMapper.java
@@ -15,7 +15,6 @@
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.ByteUtils;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.IndexMode;
import org.elasticsearch.index.IndexVersions;
import org.elasticsearch.index.fielddata.FieldData;
@@ -47,10 +46,6 @@ public class TimeSeriesRoutingHashFieldMapper extends MetadataFieldMapper {
public static final TimeSeriesRoutingHashFieldMapper INSTANCE = new TimeSeriesRoutingHashFieldMapper();
public static final TypeParser PARSER = new FixedTypeParser(c -> c.getIndexSettings().getMode().timeSeriesRoutingHashFieldMapper());
- static final NodeFeature TS_ROUTING_HASH_FIELD_PARSES_BYTES_REF = new NodeFeature(
- "tsdb.ts_routing_hash_doc_value_parse_byte_ref",
- true
- );
public static final DocValueFormat TS_ROUTING_HASH_DOC_VALUE_FORMAT = TimeSeriesRoutingHashFieldType.DOC_VALUE_FORMAT;
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java
index fc3f297f97252..7ef12f6dd30d2 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapper.java
@@ -38,7 +38,6 @@
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.core.Nullable;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.fielddata.FieldData;
import org.elasticsearch.index.fielddata.FieldDataContext;
@@ -112,12 +111,6 @@
*/
public final class FlattenedFieldMapper extends FieldMapper {
- public static final NodeFeature IGNORE_ABOVE_SUPPORT = new NodeFeature("flattened.ignore_above_support", true);
- public static final NodeFeature IGNORE_ABOVE_WITH_ARRAYS_SUPPORT = new NodeFeature(
- "mapper.flattened.ignore_above_with_arrays_support",
- true
- );
-
public static final String CONTENT_TYPE = "flattened";
public static final String KEYED_FIELD_SUFFIX = "._keyed";
public static final String KEYED_IGNORED_VALUES_FIELD_SUFFIX = "._keyed._ignored";
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java
index 3e0656205b976..5edff48577efc 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java
@@ -38,7 +38,6 @@
import org.apache.lucene.util.VectorUtil;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.IndexVersions;
import org.elasticsearch.index.codec.vectors.ES813FlatVectorFormat;
@@ -108,10 +107,6 @@ public static boolean isNotUnitVector(float magnitude) {
return Math.abs(magnitude - 1.0f) > EPS;
}
- public static final NodeFeature INT4_QUANTIZATION = new NodeFeature("mapper.vectors.int4_quantization", true);
- public static final NodeFeature BIT_VECTORS = new NodeFeature("mapper.vectors.bit_vectors", true);
- public static final NodeFeature BBQ_FORMAT = new NodeFeature("mapper.vectors.bbq", true);
-
public static final IndexVersion MAGNITUDE_STORED_INDEX_VERSION = IndexVersions.V_7_5_0;
public static final IndexVersion INDEXED_BY_DEFAULT_INDEX_VERSION = IndexVersions.FIRST_DETACHED_INDEX_VERSION;
public static final IndexVersion NORMALIZE_COSINE = IndexVersions.NORMALIZED_VECTOR_COSINE;
diff --git a/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java b/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java
index 5bf5182af336e..56c08f2b2fb85 100644
--- a/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java
+++ b/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java
@@ -13,7 +13,6 @@
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.persistent.PersistentTaskParams;
import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.XContentBuilder;
@@ -36,10 +35,6 @@ private EnterpriseGeoIpTask() {
}
public static final String ENTERPRISE_GEOIP_DOWNLOADER = "enterprise-geoip-downloader";
- public static final NodeFeature GEOIP_DOWNLOADER_DATABASE_CONFIGURATION = new NodeFeature(
- "geoip.downloader.database.configuration",
- true
- );
public static class EnterpriseGeoIpTaskParams implements PersistentTaskParams {
diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestGeoIpFeatures.java b/server/src/main/java/org/elasticsearch/ingest/IngestGeoIpFeatures.java
deleted file mode 100644
index 7c12b180b4607..0000000000000
--- a/server/src/main/java/org/elasticsearch/ingest/IngestGeoIpFeatures.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.ingest;
-
-import org.elasticsearch.features.FeatureSpecification;
-import org.elasticsearch.features.NodeFeature;
-
-import java.util.Set;
-
-import static org.elasticsearch.ingest.EnterpriseGeoIpTask.GEOIP_DOWNLOADER_DATABASE_CONFIGURATION;
-
-public class IngestGeoIpFeatures implements FeatureSpecification {
-
- public static final NodeFeature GET_DATABASE_CONFIGURATION_ACTION_MULTI_NODE = new NodeFeature(
- "get_database_configuration_action.multi_node",
- true
- );
-
- public static final NodeFeature PUT_DATABASE_CONFIGURATION_ACTION_IPINFO = new NodeFeature(
- "put_database_configuration_action.ipinfo",
- true
- );
-
- public Set getFeatures() {
- return Set.of(
- GEOIP_DOWNLOADER_DATABASE_CONFIGURATION,
- GET_DATABASE_CONFIGURATION_ACTION_MULTI_NODE,
- PUT_DATABASE_CONFIGURATION_ACTION_IPINFO
- );
- }
-}
diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java
index 5e9f7bb99b31c..4693b4fcf718a 100644
--- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java
+++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java
@@ -1200,7 +1200,6 @@ public Map searchFields() {
DataStreamAutoShardingService dataStreamAutoShardingService = new DataStreamAutoShardingService(
settings,
clusterService,
- featureService,
threadPool::absoluteTimeInMillis
);
dataStreamAutoShardingService.init();
@@ -1351,7 +1350,7 @@ private Module loadDiagnosticServices(
var serverHealthIndicatorServices = Stream.of(
new StableMasterHealthIndicatorService(coordinationDiagnosticsService, clusterService),
- new RepositoryIntegrityHealthIndicatorService(clusterService, featureService),
+ new RepositoryIntegrityHealthIndicatorService(clusterService),
new DiskHealthIndicatorService(clusterService, featureService),
new ShardsCapacityHealthIndicatorService(clusterService, featureService),
fileSettingsHealthIndicatorService
@@ -1376,14 +1375,7 @@ private Module loadDiagnosticServices(
new DiskHealthTracker(nodeService, clusterService),
new RepositoriesHealthTracker(repositoriesService)
);
- LocalHealthMonitor localHealthMonitor = LocalHealthMonitor.create(
- settings,
- clusterService,
- threadPool,
- client,
- featureService,
- healthTrackers
- );
+ LocalHealthMonitor localHealthMonitor = LocalHealthMonitor.create(settings, clusterService, threadPool, client, healthTrackers);
HealthInfoCache nodeHealthOverview = HealthInfoCache.create(clusterService);
return b -> {
diff --git a/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java b/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java
index de56ead9b5aba..1a169699d4131 100644
--- a/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java
+++ b/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java
@@ -22,9 +22,7 @@
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
-import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.env.Environment;
-import org.elasticsearch.reservedstate.service.FileSettingsFeatures;
import org.elasticsearch.reservedstate.service.FileSettingsService;
import org.elasticsearch.shutdown.PluginShutdownService;
import org.elasticsearch.transport.BindTransportException;
@@ -280,22 +278,7 @@ private boolean isMasterElected(ClusterState clusterState) {
// protected to allow mock service to override
protected boolean areFileSettingsApplied(ClusterState clusterState) {
ReservedStateMetadata fileSettingsMetadata = clusterState.metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE);
- if (fileSettingsMetadata == null) {
- // In order to block readiness on file settings being applied, we need to know that the master node has written an initial
- // version, or a marker that file settings don't exist. When upgrading from a version that did not have file settings, the
- // current master node may not be the first node upgraded. To be safe, we wait to consider file settings application for
- // readiness until the whole cluster supports file settings. Note that this only applies when no reserved state metadata
- // exists, so either we are starting up a current cluster (and the feature will be found) or we are upgrading from
- // a version before file settings existed (before 8.4).
- return supportsFileSettings(clusterState) == false;
- } else {
- return fileSettingsMetadata.version().equals(ReservedStateMetadata.NO_VERSION) == false;
- }
- }
-
- @SuppressForbidden(reason = "need to check file settings support on exact cluster state")
- private boolean supportsFileSettings(ClusterState clusterState) {
- return clusterState.clusterFeatures().clusterHasFeature(clusterState.nodes(), FileSettingsFeatures.FILE_SETTINGS_SUPPORTED);
+ return fileSettingsMetadata != null && fileSettingsMetadata.version().equals(ReservedStateMetadata.NO_VERSION) == false;
}
private void setReady(boolean ready) {
diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java
deleted file mode 100644
index b6dea6a2003fc..0000000000000
--- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.repositories;
-
-import org.elasticsearch.features.FeatureSpecification;
-import org.elasticsearch.features.NodeFeature;
-
-import java.util.Set;
-
-public class RepositoriesFeatures implements FeatureSpecification {
- public static final NodeFeature SUPPORTS_REPOSITORIES_USAGE_STATS = new NodeFeature("repositories.supports_usage_stats", true);
-
- @Override
- public Set<NodeFeature> getFeatures() {
- return Set.of(SUPPORTS_REPOSITORIES_USAGE_STATS);
- }
-}
diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsFeatures.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsFeatures.java
deleted file mode 100644
index a60f525be988a..0000000000000
--- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsFeatures.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.reservedstate.service;
-
-import org.elasticsearch.features.FeatureSpecification;
-import org.elasticsearch.features.NodeFeature;
-
-import java.util.Set;
-
-public class FileSettingsFeatures implements FeatureSpecification {
-
- // Although file settings were supported starting in 8.4.0, this is really about whether file settings
- // are used in readiness.
- public static final NodeFeature FILE_SETTINGS_SUPPORTED = new NodeFeature("file_settings", true);
-
- @Override
- public Set<NodeFeature> getFeatures() {
- return Set.of(FILE_SETTINGS_SUPPORTED);
- }
-}
diff --git a/server/src/main/java/org/elasticsearch/rest/RestCompatibleVersionHelper.java b/server/src/main/java/org/elasticsearch/rest/RestCompatibleVersionHelper.java
index 07cc73f4da2b1..31758be719a66 100644
--- a/server/src/main/java/org/elasticsearch/rest/RestCompatibleVersionHelper.java
+++ b/server/src/main/java/org/elasticsearch/rest/RestCompatibleVersionHelper.java
@@ -11,7 +11,6 @@
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.RestApiVersion;
-import org.elasticsearch.core.UpdateForV9;
import org.elasticsearch.xcontent.MediaType;
import org.elasticsearch.xcontent.ParsedMediaType;
@@ -27,7 +26,6 @@ class RestCompatibleVersionHelper {
/**
* @return The requested API version, or {@link Optional#empty()} if there was no explicit version in the request.
*/
- @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA)
static Optional<RestApiVersion> getCompatibleVersion(
@Nullable ParsedMediaType acceptHeader,
@Nullable ParsedMediaType contentTypeHeader,
@@ -52,8 +50,7 @@ static Optional<RestApiVersion> getCompatibleVersion(
if (hasContent) {
// content-type version must be current or prior
- // This can be uncommented once all references to RestApiVersion.V_7 are removed
- /*if (contentTypeVersion > RestApiVersion.current().major || contentTypeVersion < RestApiVersion.minimumSupported().major) {
+ if (contentTypeVersion > RestApiVersion.current().major || contentTypeVersion < RestApiVersion.minimumSupported().major) {
throw new ElasticsearchStatusException(
"Content-Type version must be either version {} or {}, but found {}. Content-Type={}",
RestStatus.BAD_REQUEST,
@@ -62,7 +59,7 @@ static Optional getCompatibleVersion(
contentTypeVersion,
contentTypeHeader
);
- }*/
+ }
// if both accept and content-type are sent, the version must match
if (contentTypeVersion != acceptVersion) {
throw new ElasticsearchStatusException(
diff --git a/server/src/main/java/org/elasticsearch/rest/RestFeatures.java b/server/src/main/java/org/elasticsearch/rest/RestFeatures.java
deleted file mode 100644
index e72b30526c8e3..0000000000000
--- a/server/src/main/java/org/elasticsearch/rest/RestFeatures.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.rest;
-
-import org.elasticsearch.features.FeatureSpecification;
-import org.elasticsearch.features.NodeFeature;
-import org.elasticsearch.rest.action.admin.cluster.RestNodesCapabilitiesAction;
-
-import java.util.Set;
-
-import static org.elasticsearch.search.fetch.subphase.highlight.DefaultHighlighter.UNIFIED_HIGHLIGHTER_MATCHED_FIELDS;
-
-public class RestFeatures implements FeatureSpecification {
- @Override
- public Set<NodeFeature> getFeatures() {
- return Set.of(
- RestNodesCapabilitiesAction.CAPABILITIES_ACTION,
- RestNodesCapabilitiesAction.LOCAL_ONLY_CAPABILITIES,
- UNIFIED_HIGHLIGHTER_MATCHED_FIELDS
- );
- }
-}
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java
index ca9e4abcaeec7..477cc1acb8319 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java
@@ -11,15 +11,16 @@
import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsAction;
import org.elasticsearch.action.admin.cluster.settings.RestClusterGetSettingsResponse;
-import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.RestUtils;
import org.elasticsearch.rest.Scope;
import org.elasticsearch.rest.ServerlessScope;
+import org.elasticsearch.rest.action.RestCancellableNodeClient;
import org.elasticsearch.rest.action.RestToXContentListener;
import java.io.IOException;
@@ -52,19 +53,14 @@ public String getName() {
return "cluster_get_settings_action";
}
- private static void setUpRequestParams(MasterNodeReadRequest<?> clusterRequest, RestRequest request) {
- clusterRequest.local(request.paramAsBoolean("local", clusterRequest.local()));
- }
-
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
final boolean renderDefaults = request.paramAsBoolean("include_defaults", false);
ClusterGetSettingsAction.Request clusterSettingsRequest = new ClusterGetSettingsAction.Request(getMasterNodeTimeout(request));
+ RestUtils.consumeDeprecatedLocalParameter(request);
- setUpRequestParams(clusterSettingsRequest, request);
-
- return channel -> client.execute(
+ return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).execute(
ClusterGetSettingsAction.INSTANCE,
clusterSettingsRequest,
new RestToXContentListener<>(channel).map(
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesCapabilitiesAction.java
index 265cdd5979adf..7d660e527a814 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesCapabilitiesAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesCapabilitiesAction.java
@@ -12,7 +12,6 @@
import org.elasticsearch.action.admin.cluster.node.capabilities.NodesCapabilitiesRequest;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.Scope;
@@ -30,8 +29,6 @@
@ServerlessScope(Scope.INTERNAL)
public class RestNodesCapabilitiesAction extends BaseRestHandler {
- public static final NodeFeature CAPABILITIES_ACTION = new NodeFeature("rest.capabilities_action", true);
- public static final NodeFeature LOCAL_ONLY_CAPABILITIES = new NodeFeature("rest.local_only_capabilities", true);
private static final Set<String> SUPPORTED_QUERY_PARAMETERS = Set.of(
"timeout",
"method",
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java
index ec8bb6285bdd4..da7a7d3379ee0 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java
@@ -14,16 +14,13 @@
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.cluster.metadata.DesiredNode;
import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestToXContentListener;
-import org.elasticsearch.xcontent.XContentParseException;
import org.elasticsearch.xcontent.XContentParser;
import java.io.IOException;
import java.util.List;
-import java.util.function.Predicate;
import static org.elasticsearch.rest.RestUtils.getAckTimeout;
import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout;
@@ -33,11 +30,6 @@ public class RestUpdateDesiredNodesAction extends BaseRestHandler {
private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestUpdateDesiredNodesAction.class);
private static final String VERSION_DEPRECATION_MESSAGE =
"[version removal] Specifying node_version in desired nodes requests is deprecated.";
- private final Predicate<NodeFeature> clusterSupportsFeature;
-
- public RestUpdateDesiredNodesAction(Predicate<NodeFeature> clusterSupportsFeature) {
- this.clusterSupportsFeature = clusterSupportsFeature;
- }
@Override
public String getName() {
@@ -67,14 +59,8 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli
);
}
- if (clusterSupportsFeature.test(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED)) {
- if (updateDesiredNodesRequest.getNodes().stream().anyMatch(DesiredNode::hasVersion)) {
- deprecationLogger.compatibleCritical("desired_nodes_version", VERSION_DEPRECATION_MESSAGE);
- }
- } else {
- if (updateDesiredNodesRequest.getNodes().stream().anyMatch(n -> n.hasVersion() == false)) {
- throw new XContentParseException("[node_version] field is required and must have a valid value");
- }
+ if (updateDesiredNodesRequest.getNodes().stream().anyMatch(DesiredNode::hasVersion)) {
+ deprecationLogger.compatibleCritical("desired_nodes_version", VERSION_DEPRECATION_MESSAGE);
}
return restChannel -> client.execute(
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java
index 27620fa750ea9..f9e02646041a6 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java
@@ -13,7 +13,6 @@
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.core.TimeValue;
import org.elasticsearch.http.HttpChannel;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestRequest;
@@ -51,11 +50,9 @@ public String getName() {
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
- final GetMappingsRequest getMappingsRequest = new GetMappingsRequest();
+ final GetMappingsRequest getMappingsRequest = new GetMappingsRequest(getMasterNodeTimeout(request));
getMappingsRequest.indices(indices);
getMappingsRequest.indicesOptions(IndicesOptions.fromRequest(request, getMappingsRequest.indicesOptions()));
- final TimeValue timeout = getMasterNodeTimeout(request);
- getMappingsRequest.masterNodeTimeout(timeout);
getMappingsRequest.local(request.paramAsBoolean("local", getMappingsRequest.local()));
final HttpChannel httpChannel = request.getHttpChannel();
return channel -> new RestCancellableNodeClient(client, httpChannel).admin()
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java
index 4cc010c15ffb9..1689d234387cc 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java
@@ -18,6 +18,7 @@
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.Scope;
import org.elasticsearch.rest.ServerlessScope;
+import org.elasticsearch.rest.action.RestCancellableNodeClient;
import org.elasticsearch.rest.action.RestToXContentListener;
import java.io.IOException;
@@ -41,8 +42,10 @@ public String getName() {
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
- SimulateIndexTemplateRequest simulateIndexTemplateRequest = new SimulateIndexTemplateRequest(request.param("name"));
- simulateIndexTemplateRequest.masterNodeTimeout(getMasterNodeTimeout(request));
+ SimulateIndexTemplateRequest simulateIndexTemplateRequest = new SimulateIndexTemplateRequest(
+ getMasterNodeTimeout(request),
+ request.param("name")
+ );
simulateIndexTemplateRequest.includeDefaults(request.paramAsBoolean("include_defaults", false));
if (request.hasContent()) {
TransportPutComposableIndexTemplateAction.Request indexTemplateRequest = new TransportPutComposableIndexTemplateAction.Request(
@@ -57,7 +60,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
simulateIndexTemplateRequest.indexTemplateRequest(indexTemplateRequest);
}
- return channel -> client.execute(
+ return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).execute(
SimulateIndexTemplateAction.INSTANCE,
simulateIndexTemplateRequest,
new RestToXContentListener<>(channel)
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java
index cc2e3136a4163..47b68e24a9ed0 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java
@@ -17,6 +17,7 @@
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.Scope;
import org.elasticsearch.rest.ServerlessScope;
+import org.elasticsearch.rest.action.RestCancellableNodeClient;
import org.elasticsearch.rest.action.RestToXContentListener;
import java.io.IOException;
@@ -39,8 +40,10 @@ public String getName() {
@Override
protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
- SimulateTemplateAction.Request simulateRequest = new SimulateTemplateAction.Request();
- simulateRequest.templateName(request.param("name"));
+ SimulateTemplateAction.Request simulateRequest = new SimulateTemplateAction.Request(
+ getMasterNodeTimeout(request),
+ request.param("name")
+ );
simulateRequest.includeDefaults(request.paramAsBoolean("include_defaults", false));
if (request.hasContent()) {
TransportPutComposableIndexTemplateAction.Request indexTemplateRequest = new TransportPutComposableIndexTemplateAction.Request(
@@ -54,8 +57,11 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli
simulateRequest.indexTemplateRequest(indexTemplateRequest);
}
- simulateRequest.masterNodeTimeout(getMasterNodeTimeout(request));
- return channel -> client.execute(SimulateTemplateAction.INSTANCE, simulateRequest, new RestToXContentListener<>(channel));
+ return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).execute(
+ SimulateTemplateAction.INSTANCE,
+ simulateRequest,
+ new RestToXContentListener<>(channel)
+ );
}
}
diff --git a/server/src/main/java/org/elasticsearch/script/ScriptFeatures.java b/server/src/main/java/org/elasticsearch/script/ScriptFeatures.java
index 88756ddbf4ed5..cd781e42379d1 100644
--- a/server/src/main/java/org/elasticsearch/script/ScriptFeatures.java
+++ b/server/src/main/java/org/elasticsearch/script/ScriptFeatures.java
@@ -17,6 +17,6 @@
public final class ScriptFeatures implements FeatureSpecification {
@Override
public Set<NodeFeature> getFeatures() {
- return Set.of(VectorScoreScriptUtils.HAMMING_DISTANCE_FUNCTION, ScriptTermStats.TERM_STAT_FEATURE);
+ return Set.of();
}
}
diff --git a/server/src/main/java/org/elasticsearch/script/ScriptTermStats.java b/server/src/main/java/org/elasticsearch/script/ScriptTermStats.java
index 82f6e972e1266..9c51afce3a49d 100644
--- a/server/src/main/java/org/elasticsearch/script/ScriptTermStats.java
+++ b/server/src/main/java/org/elasticsearch/script/ScriptTermStats.java
@@ -16,7 +16,6 @@
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.elasticsearch.common.util.CachedSupplier;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.search.internal.ContextIndexSearcher;
import java.io.IOException;
@@ -30,8 +29,6 @@
*/
public class ScriptTermStats {
- public static final NodeFeature TERM_STAT_FEATURE = new NodeFeature("script.term_stats", true);
-
private final IntSupplier docIdSupplier;
private final Term[] terms;
private final IndexSearcher searcher;
diff --git a/server/src/main/java/org/elasticsearch/script/VectorScoreScriptUtils.java b/server/src/main/java/org/elasticsearch/script/VectorScoreScriptUtils.java
index 9b4d105eea100..bdebdcc1eecb4 100644
--- a/server/src/main/java/org/elasticsearch/script/VectorScoreScriptUtils.java
+++ b/server/src/main/java/org/elasticsearch/script/VectorScoreScriptUtils.java
@@ -10,7 +10,6 @@
package org.elasticsearch.script;
import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper;
import org.elasticsearch.script.field.vectors.DenseVector;
import org.elasticsearch.script.field.vectors.DenseVectorDocValuesField;
@@ -21,8 +20,6 @@
public class VectorScoreScriptUtils {
- public static final NodeFeature HAMMING_DISTANCE_FUNCTION = new NodeFeature("script.hamming", true);
-
public static class DenseVectorFunction {
protected final ScoreScript scoreScript;
protected final DenseVectorDocValuesField field;
diff --git a/server/src/main/java/org/elasticsearch/search/SearchFeatures.java b/server/src/main/java/org/elasticsearch/search/SearchFeatures.java
index 553511346b182..98dd7f9388c1f 100644
--- a/server/src/main/java/org/elasticsearch/search/SearchFeatures.java
+++ b/server/src/main/java/org/elasticsearch/search/SearchFeatures.java
@@ -11,7 +11,6 @@
import org.elasticsearch.features.FeatureSpecification;
import org.elasticsearch.features.NodeFeature;
-import org.elasticsearch.search.vectors.KnnVectorQueryBuilder;
import java.util.Set;
@@ -21,7 +20,7 @@ public final class SearchFeatures implements FeatureSpecification {
@Override
public Set<NodeFeature> getFeatures() {
- return Set.of(KnnVectorQueryBuilder.K_PARAM_SUPPORTED, LUCENE_10_0_0_UPGRADE);
+ return Set.of(LUCENE_10_0_0_UPGRADE);
}
public static final NodeFeature RETRIEVER_RESCORER_ENABLED = new NodeFeature("search.retriever.rescorer.enabled");
diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
index 8c21abe4180ea..6d47493e4d063 100644
--- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
@@ -1402,9 +1402,6 @@ private SearchSourceBuilder parseXContent(
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (RETRIEVER.match(currentFieldName, parser.getDeprecationHandler())) {
- if (clusterSupportsFeature.test(RetrieverBuilder.RETRIEVERS_SUPPORTED) == false) {
- throw new ParsingException(parser.getTokenLocation(), "Unknown key for a START_OBJECT in [retriever].");
- }
retrieverBuilder = RetrieverBuilder.parseTopLevelRetrieverBuilder(
parser,
new RetrieverParserContext(searchUsage, clusterSupportsFeature)
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesPhase.java
index 5220dadec7a1a..53116ff8f6ebf 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesPhase.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesPhase.java
@@ -9,10 +9,12 @@
package org.elasticsearch.search.fetch.subphase;
import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.ScorerSupplier;
+import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.search.fetch.FetchContext;
@@ -52,8 +54,9 @@ public FetchSubPhaseProcessor getProcessor(FetchContext context) throws IOExcept
);
}
return new FetchSubPhaseProcessor() {
+ record ScorerAndIterator(Scorer scorer, DocIdSetIterator approximation, TwoPhaseIterator twoPhase) {}
- final Map<String, Scorer> matchingIterators = new HashMap<>();
+ final Map<String, ScorerAndIterator> matchingIterators = new HashMap<>();
@Override
public void setNextReader(LeafReaderContext readerContext) throws IOException {
@@ -63,7 +66,14 @@ public void setNextReader(LeafReaderContext readerContext) throws IOException {
if (ss != null) {
Scorer scorer = ss.get(0L);
if (scorer != null) {
- matchingIterators.put(entry.getKey(), scorer);
+ final TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
+ final DocIdSetIterator iterator;
+ if (twoPhase == null) {
+ iterator = scorer.iterator();
+ } else {
+ iterator = twoPhase.approximation();
+ }
+ matchingIterators.put(entry.getKey(), new ScorerAndIterator(scorer, iterator, twoPhase));
}
}
}
@@ -73,13 +83,13 @@ public void setNextReader(LeafReaderContext readerContext) throws IOException {
public void process(HitContext hitContext) throws IOException {
Map<String, Float> matches = new LinkedHashMap<>();
int doc = hitContext.docId();
- for (Map.Entry<String, Scorer> entry : matchingIterators.entrySet()) {
- Scorer scorer = entry.getValue();
- if (scorer.iterator().docID() < doc) {
- scorer.iterator().advance(doc);
+ for (Map.Entry<String, ScorerAndIterator> entry : matchingIterators.entrySet()) {
+ ScorerAndIterator query = entry.getValue();
+ if (query.approximation.docID() < doc) {
+ query.approximation.advance(doc);
}
- if (scorer.iterator().docID() == doc) {
- matches.put(entry.getKey(), scorer.score());
+ if (query.approximation.docID() == doc && (query.twoPhase == null || query.twoPhase.matches())) {
+ matches.put(entry.getKey(), query.scorer.score());
}
}
hitContext.hit().matchedQueries(matches);
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java
index b16617e2eb4d9..c47f815c18639 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/DefaultHighlighter.java
@@ -22,7 +22,6 @@
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.text.Text;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
@@ -51,8 +50,6 @@
public class DefaultHighlighter implements Highlighter {
- public static final NodeFeature UNIFIED_HIGHLIGHTER_MATCHED_FIELDS = new NodeFeature("unified_highlighter_matched_fields", true);
-
@Override
public boolean canHighlight(MappedFieldType fieldType) {
return true;
diff --git a/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java b/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java
index 9eb0170af5efb..d899a390d8be7 100644
--- a/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java
+++ b/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java
@@ -102,14 +102,6 @@ private SearchLookup(SearchLookup searchLookup, Set<String> fieldChain) {
this.fieldLookupProvider = searchLookup.fieldLookupProvider;
}
- private SearchLookup(SearchLookup searchLookup, SourceProvider sourceProvider, Set<String> fieldChain) {
- this.fieldChain = Collections.unmodifiableSet(fieldChain);
- this.sourceProvider = sourceProvider;
- this.fieldTypeLookup = searchLookup.fieldTypeLookup;
- this.fieldDataLookup = searchLookup.fieldDataLookup;
- this.fieldLookupProvider = searchLookup.fieldLookupProvider;
- }
-
/**
* Creates a copy of the current {@link SearchLookup} that looks fields up in the same way, but also tracks field references
* in order to detect cycles and prevent resolving fields that depend on more than {@link #MAX_FIELD_CHAIN_DEPTH} other fields.
@@ -153,7 +145,4 @@ public Source getSource(LeafReaderContext ctx, int doc) throws IOException {
return sourceProvider.getSource(ctx, doc);
}
- public SearchLookup swapSourceProvider(SourceProvider sourceProvider) {
- return new SearchLookup(this, sourceProvider, fieldChain);
- }
}
diff --git a/server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureShardPhase.java b/server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureShardPhase.java
index e64bbe3c39d79..4374c06da365d 100644
--- a/server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureShardPhase.java
+++ b/server/src/main/java/org/elasticsearch/search/rank/feature/RankFeatureShardPhase.java
@@ -83,7 +83,7 @@ public static void processFetch(SearchContext searchContext) {
// FetchSearchResult#shardResult()
SearchHits hits = fetchSearchResult.hits();
RankFeatureShardResult featureRankShardResult = (RankFeatureShardResult) rankFeaturePhaseRankShardContext
- .buildRankFeatureShardResult(hits, searchContext.shardTarget().getShardId().id());
+ .buildRankFeatureShardResult(hits, searchContext.request().shardRequestIndex());
// save the result in the search context
// need to add profiling info as well available from fetch
if (featureRankShardResult != null) {
diff --git a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java
index ecc03d05b28a6..737d2aa397c34 100644
--- a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java
@@ -10,8 +10,6 @@
package org.elasticsearch.search.retriever;
import org.apache.lucene.util.SetOnce;
-import org.elasticsearch.common.ParsingException;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext;
@@ -46,7 +44,6 @@
public final class KnnRetrieverBuilder extends RetrieverBuilder {
public static final String NAME = "knn";
- public static final NodeFeature KNN_RETRIEVER_SUPPORTED = new NodeFeature("knn_retriever_supported", true);
public static final ParseField FIELD_FIELD = new ParseField("field");
public static final ParseField K_FIELD = new ParseField("k");
@@ -103,9 +100,6 @@ public final class KnnRetrieverBuilder extends RetrieverBuilder {
}
public static KnnRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) throws IOException {
- if (context.clusterSupportsFeature(KNN_RETRIEVER_SUPPORTED) == false) {
- throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + NAME + "]");
- }
return PARSER.apply(parser, context);
}
diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java
index 357555cc59942..ce852a44c28ec 100644
--- a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java
@@ -13,7 +13,6 @@
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.SuggestingErrorOnUnknown;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext;
@@ -50,8 +49,6 @@
*/
public abstract class RetrieverBuilder implements Rewriteable, ToXContent {
- public static final NodeFeature RETRIEVERS_SUPPORTED = new NodeFeature("retrievers_supported", true);
-
public static final ParseField PRE_FILTER_FIELD = new ParseField("filter");
public static final ParseField MIN_SCORE_FIELD = new ParseField("min_score");
diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieversFeatures.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieversFeatures.java
index 74a8b30c8e7dc..bfd6f572a9e65 100644
--- a/server/src/main/java/org/elasticsearch/search/retriever/RetrieversFeatures.java
+++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieversFeatures.java
@@ -22,10 +22,6 @@ public class RetrieversFeatures implements FeatureSpecification {
@Override
public Set<NodeFeature> getFeatures() {
- return Set.of(
- RetrieverBuilder.RETRIEVERS_SUPPORTED,
- StandardRetrieverBuilder.STANDARD_RETRIEVER_SUPPORTED,
- KnnRetrieverBuilder.KNN_RETRIEVER_SUPPORTED
- );
+ return Set.of();
}
}
diff --git a/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java
index 2ffb9e3a98028..3ca74dc133d47 100644
--- a/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/retriever/StandardRetrieverBuilder.java
@@ -9,8 +9,6 @@
package org.elasticsearch.search.retriever;
-import org.elasticsearch.common.ParsingException;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
@@ -39,7 +37,6 @@
public final class StandardRetrieverBuilder extends RetrieverBuilder implements ToXContent {
public static final String NAME = "standard";
- public static final NodeFeature STANDARD_RETRIEVER_SUPPORTED = new NodeFeature("standard_retriever_supported", true);
public static final ParseField QUERY_FIELD = new ParseField("query");
public static final ParseField SEARCH_AFTER_FIELD = new ParseField("search_after");
@@ -81,9 +78,6 @@ public final class StandardRetrieverBuilder extends RetrieverBuilder implements
}
public static StandardRetrieverBuilder fromXContent(XContentParser parser, RetrieverParserContext context) throws IOException {
- if (context.clusterSupportsFeature(STANDARD_RETRIEVER_SUPPORTED) == false) {
- throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + NAME + "]");
- }
return PARSER.apply(parser, context);
}
diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java
index 193191658af08..565fd7325a5ac 100644
--- a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java
@@ -22,7 +22,6 @@
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.core.Nullable;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.NestedObjectMapper;
import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper;
@@ -56,8 +55,6 @@
* {@link org.apache.lucene.search.KnnByteVectorQuery}.
*/
public class KnnVectorQueryBuilder extends AbstractQueryBuilder<KnnVectorQueryBuilder> {
- public static final NodeFeature K_PARAM_SUPPORTED = new NodeFeature("search.vectors.k_param_supported", true);
-
public static final String NAME = "knn";
private static final int NUM_CANDS_LIMIT = 10_000;
private static final float NUM_CANDS_MULTIPLICATIVE_FACTOR = 1.5f;
diff --git a/server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java
index 4be1ee9ddc513..a6a7b458c216d 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorService.java
@@ -13,9 +13,7 @@
import org.elasticsearch.cluster.metadata.RepositoriesMetadata;
import org.elasticsearch.cluster.metadata.RepositoryMetadata;
import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.health.Diagnosis;
-import org.elasticsearch.health.HealthFeatures;
import org.elasticsearch.health.HealthIndicatorDetails;
import org.elasticsearch.health.HealthIndicatorImpact;
import org.elasticsearch.health.HealthIndicatorResult;
@@ -100,11 +98,9 @@ public class RepositoryIntegrityHealthIndicatorService implements HealthIndicato
);
private final ClusterService clusterService;
- private final FeatureService featureService;
- public RepositoryIntegrityHealthIndicatorService(ClusterService clusterService, FeatureService featureService) {
+ public RepositoryIntegrityHealthIndicatorService(ClusterService clusterService) {
this.clusterService = clusterService;
- this.featureService = featureService;
}
@Override
@@ -175,15 +171,8 @@ private RepositoryHealthAnalyzer(
|| invalidRepositories.isEmpty() == false) {
healthStatus = YELLOW;
} else if (repositoriesHealthByNode.isEmpty()) {
- clusterHasFeature = featureService.clusterHasFeature(
- clusterState,
- HealthFeatures.SUPPORTS_EXTENDED_REPOSITORY_INDICATOR
- ) == false;
- if (clusterHasFeature) {
- healthStatus = GREEN;
- } else {
- healthStatus = UNKNOWN;
- }
+ clusterHasFeature = false;
+ healthStatus = UNKNOWN;
} else {
healthStatus = GREEN;
}
diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
index f260c7aad30e5..debe3d6e6bd92 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
@@ -70,7 +70,6 @@
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.Mapping;
-import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.shard.IndexLongFieldRange;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
@@ -158,7 +157,7 @@ public final class RestoreService implements ClusterStateApplier {
SETTING_CREATION_DATE,
SETTING_HISTORY_UUID,
IndexSettings.MODE.getKey(),
- SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(),
+ IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(),
IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(),
IndexSortConfig.INDEX_SORT_FIELD_SETTING.getKey(),
IndexSortConfig.INDEX_SORT_ORDER_SETTING.getKey(),
diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
index f55e3740aaa8f..37a3ec586d104 100644
--- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
+++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
@@ -27,7 +27,6 @@
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.core.UpdateForV9;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.ReportingService;
import org.elasticsearch.telemetry.metric.Instrument;
@@ -120,13 +119,7 @@ public static class Names {
public static final String THREAD_POOL_METRIC_NAME_REJECTED = ".threads.rejected.total";
public enum ThreadPoolType {
- @Deprecated(forRemoval = true)
- @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // no longer used, remove in v9
- DIRECT("direct"),
FIXED("fixed"),
- @Deprecated(forRemoval = true)
- @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // no longer used, remove in v9
- FIXED_AUTO_QUEUE_SIZE("fixed_auto_queue_size"),
SCALING("scaling");
private final String type;
diff --git a/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java b/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java
index a43b1e7440bda..ec9b30e71e62c 100644
--- a/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java
+++ b/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java
@@ -41,9 +41,14 @@ final class TransportHandshaker {
* ignores the body of the request. After the handshake, the OutboundHandler uses the min(local,remote) protocol version for all later
* messages.
*
- * This version supports two handshake protocols, v6080099 and v7170099, which respectively have the same message structure as the
- * transport protocols of v6.8.0 and v7.17.0. This node only sends v7170099 requests, but it can send a valid response to any v6080099
- * requests that it receives.
+ * This version supports three handshake protocols, v6080099, v7170099 and v8800000, which respectively have the same message structure
+ * as the transport protocols of v6.8.0, v7.17.0, and v8.18.0. This node only sends v7170099 requests, but it can send a valid response
+ * to any v6080099 or v8800000 requests that it receives.
+ *
+ * Note that these are not really TransportVersion constants as used elsewhere in ES, they're independent things that just happen to be
+ * stored in the same location in the message header and which roughly match the same ID numbering scheme. Older versions of ES did
+ * rely on them matching the real transport protocol (which itself matched the release version numbers), but these days that's no longer
+ * true.
*
* Here are some example messages, broken down to show their structure:
*
@@ -79,7 +84,7 @@ final class TransportHandshaker {
* c3 f9 eb 03 -- max acceptable protocol version (vInt: 00000011 11101011 11111001 11000011 == 8060099)
*
*
- * ## v7170099 Request:
+ * ## v7170099 and v8800000 Requests:
*
* 45 53 -- 'ES' marker
* 00 00 00 31 -- total message length
@@ -98,7 +103,7 @@ final class TransportHandshaker {
* 04 -- payload length
* c3 f9 eb 03 -- max acceptable protocol version (vInt: 00000011 11101011 11111001 11000011 == 8060099)
*
- * ## v7170099 Response:
+ * ## v7170099 and v8800000 Responses:
*
* 45 53 -- 'ES' marker
* 00 00 00 17 -- total message length
@@ -118,7 +123,12 @@ final class TransportHandshaker {
static final TransportVersion EARLIEST_HANDSHAKE_VERSION = TransportVersion.fromId(6080099);
static final TransportVersion REQUEST_HANDSHAKE_VERSION = TransportVersions.MINIMUM_COMPATIBLE;
- static final Set<TransportVersion> ALLOWED_HANDSHAKE_VERSIONS = Set.of(EARLIEST_HANDSHAKE_VERSION, REQUEST_HANDSHAKE_VERSION);
+ static final TransportVersion V9_HANDSHAKE_VERSION = TransportVersion.fromId(8_800_00_0);
+ static final Set<TransportVersion> ALLOWED_HANDSHAKE_VERSIONS = Set.of(
+ EARLIEST_HANDSHAKE_VERSION,
+ REQUEST_HANDSHAKE_VERSION,
+ V9_HANDSHAKE_VERSION
+ );
static final String HANDSHAKE_ACTION_NAME = "internal:tcp/handshake";
private final ConcurrentMap<Long, HandshakeResponseHandler> pendingHandshakes = new ConcurrentHashMap<>();
diff --git a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification
index 8fa188efbd4a3..a4f606b54827b 100644
--- a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification
+++ b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification
@@ -7,21 +7,13 @@
# License v3.0 only", or the "Server Side Public License, v 1".
#
-org.elasticsearch.action.admin.indices.stats.IndicesStatsFeatures
org.elasticsearch.action.bulk.BulkFeatures
org.elasticsearch.features.FeatureInfrastructureFeatures
-org.elasticsearch.health.HealthFeatures
-org.elasticsearch.cluster.metadata.MetadataFeatures
-org.elasticsearch.rest.RestFeatures
-org.elasticsearch.repositories.RepositoriesFeatures
-org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures
org.elasticsearch.rest.action.admin.cluster.ClusterRerouteFeatures
org.elasticsearch.index.IndexFeatures
org.elasticsearch.index.mapper.MapperFeatures
-org.elasticsearch.ingest.IngestGeoIpFeatures
org.elasticsearch.search.SearchFeatures
org.elasticsearch.search.retriever.RetrieversFeatures
org.elasticsearch.script.ScriptFeatures
-org.elasticsearch.reservedstate.service.FileSettingsFeatures
org.elasticsearch.cluster.routing.RoutingFeatures
org.elasticsearch.action.admin.cluster.stats.ClusterStatsFeatures
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsActionTests.java
index f3d8f8860ba83..d9bf3e0e99c81 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsActionTests.java
@@ -17,7 +17,6 @@
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsTests;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.test.ClusterServiceUtils;
@@ -35,8 +34,6 @@
import static org.hamcrest.Matchers.anEmptyMap;
import static org.hamcrest.Matchers.not;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
@@ -48,7 +45,6 @@ public class TransportGetAllocationStatsActionTests extends ESTestCase {
private ClusterService clusterService;
private TransportService transportService;
private AllocationStatsService allocationStatsService;
- private FeatureService featureService;
private TransportGetAllocationStatsAction action;
@@ -67,15 +63,13 @@ public void setUp() throws Exception {
Set.of()
);
allocationStatsService = mock(AllocationStatsService.class);
- featureService = mock(FeatureService.class);
action = new TransportGetAllocationStatsAction(
transportService,
clusterService,
threadPool,
new ActionFilters(Set.of()),
null,
- allocationStatsService,
- featureService
+ allocationStatsService
);
}
@@ -99,8 +93,6 @@ public void testReturnsOnlyRequestedStats() throws Exception {
);
when(allocationStatsService.stats()).thenReturn(Map.of(randomIdentifier(), NodeAllocationStatsTests.randomNodeAllocationStats()));
- when(featureService.clusterHasFeature(any(ClusterState.class), eq(AllocationStatsFeatures.INCLUDE_DISK_THRESHOLD_SETTINGS)))
- .thenReturn(true);
var future = new PlainActionFuture<TransportGetAllocationStatsAction.Response>();
action.masterOperation(mock(Task.class), request, ClusterState.EMPTY_STATE, future);
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java
index 8a51963097dae..9fef4c4ed328f 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java
@@ -13,8 +13,8 @@
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.core.UpdateForV9;
import org.elasticsearch.index.IndexVersion;
+import org.elasticsearch.index.IndexVersions;
import org.elasticsearch.indices.SystemIndexDescriptor;
import org.elasticsearch.indices.SystemIndexDescriptorUtils;
import org.elasticsearch.indices.SystemIndices;
@@ -31,7 +31,8 @@
public class TransportGetFeatureUpgradeStatusActionTests extends ESTestCase {
public static String TEST_SYSTEM_INDEX_PATTERN = ".test*";
- private static final IndexVersion TEST_OLD_VERSION = IndexVersion.fromId(6000099);
+ // Version just before MINIMUM_COMPATIBLE in order to check that UpgradeStatus.MIGRATION_NEEDED is set correctly
+ private static final IndexVersion TEST_OLD_VERSION = IndexVersion.fromId(IndexVersions.MINIMUM_COMPATIBLE.id() - 1);
private static final ClusterState CLUSTER_STATE = getClusterState();
private static final SystemIndices.Feature FEATURE = getFeature();
@@ -85,8 +86,6 @@ private static ClusterState getClusterState() {
.numberOfReplicas(0)
.build();
- @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA)
- // Once we start testing 9.x, we should update this test to use a 7.x "version created"
IndexMetadata indexMetadata2 = IndexMetadata.builder(".test-index-2")
.settings(Settings.builder().put("index.version.created", TEST_OLD_VERSION).build())
.numberOfShards(1)
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsSerializationTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsSerializationTests.java
deleted file mode 100644
index 5954de0586985..0000000000000
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsSerializationTests.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.action.admin.cluster.settings;
-
-import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.AbstractWireSerializingTestCase;
-
-public class ClusterGetSettingsSerializationTests extends AbstractWireSerializingTestCase<ClusterGetSettingsAction.Response> {
- @Override
- protected Writeable.Reader<ClusterGetSettingsAction.Response> instanceReader() {
- return ClusterGetSettingsAction.Response::new;
- }
-
- @Override
- protected ClusterGetSettingsAction.Response createTestInstance() {
- final Settings persistentSettings = Settings.builder()
- .put("persistent.foo.filtered", "bar")
- .put("persistent.foo.non_filtered", "baz")
- .build();
-
- final Settings transientSettings = Settings.builder()
- .put("transient.foo.filtered", "bar")
- .put("transient.foo.non_filtered", "baz")
- .build();
-
- final Settings allSettings = Settings.builder().put(persistentSettings).put(transientSettings).build();
-
- return new ClusterGetSettingsAction.Response(persistentSettings, transientSettings, allSettings);
- }
-
- @Override
- protected ClusterGetSettingsAction.Response mutateInstance(ClusterGetSettingsAction.Response instance) {
- final Settings otherSettings = Settings.builder().put("random.setting", randomAlphaOfLength(randomIntBetween(1, 12))).build();
- return switch (between(0, 2)) {
- case 0 -> new ClusterGetSettingsAction.Response(otherSettings, instance.transientSettings(), instance.settings());
- case 1 -> new ClusterGetSettingsAction.Response(instance.persistentSettings(), otherSettings, instance.settings());
- case 2 -> new ClusterGetSettingsAction.Response(instance.persistentSettings(), instance.transientSettings(), otherSettings);
- default -> throw new IllegalStateException("Unexpected switch value");
- };
- }
-}
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsTests.java
index 47f3f71bcc11b..58341b7d7fb9a 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsTests.java
@@ -12,17 +12,18 @@
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.MockUtils;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.List;
+import java.util.Map;
import static org.mockito.Mockito.mock;
@@ -54,10 +55,8 @@ public void testTransportFilters() throws Exception {
TransportClusterGetSettingsAction action = new TransportClusterGetSettingsAction(
transportService,
mock(ClusterService.class),
- threadPool,
filter,
- mock(ActionFilters.class),
- mock(IndexNameExpressionResolver.class)
+ mock(ActionFilters.class)
);
final Settings persistentSettings = Settings.builder()
@@ -74,7 +73,8 @@ public void testTransportFilters() throws Exception {
final ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).metadata(metadata).build();
final PlainActionFuture<ClusterGetSettingsAction.Response> future = new PlainActionFuture<>();
- action.masterOperation(null, null, clusterState, future);
+ final var task = new CancellableTask(1, "test", ClusterGetSettingsAction.NAME, "", null, Map.of());
+ action.localClusterStateOperation(task, null, clusterState, future);
assertTrue(future.isDone());
final ClusterGetSettingsAction.Response response = future.get();
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingStatsTests.java
index 96954458c18c4..0617053769da0 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingStatsTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingStatsTests.java
@@ -17,8 +17,8 @@
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexVersion;
-import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.script.Script;
import org.elasticsearch.tasks.TaskCancelledException;
import org.elasticsearch.test.AbstractWireSerializingTestCase;
@@ -586,7 +586,7 @@ public void testSourceModes() {
int numDisabledIndices = randomIntBetween(1, 5);
for (int i = 0; i < numSyntheticIndices; i++) {
IndexMetadata.Builder indexMetadata = new IndexMetadata.Builder("foo-synthetic-" + i).settings(
- indexSettings(IndexVersion.current(), 4, 1).put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "synthetic")
+ indexSettings(IndexVersion.current(), 4, 1).put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "synthetic")
);
builder.put(indexMetadata);
}
@@ -594,7 +594,7 @@ public void testSourceModes() {
IndexMetadata.Builder indexMetadata;
if (randomBoolean()) {
indexMetadata = new IndexMetadata.Builder("foo-stored-" + i).settings(
- indexSettings(IndexVersion.current(), 4, 1).put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "stored")
+ indexSettings(IndexVersion.current(), 4, 1).put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "stored")
);
} else {
indexMetadata = new IndexMetadata.Builder("foo-stored-" + i).settings(indexSettings(IndexVersion.current(), 4, 1));
@@ -603,7 +603,7 @@ public void testSourceModes() {
}
for (int i = 0; i < numDisabledIndices; i++) {
IndexMetadata.Builder indexMetadata = new IndexMetadata.Builder("foo-disabled-" + i).settings(
- indexSettings(IndexVersion.current(), 4, 1).put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "disabled")
+ indexSettings(IndexVersion.current(), 4, 1).put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), "disabled")
);
builder.put(indexMetadata);
}
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java
index f04180bde30f2..657a03066ada9 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java
@@ -43,7 +43,6 @@
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.FeatureService;
import org.elasticsearch.index.IndexMode;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.cache.query.QueryCacheStats;
@@ -121,7 +120,6 @@ public class TransportRolloverActionTests extends ESTestCase {
final DataStreamAutoShardingService dataStreamAutoShardingService = new DataStreamAutoShardingService(
Settings.EMPTY,
mockClusterService,
- new FeatureService(List.of()),
System::currentTimeMillis
);
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java
index 02a4c134ccd15..1d861b340543d 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequestTests.java
@@ -12,45 +12,21 @@
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
-import org.elasticsearch.cluster.metadata.ComposableIndexTemplateTests;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Template;
-import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.test.ESTestCase;
import java.util.List;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
-public class SimulateIndexTemplateRequestTests extends AbstractWireSerializingTestCase<SimulateIndexTemplateRequest> {
-
- @Override
- protected Writeable.Reader<SimulateIndexTemplateRequest> instanceReader() {
- return SimulateIndexTemplateRequest::new;
- }
-
- @Override
- protected SimulateIndexTemplateRequest createTestInstance() {
- SimulateIndexTemplateRequest req = new SimulateIndexTemplateRequest(randomAlphaOfLength(10));
- TransportPutComposableIndexTemplateAction.Request newTemplateRequest = new TransportPutComposableIndexTemplateAction.Request(
- randomAlphaOfLength(4)
- );
- newTemplateRequest.indexTemplate(ComposableIndexTemplateTests.randomInstance());
- req.indexTemplateRequest(newTemplateRequest);
- req.includeDefaults(randomBoolean());
- return req;
- }
-
- @Override
- protected SimulateIndexTemplateRequest mutateInstance(SimulateIndexTemplateRequest instance) {
- return randomValueOtherThan(instance, this::createTestInstance);
- }
+public class SimulateIndexTemplateRequestTests extends ESTestCase {
public void testIndexNameCannotBeNullOrEmpty() {
- expectThrows(IllegalArgumentException.class, () -> new SimulateIndexTemplateRequest((String) null));
- expectThrows(IllegalArgumentException.class, () -> new SimulateIndexTemplateRequest(""));
+ expectThrows(IllegalArgumentException.class, () -> new SimulateIndexTemplateRequest(TEST_REQUEST_TIMEOUT, null));
+ expectThrows(IllegalArgumentException.class, () -> new SimulateIndexTemplateRequest(TEST_REQUEST_TIMEOUT, ""));
}
public void testAddingGlobalTemplateWithHiddenIndexSettingIsIllegal() {
@@ -60,7 +36,7 @@ public void testAddingGlobalTemplateWithHiddenIndexSettingIsIllegal() {
TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request("test");
request.indexTemplate(globalTemplate);
- SimulateIndexTemplateRequest simulateRequest = new SimulateIndexTemplateRequest("testing");
+ SimulateIndexTemplateRequest simulateRequest = new SimulateIndexTemplateRequest(TEST_REQUEST_TIMEOUT, "testing");
simulateRequest.indexTemplateRequest(request);
ActionRequestValidationException validationException = simulateRequest.validate();
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java
index 14ebf260d3bf9..6163566a5e593 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateRequestTests.java
@@ -12,49 +12,17 @@
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
-import org.elasticsearch.cluster.metadata.ComposableIndexTemplateTests;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Template;
-import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.test.ESTestCase;
import java.util.List;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
-public class SimulateTemplateRequestTests extends AbstractWireSerializingTestCase {
-
- @Override
- protected Writeable.Reader instanceReader() {
- return SimulateTemplateAction.Request::new;
- }
-
- @Override
- protected SimulateTemplateAction.Request createTestInstance() {
- SimulateTemplateAction.Request req = new SimulateTemplateAction.Request(randomAlphaOfLength(10));
- TransportPutComposableIndexTemplateAction.Request newTemplateRequest = new TransportPutComposableIndexTemplateAction.Request(
- randomAlphaOfLength(4)
- );
- newTemplateRequest.indexTemplate(ComposableIndexTemplateTests.randomInstance());
- req.indexTemplateRequest(newTemplateRequest);
- req.includeDefaults(randomBoolean());
- return req;
- }
-
- @Override
- protected SimulateTemplateAction.Request mutateInstance(SimulateTemplateAction.Request instance) {
- return randomValueOtherThan(instance, this::createTestInstance);
- }
-
- public void testIndexNameCannotBeNullOrEmpty() {
- expectThrows(IllegalArgumentException.class, () -> new SimulateTemplateAction.Request((String) null));
- expectThrows(
- IllegalArgumentException.class,
- () -> new SimulateTemplateAction.Request((TransportPutComposableIndexTemplateAction.Request) null)
- );
- }
+public class SimulateTemplateRequestTests extends ESTestCase {
public void testAddingGlobalTemplateWithHiddenIndexSettingIsIllegal() {
Template template = new Template(Settings.builder().put(IndexMetadata.SETTING_INDEX_HIDDEN, true).build(), null, null);
@@ -63,7 +31,7 @@ public void testAddingGlobalTemplateWithHiddenIndexSettingIsIllegal() {
TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request("test");
request.indexTemplate(globalTemplate);
- SimulateTemplateAction.Request simulateRequest = new SimulateTemplateAction.Request("testing");
+ SimulateTemplateAction.Request simulateRequest = new SimulateTemplateAction.Request(TEST_REQUEST_TIMEOUT, "testing");
simulateRequest.indexTemplateRequest(request);
ActionRequestValidationException validationException = simulateRequest.validate();
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
index 50885fc399c89..c6f923ce7cc03 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java
@@ -153,7 +153,6 @@ class TestTransportBulkAction extends TransportBulkAction {
transportService,
TransportBulkActionIngestTests.this.clusterService,
ingestService,
- mockFeatureService,
new NodeClient(Settings.EMPTY, TransportBulkActionIngestTests.this.threadPool),
new ActionFilters(Collections.emptySet()),
TestIndexNameExpressionResolver.newInstance(),
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java
index 587bc2e3ba333..616488d3472ff 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java
@@ -113,7 +113,6 @@ class TestTransportBulkAction extends TransportBulkAction {
transportService,
TransportBulkActionTests.this.clusterService,
null,
- mockFeatureService,
new NodeClient(Settings.EMPTY, TransportBulkActionTests.this.threadPool),
new ActionFilters(Collections.emptySet()),
new Resolver(),
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java
index 2f033e4b5a383..544a373675ee5 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java
@@ -247,7 +247,6 @@ static class TestTransportBulkAction extends TransportBulkAction {
transportService,
clusterService,
null,
- null,
client,
actionFilters,
indexNameExpressionResolver,
diff --git a/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java b/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java
index 3a43a1df9bf88..14ecf85b3aa7e 100644
--- a/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java
@@ -27,9 +27,6 @@
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.FeatureService;
-import org.elasticsearch.features.FeatureSpecification;
-import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexMode;
import org.elasticsearch.index.IndexVersion;
@@ -81,12 +78,6 @@ public void setupService() {
service = new DataStreamAutoShardingService(
Settings.builder().put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true).build(),
clusterService,
- new FeatureService(List.of(new FeatureSpecification() {
- @Override
- public Set getFeatures() {
- return Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE);
- }
- })),
() -> now
);
dataStreamName = randomAlphaOfLengthBetween(10, 100);
@@ -113,14 +104,6 @@ public void testCalculateValidations() {
builder.put(dataStream);
ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
.nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2")))
- .nodeFeatures(
- Map.of(
- "n1",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()),
- "n2",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id())
- )
- )
.metadata(builder)
.build();
@@ -129,12 +112,6 @@ public void testCalculateValidations() {
DataStreamAutoShardingService disabledAutoshardingService = new DataStreamAutoShardingService(
Settings.EMPTY,
clusterService,
- new FeatureService(List.of(new FeatureSpecification() {
- @Override
- public Set getFeatures() {
- return Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE);
- }
- })),
System::currentTimeMillis
);
@@ -142,46 +119,6 @@ public Set getFeatures() {
assertThat(autoShardingResult, is(NOT_APPLICABLE_RESULT));
}
- {
- // cluster doesn't have feature
- ClusterState stateNoFeature = ClusterState.builder(ClusterName.DEFAULT)
- .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2")))
- .nodeFeatures(Map.of("n1", Set.of(), "n2", Set.of()))
- .metadata(Metadata.builder())
- .build();
-
- Settings settings = Settings.builder().put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true).build();
- DataStreamAutoShardingService noFeatureService = new DataStreamAutoShardingService(
- settings,
- clusterService,
- new FeatureService(List.of()),
- () -> now
- );
-
- AutoShardingResult autoShardingResult = noFeatureService.calculate(stateNoFeature, dataStream, 2.0);
- assertThat(autoShardingResult, is(NOT_APPLICABLE_RESULT));
- }
-
- {
- Settings settings = Settings.builder()
- .put(DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_ENABLED, true)
- .putList(
- DataStreamAutoShardingService.DATA_STREAMS_AUTO_SHARDING_EXCLUDES_SETTING.getKey(),
- List.of("foo", dataStreamName + "*")
- )
- .build();
- // patterns are configured to exclude the current data stream
- DataStreamAutoShardingService noFeatureService = new DataStreamAutoShardingService(
- settings,
- clusterService,
- new FeatureService(List.of()),
- () -> now
- );
-
- AutoShardingResult autoShardingResult = noFeatureService.calculate(state, dataStream, 2.0);
- assertThat(autoShardingResult, is(NOT_APPLICABLE_RESULT));
- }
-
{
// null write load passed
AutoShardingResult autoShardingResult = service.calculate(state, dataStream, null);
@@ -209,14 +146,6 @@ public void testCalculateIncreaseShardingRecommendations() {
builder.put(dataStream);
ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
.nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2")))
- .nodeFeatures(
- Map.of(
- "n1",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()),
- "n2",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id())
- )
- )
.metadata(builder)
.build();
@@ -248,14 +177,6 @@ public void testCalculateIncreaseShardingRecommendations() {
builder.put(dataStream);
ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
.nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2")))
- .nodeFeatures(
- Map.of(
- "n1",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()),
- "n2",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id())
- )
- )
.metadata(builder)
.build();
@@ -287,14 +208,6 @@ public void testCalculateIncreaseShardingRecommendations() {
builder.put(dataStream);
ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
.nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2")))
- .nodeFeatures(
- Map.of(
- "n1",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()),
- "n2",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id())
- )
- )
.metadata(builder)
.build();
@@ -326,14 +239,6 @@ public void testCalculateDecreaseShardingRecommendations() {
builder.put(dataStream);
ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
.nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2")))
- .nodeFeatures(
- Map.of(
- "n1",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()),
- "n2",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id())
- )
- )
.metadata(builder)
.build();
@@ -367,14 +272,6 @@ public void testCalculateDecreaseShardingRecommendations() {
builder.put(dataStream);
ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
.nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2")))
- .nodeFeatures(
- Map.of(
- "n1",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()),
- "n2",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id())
- )
- )
.metadata(builder)
.build();
@@ -416,14 +313,6 @@ public void testCalculateDecreaseShardingRecommendations() {
builder.put(dataStream);
ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
.nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2")))
- .nodeFeatures(
- Map.of(
- "n1",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()),
- "n2",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id())
- )
- )
.metadata(builder)
.build();
@@ -463,14 +352,6 @@ public void testCalculateDecreaseShardingRecommendations() {
builder.put(dataStream);
ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
.nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2")))
- .nodeFeatures(
- Map.of(
- "n1",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()),
- "n2",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id())
- )
- )
.metadata(builder)
.build();
@@ -504,14 +385,6 @@ public void testCalculateDecreaseShardingRecommendations() {
builder.put(dataStream);
ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
.nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("n1")).add(DiscoveryNodeUtils.create("n2")))
- .nodeFeatures(
- Map.of(
- "n1",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()),
- "n2",
- Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id())
- )
- )
.metadata(builder)
.build();
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java
index 3623683532c59..3ed74392f746e 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java
@@ -561,6 +561,9 @@ public void testValidateIndexName() throws Exception {
validateIndexName(checkerService, "..", "must not be '.' or '..'");
validateIndexName(checkerService, "foo:bar", "must not contain ':'");
+
+ validateIndexName(checkerService, "", "must not be empty");
+ validateIndexName(checkerService, null, "must not be empty");
}));
}
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceTests.java
index 760900817780a..2c15addfe217b 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceTests.java
@@ -46,19 +46,19 @@ public void testShardMovements() {
);
assertThat(
- "1 shard movements when existing shard is moved and new shard copy is unassigned",
+ "1 shard movement when an existing shard copy is moved and a new shard copy is unassigned",
shardMovements(new ShardAssignment(Set.of("a", "b"), 2, 0, 0), new ShardAssignment(Set.of("a", "c"), 3, 1, 0)),
equalTo(1)
);
assertThat(
- "1 shard movement",
+ "1 shard movement when an existing shard copy is moved",
shardMovements(new ShardAssignment(Set.of("a", "b"), 2, 0, 0), new ShardAssignment(Set.of("a", "c"), 2, 0, 0)),
equalTo(1)
);
assertThat(
- "2 shard movement",
+ "2 shard movements when both shard copies are moved to new nodes",
shardMovements(new ShardAssignment(Set.of("a", "b"), 2, 0, 0), new ShardAssignment(Set.of("c", "d"), 2, 0, 0)),
equalTo(2)
);
@@ -77,10 +77,10 @@ public void testShardMovements() {
}
private static int shardMovements(ShardAssignment old, ShardAssignment updated) {
- return DesiredBalance.shardMovements(of(old), of(updated));
+ return DesiredBalance.shardMovements(createDesiredBalanceWith(old), createDesiredBalanceWith(updated));
}
- private static DesiredBalance of(ShardAssignment assignment) {
+ private static DesiredBalance createDesiredBalanceWith(ShardAssignment assignment) {
return new DesiredBalance(1, Map.of(new ShardId("index", "_na_", 0), assignment));
}
}
diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java
index 7c1c954e7b4e9..e6f50ef42365e 100644
--- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java
@@ -66,6 +66,12 @@ public long relativeTimeInMillis() {
assertThat(Thread.currentThread().getName(), containsString(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME));
return currentTimeMillis;
}
+
+ @Override
+ public long rawRelativeTimeInMillis() {
+ assertThat(Thread.currentThread().getName(), containsString(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME));
+ return currentTimeMillis;
+ }
};
clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
allowClusterStateApplicationFailure = false;
@@ -207,15 +213,33 @@ public void testLongClusterStateUpdateLogging() throws Exception {
);
mockLog.addExpectation(
new MockLog.SeenEventExpectation(
- "test4",
+ "test3",
ClusterApplierService.class.getCanonicalName(),
Level.WARN,
"*cluster state applier task [test3] took [34s] which is above the warn threshold of [*]: "
+ "[running task [test3]] took [*"
)
);
+ mockLog.addExpectation(
+ new MockLog.SeenEventExpectation(
+ "test4",
+ ClusterApplierService.class.getCanonicalName(),
+ Level.WARN,
+ "*cluster state applier task [test4] took [36s] which is above the warn threshold of [*]: "
+ + "[running task [test4]] took [*"
+ )
+ );
+ mockLog.addExpectation(
+ new MockLog.SeenEventExpectation(
+ "test5",
+ ClusterApplierService.class.getCanonicalName(),
+ Level.WARN,
+ "*cluster state applier task [test5] took [38s] which is above the warn threshold of [*]: "
+ + "[running task [test5]] took [*"
+ )
+ );
- final CountDownLatch latch = new CountDownLatch(4);
+ final CountDownLatch latch = new CountDownLatch(6);
final CountDownLatch processedFirstTask = new CountDownLatch(1);
currentTimeMillis = randomLongBetween(0L, Long.MAX_VALUE / 2);
clusterApplierService.runOnApplierThread(
@@ -266,9 +290,39 @@ public void onFailure(Exception e) {
}
}
);
+ clusterApplierService.runOnApplierThread("test4", Priority.HIGH, currentState -> {
+ // do nothing (testing that onResponse is included in timing)
+ }, new ActionListener<>() {
+
+ @Override
+ public void onResponse(Void unused) {
+ advanceTime(TimeValue.timeValueSeconds(36).millis());
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ fail();
+ }
+ });
+ clusterApplierService.runOnApplierThread("test5", Priority.HIGH, currentState -> {
+ throw new IllegalArgumentException("Testing that onFailure is included in timing");
+ }, new ActionListener<>() {
+
+ @Override
+ public void onResponse(Void unused) {
+ fail();
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ advanceTime(TimeValue.timeValueSeconds(38).millis());
+ latch.countDown();
+ }
+ });
// Additional update task to make sure all previous logging made it to the loggerName
// We don't check logging for this on since there is no guarantee that it will occur before our check
- clusterApplierService.runOnApplierThread("test4", Priority.HIGH, currentState -> {}, new ActionListener<>() {
+ clusterApplierService.runOnApplierThread("test6", Priority.HIGH, currentState -> {}, new ActionListener<>() {
@Override
public void onResponse(Void ignored) {
latch.countDown();
diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/CompatibleNamedXContentRegistryTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/CompatibleNamedXContentRegistryTests.java
index de1990361e766..8f62f8d33eacd 100644
--- a/server/src/test/java/org/elasticsearch/common/xcontent/CompatibleNamedXContentRegistryTests.java
+++ b/server/src/test/java/org/elasticsearch/common/xcontent/CompatibleNamedXContentRegistryTests.java
@@ -12,7 +12,6 @@
import org.elasticsearch.Version;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.core.RestApiVersion;
-import org.elasticsearch.core.UpdateForV9;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.rest.FakeRestRequest;
@@ -116,8 +115,6 @@ public static NewSubObject parse(XContentParser parser) {
}
}
- @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA)
- @AwaitsFix(bugUrl = "this can be re-enabled once our rest api version is bumped to V_9")
public void testNotCompatibleRequest() throws IOException {
NamedXContentRegistry registry = new NamedXContentRegistry(
List.of(
diff --git a/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java b/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java
index 7a63934b4810b..eca30fdce2cf8 100644
--- a/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java
+++ b/server/src/test/java/org/elasticsearch/health/node/LocalHealthMonitorTests.java
@@ -24,8 +24,6 @@
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.RelativeByteSizeValue;
import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.features.FeatureService;
-import org.elasticsearch.health.HealthFeatures;
import org.elasticsearch.health.HealthStatus;
import org.elasticsearch.health.metadata.HealthMetadata;
import org.elasticsearch.health.node.selection.HealthNode;
@@ -119,18 +117,9 @@ public void setUp() throws Exception {
client = mock(Client.class);
- FeatureService featureService = new FeatureService(List.of(new HealthFeatures()));
-
mockHealthTracker = new MockHealthTracker();
- localHealthMonitor = LocalHealthMonitor.create(
- Settings.EMPTY,
- clusterService,
- threadPool,
- client,
- featureService,
- List.of(mockHealthTracker)
- );
+ localHealthMonitor = LocalHealthMonitor.create(Settings.EMPTY, clusterService, threadPool, client, List.of(mockHealthTracker));
}
@After
diff --git a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java
index 51dadd8154549..3069589f9556c 100644
--- a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java
+++ b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java
@@ -22,7 +22,6 @@
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.features.FeatureService;
-import org.elasticsearch.health.HealthFeatures;
import org.elasticsearch.persistent.PersistentTaskState;
import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
import org.elasticsearch.persistent.PersistentTasksService;
@@ -78,7 +77,7 @@ public void setUp() throws Exception {
localNodeId = clusterService.localNode().getId();
persistentTasksService = mock(PersistentTasksService.class);
settings = Settings.builder().build();
- featureService = new FeatureService(List.of(new HealthFeatures()));
+ featureService = new FeatureService(List.of());
clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
}
diff --git a/server/src/test/java/org/elasticsearch/index/codec/vectors/es816/ES816BinaryQuantizedVectorsWriter.java b/server/src/test/java/org/elasticsearch/index/codec/vectors/es816/ES816BinaryQuantizedVectorsWriter.java
index 4d97235c5fae5..61bd5323b5b43 100644
--- a/server/src/test/java/org/elasticsearch/index/codec/vectors/es816/ES816BinaryQuantizedVectorsWriter.java
+++ b/server/src/test/java/org/elasticsearch/index/codec/vectors/es816/ES816BinaryQuantizedVectorsWriter.java
@@ -437,32 +437,35 @@ private CloseableRandomVectorScorerSupplier mergeOneFieldToIndex(
float[] centroid,
float cDotC
) throws IOException {
- long vectorDataOffset = binarizedVectorData.alignFilePointer(Float.BYTES);
- final IndexOutput tempQuantizedVectorData = segmentWriteState.directory.createTempOutput(
- binarizedVectorData.getName(),
- "temp",
- segmentWriteState.context
- );
- final IndexOutput tempScoreQuantizedVectorData = segmentWriteState.directory.createTempOutput(
- binarizedVectorData.getName(),
- "score_temp",
- segmentWriteState.context
- );
- IndexInput binarizedDataInput = null;
- IndexInput binarizedScoreDataInput = null;
- boolean success = false;
- int descritizedDimension = BQVectorUtils.discretize(fieldInfo.getVectorDimension(), 64);
- BinaryQuantizer quantizer = new BinaryQuantizer(
+ final long vectorDataOffset = binarizedVectorData.alignFilePointer(Float.BYTES);
+ final int descritizedDimension = BQVectorUtils.discretize(fieldInfo.getVectorDimension(), 64);
+ final BinaryQuantizer quantizer = new BinaryQuantizer(
fieldInfo.getVectorDimension(),
descritizedDimension,
fieldInfo.getVectorSimilarityFunction()
);
+
+ IndexOutput tempQuantizedVectorData = null;
+ IndexOutput tempScoreQuantizedVectorData = null;
+ final DocsWithFieldSet docsWithField;
+ boolean success = false;
+
try {
+ tempQuantizedVectorData = segmentWriteState.directory.createTempOutput(
+ binarizedVectorData.getName(),
+ "temp",
+ segmentWriteState.context
+ );
+ tempScoreQuantizedVectorData = segmentWriteState.directory.createTempOutput(
+ binarizedVectorData.getName(),
+ "score_temp",
+ segmentWriteState.context
+ );
FloatVectorValues floatVectorValues = KnnVectorsWriter.MergedVectorValues.mergeFloatVectorValues(fieldInfo, mergeState);
if (fieldInfo.getVectorSimilarityFunction() == COSINE) {
floatVectorValues = new NormalizedFloatVectorValues(floatVectorValues);
}
- DocsWithFieldSet docsWithField = writeBinarizedVectorAndQueryData(
+ docsWithField = writeBinarizedVectorAndQueryData(
tempQuantizedVectorData,
tempScoreQuantizedVectorData,
floatVectorValues,
@@ -470,13 +473,30 @@ private CloseableRandomVectorScorerSupplier mergeOneFieldToIndex(
quantizer
);
CodecUtil.writeFooter(tempQuantizedVectorData);
- IOUtils.close(tempQuantizedVectorData);
+ CodecUtil.writeFooter(tempScoreQuantizedVectorData);
+ success = true;
+ } finally {
+ if (success) {
+ IOUtils.close(tempQuantizedVectorData, tempScoreQuantizedVectorData);
+ } else {
+ IOUtils.closeWhileHandlingException(tempQuantizedVectorData, tempScoreQuantizedVectorData);
+ if (tempQuantizedVectorData != null) {
+ IOUtils.deleteFilesIgnoringExceptions(segmentWriteState.directory, tempQuantizedVectorData.getName());
+ }
+ if (tempScoreQuantizedVectorData != null) {
+ IOUtils.deleteFilesIgnoringExceptions(segmentWriteState.directory, tempScoreQuantizedVectorData.getName());
+ }
+ }
+ }
+
+ IndexInput binarizedDataInput = null;
+ IndexInput binarizedScoreDataInput = null;
+ success = false;
+ try {
binarizedDataInput = segmentWriteState.directory.openInput(tempQuantizedVectorData.getName(), segmentWriteState.context);
binarizedVectorData.copyBytes(binarizedDataInput, binarizedDataInput.length() - CodecUtil.footerLength());
- long vectorDataLength = binarizedVectorData.getFilePointer() - vectorDataOffset;
+ final long vectorDataLength = binarizedVectorData.getFilePointer() - vectorDataOffset;
CodecUtil.retrieveChecksum(binarizedDataInput);
- CodecUtil.writeFooter(tempScoreQuantizedVectorData);
- IOUtils.close(tempScoreQuantizedVectorData);
binarizedScoreDataInput = segmentWriteState.directory.openInput(
tempScoreQuantizedVectorData.getName(),
segmentWriteState.context
@@ -490,10 +510,9 @@ private CloseableRandomVectorScorerSupplier mergeOneFieldToIndex(
cDotC,
docsWithField
);
- success = true;
final IndexInput finalBinarizedDataInput = binarizedDataInput;
final IndexInput finalBinarizedScoreDataInput = binarizedScoreDataInput;
- OffHeapBinarizedVectorValues vectorValues = new OffHeapBinarizedVectorValues.DenseOffHeapVectorValues(
+ final OffHeapBinarizedVectorValues vectorValues = new OffHeapBinarizedVectorValues.DenseOffHeapVectorValues(
fieldInfo.getVectorDimension(),
docsWithField.cardinality(),
centroid,
@@ -503,7 +522,7 @@ private CloseableRandomVectorScorerSupplier mergeOneFieldToIndex(
vectorsScorer,
finalBinarizedDataInput
);
- RandomVectorScorerSupplier scorerSupplier = vectorsScorer.getRandomVectorScorerSupplier(
+ final RandomVectorScorerSupplier scorerSupplier = vectorsScorer.getRandomVectorScorerSupplier(
fieldInfo.getVectorSimilarityFunction(),
new OffHeapBinarizedQueryVectorValues(
finalBinarizedScoreDataInput,
@@ -513,22 +532,20 @@ private CloseableRandomVectorScorerSupplier mergeOneFieldToIndex(
),
vectorValues
);
+ final String tempQuantizedVectorDataName = tempQuantizedVectorData.getName();
+ final String tempScoreQuantizedVectorDataName = tempScoreQuantizedVectorData.getName();
+ success = true;
return new BinarizedCloseableRandomVectorScorerSupplier(scorerSupplier, vectorValues, () -> {
IOUtils.close(finalBinarizedDataInput, finalBinarizedScoreDataInput);
IOUtils.deleteFilesIgnoringExceptions(
segmentWriteState.directory,
- tempQuantizedVectorData.getName(),
- tempScoreQuantizedVectorData.getName()
+ tempQuantizedVectorDataName,
+ tempScoreQuantizedVectorDataName
);
});
} finally {
if (success == false) {
- IOUtils.closeWhileHandlingException(
- tempQuantizedVectorData,
- tempScoreQuantizedVectorData,
- binarizedDataInput,
- binarizedScoreDataInput
- );
+ IOUtils.closeWhileHandlingException(binarizedDataInput, binarizedScoreDataInput);
IOUtils.deleteFilesIgnoringExceptions(
segmentWriteState.directory,
tempQuantizedVectorData.getName(),
diff --git a/server/src/test/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshotTests.java b/server/src/test/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshotTests.java
index 2a6c3428d6d45..a5d5d9b210e33 100644
--- a/server/src/test/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshotTests.java
+++ b/server/src/test/java/org/elasticsearch/index/engine/LuceneSyntheticSourceChangesSnapshotTests.java
@@ -19,14 +19,12 @@
import java.io.IOException;
-import static org.elasticsearch.index.mapper.SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING;
-
public class LuceneSyntheticSourceChangesSnapshotTests extends SearchBasedChangesSnapshotTests {
@Override
protected Settings indexSettings() {
return Settings.builder()
.put(super.indexSettings())
- .put(INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.name())
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.name())
.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true)
.build();
}
diff --git a/server/src/test/java/org/elasticsearch/index/engine/TranslogOperationAsserterTests.java b/server/src/test/java/org/elasticsearch/index/engine/TranslogOperationAsserterTests.java
index b764bce464d15..d0455c14bd784 100644
--- a/server/src/test/java/org/elasticsearch/index/engine/TranslogOperationAsserterTests.java
+++ b/server/src/test/java/org/elasticsearch/index/engine/TranslogOperationAsserterTests.java
@@ -24,8 +24,6 @@
import java.io.IOException;
-import static org.elasticsearch.index.mapper.SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING;
-
public class TranslogOperationAsserterTests extends EngineTestCase {
@Override
@@ -33,7 +31,7 @@ protected Settings indexSettings() {
return Settings.builder()
.put(super.indexSettings())
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
- .put(INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.name())
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.name())
.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true)
.build();
}
@@ -57,10 +55,10 @@ EngineConfig engineConfig(boolean useSyntheticSource) {
EngineConfig config = engine.config();
Settings.Builder settings = Settings.builder().put(config.getIndexSettings().getSettings());
if (useSyntheticSource) {
- settings.put(INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.name());
+ settings.put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.name());
settings.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true);
} else {
- settings.put(INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED.name());
+ settings.put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED.name());
settings.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), false);
}
IndexMetadata imd = IndexMetadata.builder(config.getIndexSettings().getIndexMetadata()).settings(settings).build();
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java
index 907a1a15721dc..4b0fac2cf2e0f 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java
@@ -54,7 +54,7 @@ public void putMappings() {
}
public void testGetMappings() {
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings().get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT).get();
assertExpectedMappings(getMappingsResponse.mappings());
}
@@ -71,7 +71,7 @@ public void testGetFieldMappings() {
assertFieldMappings(mappings.get("filtered"), FILTERED_FLAT_FIELDS);
// double check that submitting the filtered mappings to an unfiltered index leads to the same get field mappings output
// as the one coming from a filtered index with same mappings
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("filtered").get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "filtered").get();
MappingMetadata filtered = getMappingsResponse.getMappings().get("filtered");
assertAcked(indicesAdmin().prepareCreate("test").setMapping(filtered.getSourceAsMap()));
GetFieldMappingsResponse response = indicesAdmin().prepareGetFieldMappings("test").setFields("*").get();
@@ -98,7 +98,7 @@ public void testFieldCapabilities() {
assertFieldCaps(filtered, filteredFields);
// double check that submitting the filtered mappings to an unfiltered index leads to the same field_caps output
// as the one coming from a filtered index with same mappings
- GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings("filtered").get();
+ GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "filtered").get();
MappingMetadata filteredMapping = getMappingsResponse.getMappings().get("filtered");
assertAcked(indicesAdmin().prepareCreate("test").setMapping(filteredMapping.getSourceAsMap()));
FieldCapabilitiesResponse test = client().fieldCaps(new FieldCapabilitiesRequest().fields("*").indices("test")).actionGet();
@@ -155,7 +155,7 @@ private void assertExpectedMappings(Map mappings) {
private void assertMappingsAreValid(Map sourceAsMap) {
// check that the returned filtered mappings are still valid mappings by submitting them and retrieving them back
assertAcked(indicesAdmin().prepareCreate("test").setMapping(sourceAsMap));
- GetMappingsResponse testMappingsResponse = indicesAdmin().prepareGetMappings("test").get();
+ GetMappingsResponse testMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get();
assertEquals(1, testMappingsResponse.getMappings().size());
// the mappings are returned unfiltered for this index, yet they are the same as the previous ones that were returned filtered
assertFiltered(testMappingsResponse.getMappings().get("test"));
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperConfigurationTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperConfigurationTests.java
index 8646e1b66dcb0..1ba5f423d4b03 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperConfigurationTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperConfigurationTests.java
@@ -12,6 +12,7 @@
import org.apache.lucene.index.DirectoryReader;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.CheckedConsumer;
+import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
@@ -130,7 +131,7 @@ private MapperService mapperServiceWithCustomSettings(
for (var entry : customSettings.entrySet()) {
settings.put(entry.getKey(), entry.getValue());
}
- settings.put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC);
+ settings.put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC);
return createMapperService(settings.build(), mapping(mapping));
}
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NonDynamicFieldMapperTestCase.java b/server/src/test/java/org/elasticsearch/index/mapper/NonDynamicFieldMapperTestCase.java
index 6ea41763cbcc2..b669c717719b0 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/NonDynamicFieldMapperTestCase.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/NonDynamicFieldMapperTestCase.java
@@ -43,7 +43,7 @@ public void testCreateExplicitMappingSucceeds() throws Exception {
""", getMapping());
var resp = client().admin().indices().prepareCreate("test").setMapping(mapping).get();
assertTrue(resp.isAcknowledged());
- var mappingsResp = client().admin().indices().prepareGetMappings("test").get();
+ var mappingsResp = client().admin().indices().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test").get();
var mappingMetadata = mappingsResp.getMappings().get("test");
var fieldType = XContentMapValues.extractValue("properties.field.type", mappingMetadata.getSourceAsMap());
assertThat(fieldType, equalTo(getTypeName()));
@@ -149,7 +149,7 @@ public void testCreateExplicitMappingInIndexTemplateSucceeds() throws Exception
var resp = client().prepareIndex("test1").setSource("field", "hello world").get();
assertThat(resp.status(), equalTo(RestStatus.CREATED));
- var mappingsResp = client().admin().indices().prepareGetMappings("test1").get();
+ var mappingsResp = client().admin().indices().prepareGetMappings(TEST_REQUEST_TIMEOUT, "test1").get();
var mappingMetadata = mappingsResp.getMappings().get("test1");
var fieldType = XContentMapValues.extractValue("properties.field.type", mappingMetadata.getSourceAsMap());
assertThat(fieldType, equalTo(getTypeName()));
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java
index b7693513a434d..bc560d94b8f52 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java
@@ -427,7 +427,7 @@ public void testRecoverySourceWitInvalidSettings() {
{
Settings settings = Settings.builder()
- .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED.toString())
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED.toString())
.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true)
.build();
IllegalArgumentException exc = expectThrows(
@@ -470,7 +470,7 @@ public void testRecoverySourceWitInvalidSettings() {
public void testRecoverySourceWithSyntheticSource() throws IOException {
{
Settings settings = Settings.builder()
- .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.toString())
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.toString())
.build();
MapperService mapperService = createMapperService(settings, topMapping(b -> {}));
DocumentMapper docMapper = mapperService.documentMapper();
@@ -480,7 +480,7 @@ public void testRecoverySourceWithSyntheticSource() throws IOException {
}
{
Settings settings = Settings.builder()
- .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.toString())
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC.toString())
.put(IndexSettings.RECOVERY_USE_SYNTHETIC_SOURCE_SETTING.getKey(), true)
.build();
MapperService mapperService = createMapperService(settings, topMapping(b -> {}));
@@ -539,7 +539,7 @@ public void testStandardIndexModeWithSourceModeSetting() throws IOException {
final XContentBuilder mappings = topMapping(b -> {});
final Settings settings = Settings.builder()
.put(IndexSettings.MODE.getKey(), IndexMode.STANDARD.name())
- .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC)
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC)
.build();
final MapperService mapperService = createMapperService(settings, mappings);
DocumentMapper docMapper = mapperService.documentMapper();
@@ -549,7 +549,7 @@ public void testStandardIndexModeWithSourceModeSetting() throws IOException {
final XContentBuilder mappings = topMapping(b -> {});
final Settings settings = Settings.builder()
.put(IndexSettings.MODE.getKey(), IndexMode.STANDARD.name())
- .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED)
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED)
.build();
final MapperService mapperService = createMapperService(settings, mappings);
final DocumentMapper docMapper = mapperService.documentMapper();
@@ -559,7 +559,7 @@ public void testStandardIndexModeWithSourceModeSetting() throws IOException {
final XContentBuilder mappings = topMapping(b -> {});
final Settings settings = Settings.builder()
.put(IndexSettings.MODE.getKey(), IndexMode.STANDARD.name())
- .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.DISABLED)
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.DISABLED)
.build();
final MapperService mapperService = createMapperService(settings, mappings);
final DocumentMapper docMapper = mapperService.documentMapper();
@@ -571,7 +571,7 @@ public void testStandardIndexModeWithSourceModeSetting() throws IOException {
final XContentBuilder mappings = topMapping(b -> {});
final Settings settings = Settings.builder()
.put(IndexSettings.MODE.getKey(), IndexMode.LOGSDB.name())
- .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC)
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC)
.build();
final MapperService mapperService = createMapperService(settings, mappings);
DocumentMapper docMapper = mapperService.documentMapper();
@@ -581,7 +581,7 @@ public void testStandardIndexModeWithSourceModeSetting() throws IOException {
final XContentBuilder mappings = topMapping(b -> {});
final Settings settings = Settings.builder()
.put(IndexSettings.MODE.getKey(), IndexMode.LOGSDB.name())
- .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED)
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED)
.build();
final MapperService mapperService = createMapperService(settings, mappings);
final DocumentMapper docMapper = mapperService.documentMapper();
@@ -591,7 +591,7 @@ public void testStandardIndexModeWithSourceModeSetting() throws IOException {
final XContentBuilder mappings = topMapping(b -> {});
final Settings settings = Settings.builder()
.put(IndexSettings.MODE.getKey(), IndexMode.LOGSDB.name())
- .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.DISABLED)
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.DISABLED)
.build();
var ex = expectThrows(MapperParsingException.class, () -> createMapperService(settings, mappings));
assertEquals("Failed to parse mapping: _source can not be disabled in index using [logsdb] index mode", ex.getMessage());
@@ -613,7 +613,7 @@ public void testStandardIndexModeWithSourceModeSetting() throws IOException {
""";
final Settings settings = Settings.builder()
.put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES.name())
- .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC)
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC)
.put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "routing_field")
.build();
final MapperService mapperService = createMapperService(settings, mappings);
@@ -635,7 +635,7 @@ public void testStandardIndexModeWithSourceModeSetting() throws IOException {
""";
final Settings settings = Settings.builder()
.put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES.name())
- .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED)
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED)
.put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "routing_field")
.build();
final MapperService mapperService = createMapperService(settings, mappings);
@@ -657,7 +657,7 @@ public void testStandardIndexModeWithSourceModeSetting() throws IOException {
""";
final Settings settings = Settings.builder()
.put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES.name())
- .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.DISABLED)
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.DISABLED)
.put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "routing_field")
.build();
var ex = expectThrows(MapperParsingException.class, () -> createMapperService(settings, mappings));
@@ -668,7 +668,7 @@ public void testStandardIndexModeWithSourceModeSetting() throws IOException {
{
final XContentBuilder mappings = topMapping(b -> {});
final Settings settings = Settings.builder()
- .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC)
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.SYNTHETIC)
.build();
final MapperService mapperService = createMapperService(settings, mappings);
DocumentMapper docMapper = mapperService.documentMapper();
@@ -677,7 +677,7 @@ public void testStandardIndexModeWithSourceModeSetting() throws IOException {
{
final XContentBuilder mappings = topMapping(b -> {});
final Settings settings = Settings.builder()
- .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED)
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.STORED)
.build();
final MapperService mapperService = createMapperService(settings, mappings);
final DocumentMapper docMapper = mapperService.documentMapper();
@@ -686,7 +686,7 @@ public void testStandardIndexModeWithSourceModeSetting() throws IOException {
{
final XContentBuilder mappings = topMapping(b -> {});
final Settings settings = Settings.builder()
- .put(SourceFieldMapper.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.DISABLED)
+ .put(IndexSettings.INDEX_MAPPER_SOURCE_MODE_SETTING.getKey(), SourceFieldMapper.Mode.DISABLED)
.build();
final MapperService mapperService = createMapperService(settings, mappings);
final DocumentMapper docMapper = mapperService.documentMapper();
diff --git a/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java b/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java
index 15febaa8db2ab..8a84b0ec59b51 100644
--- a/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java
@@ -32,7 +32,6 @@
import org.elasticsearch.http.HttpInfo;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.HttpStats;
-import org.elasticsearch.reservedstate.service.FileSettingsFeatures;
import org.elasticsearch.reservedstate.service.FileSettingsService;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.MockLog;
@@ -48,7 +47,6 @@
import java.nio.channels.ServerSocketChannel;
import java.util.Collections;
import java.util.List;
-import java.util.Map;
import java.util.Set;
import static org.elasticsearch.cluster.metadata.ReservedStateErrorMetadata.ErrorKind.TRANSIENT;
@@ -60,7 +58,6 @@ public class ReadinessServiceTests extends ESTestCase implements ReadinessClient
private ThreadPool threadpool;
private Environment env;
private FakeHttpTransport httpTransport;
- private static final Set nodeFeatures = Set.of(FileSettingsFeatures.FILE_SETTINGS_SUPPORTED.id());
private static Metadata emptyReservedStateMetadata;
static {
@@ -310,26 +307,6 @@ public void testFileSettingsUpdateError() throws Exception {
readinessService.close();
}
- public void testFileSettingsMixedCluster() throws Exception {
- readinessService.start();
-
- // initially the service isn't ready because initial cluster state has not been applied yet
- assertFalse(readinessService.ready());
-
- ClusterState noFileSettingsState = ClusterState.builder(noFileSettingsState())
- // the master node is upgraded to support file settings, but existing node2 is not
- .nodeFeatures(Map.of(httpTransport.node.getId(), nodeFeatures))
- .build();
- ClusterChangedEvent event = new ClusterChangedEvent("test", noFileSettingsState, emptyState());
- readinessService.clusterChanged(event);
-
- // when upgrading from nodes before file settings exist, readiness should return true once a master is elected
- assertTrue(readinessService.ready());
-
- readinessService.stop();
- readinessService.close();
- }
-
private ClusterState emptyState() {
return ClusterState.builder(new ClusterName("cluster"))
.nodes(
@@ -347,7 +324,6 @@ private ClusterState noFileSettingsState() {
.masterNodeId(httpTransport.node.getId())
.localNodeId(httpTransport.node.getId())
)
- .nodeFeatures(Map.of(httpTransport.node.getId(), nodeFeatures, "node2", nodeFeatures))
.build();
}
}
diff --git a/server/src/test/java/org/elasticsearch/rest/RestCompatibleVersionHelperTests.java b/server/src/test/java/org/elasticsearch/rest/RestCompatibleVersionHelperTests.java
index 040ab9fd5c2e9..028438b5e9267 100644
--- a/server/src/test/java/org/elasticsearch/rest/RestCompatibleVersionHelperTests.java
+++ b/server/src/test/java/org/elasticsearch/rest/RestCompatibleVersionHelperTests.java
@@ -210,12 +210,15 @@ public void testObsoleteVersion() {
assertThat(
e.getMessage(),
equalTo(
- "A compatible version is required on both Content-Type and Accept headers if either one has requested a "
- + "compatible version and the compatible versions must match. "
- + "Accept="
- + acceptHeader(PREVIOUS_VERSION)
- + ", Content-Type="
- + contentTypeHeader(OBSOLETE_VERSION)
+ "Content-Type version must be either version "
+ + CURRENT_VERSION
+ + " or "
+ + PREVIOUS_VERSION
+ + ", but found "
+ + OBSOLETE_VERSION
+ + ". "
+ + "Content-Type="
+ + contentTypeHeader(OBSOLETE_VERSION)
)
);
}
diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java
index 0f73c367ff2ef..1bcc89b68c141 100644
--- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java
@@ -574,11 +574,10 @@ public void testPreTagsWithoutPostTags() throws IOException {
assertEquals("pre_tags are set but post_tags are not set", e.getCause().getCause().getMessage());
}
- @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/119723")
public void testInvalidMaxAnalyzedOffset() throws IOException {
XContentParseException e = expectParseThrows(
XContentParseException.class,
- "{ \"max_analyzed_offset\" : " + randomIntBetween(-100, -1) + "}"
+ "{ \"max_analyzed_offset\" : " + randomIntBetween(-100, -2) + "}"
);
assertThat(e.getMessage(), containsString("[highlight] failed to parse field [" + MAX_ANALYZED_OFFSET_FIELD.toString() + "]"));
assertThat(e.getCause().getMessage(), containsString("[max_analyzed_offset] must be a positive integer, or -1"));
diff --git a/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java b/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java
index da28b0eff441f..2724b86f9acd4 100644
--- a/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java
+++ b/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java
@@ -11,6 +11,7 @@
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.core.Predicates;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.MatchNoneQueryBuilder;
@@ -88,10 +89,7 @@ protected KnnRetrieverBuilder createTestInstance() {
protected KnnRetrieverBuilder doParseInstance(XContentParser parser) throws IOException {
return (KnnRetrieverBuilder) RetrieverBuilder.parseTopLevelRetrieverBuilder(
parser,
- new RetrieverParserContext(
- new SearchUsage(),
- nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == KnnRetrieverBuilder.KNN_RETRIEVER_SUPPORTED
- )
+ new RetrieverParserContext(new SearchUsage(), Predicates.never())
);
}
diff --git a/server/src/test/java/org/elasticsearch/search/retriever/RetrieverBuilderVersionTests.java b/server/src/test/java/org/elasticsearch/search/retriever/RetrieverBuilderVersionTests.java
deleted file mode 100644
index 6448d11de2e47..0000000000000
--- a/server/src/test/java/org/elasticsearch/search/retriever/RetrieverBuilderVersionTests.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.search.retriever;
-
-import org.elasticsearch.common.ParsingException;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.features.NodeFeature;
-import org.elasticsearch.search.SearchModule;
-import org.elasticsearch.search.builder.SearchSourceBuilder;
-import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.xcontent.NamedXContentRegistry;
-import org.elasticsearch.xcontent.XContentParser;
-import org.elasticsearch.xcontent.json.JsonXContent;
-
-import java.io.IOException;
-import java.util.List;
-
-/** Tests retrievers validate on their own {@link NodeFeature} */
-public class RetrieverBuilderVersionTests extends ESTestCase {
-
- public void testRetrieverVersions() throws IOException {
- try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"retriever\":{\"standard\":{}}}")) {
- SearchSourceBuilder ssb = new SearchSourceBuilder();
- ParsingException iae = expectThrows(ParsingException.class, () -> ssb.parseXContent(parser, true, nf -> false));
- assertEquals("Unknown key for a START_OBJECT in [retriever].", iae.getMessage());
- }
-
- try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"retriever\":{\"standard\":{}}}")) {
- SearchSourceBuilder ssb = new SearchSourceBuilder();
- ParsingException iae = expectThrows(
- ParsingException.class,
- () -> ssb.parseXContent(parser, true, nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED)
- );
- assertEquals("unknown retriever [standard]", iae.getMessage());
- }
-
- try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"retriever\":{\"standard\":{}}}")) {
- SearchSourceBuilder ssb = new SearchSourceBuilder();
- ssb.parseXContent(
- parser,
- true,
- nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == StandardRetrieverBuilder.STANDARD_RETRIEVER_SUPPORTED
- );
- }
-
- try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"retriever\":{\"knn\":{}}}")) {
- SearchSourceBuilder ssb = new SearchSourceBuilder();
- ParsingException iae = expectThrows(
- ParsingException.class,
- () -> ssb.parseXContent(parser, true, nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED)
- );
- assertEquals("unknown retriever [knn]", iae.getMessage());
- }
-
- try (
- XContentParser parser = createParser(
- JsonXContent.jsonXContent,
- "{\"retriever\":{\"knn\":{\"field\": \"test\", \"k\": 2, \"num_candidates\": 5, \"query_vector\": [1, 2, 3]}}}"
- )
- ) {
- SearchSourceBuilder ssb = new SearchSourceBuilder();
- ssb.parseXContent(
- parser,
- true,
- nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == KnnRetrieverBuilder.KNN_RETRIEVER_SUPPORTED
- );
- }
- }
-
- @Override
- protected NamedXContentRegistry xContentRegistry() {
- return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, List.of()).getNamedXContents());
- }
-}
diff --git a/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java b/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java
index eacd949077bc4..979c588089f5c 100644
--- a/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java
+++ b/server/src/test/java/org/elasticsearch/search/retriever/StandardRetrieverBuilderParsingTests.java
@@ -12,6 +12,7 @@
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.core.Predicates;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.MatchNoneQueryBuilder;
@@ -100,10 +101,7 @@ protected StandardRetrieverBuilder createTestInstance() {
protected StandardRetrieverBuilder doParseInstance(XContentParser parser) throws IOException {
return (StandardRetrieverBuilder) RetrieverBuilder.parseTopLevelRetrieverBuilder(
parser,
- new RetrieverParserContext(
- new SearchUsage(),
- nf -> nf == RetrieverBuilder.RETRIEVERS_SUPPORTED || nf == StandardRetrieverBuilder.STANDARD_RETRIEVER_SUPPORTED
- )
+ new RetrieverParserContext(new SearchUsage(), Predicates.never())
);
}
diff --git a/server/src/test/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceTests.java
index cc21ade314715..cadd9d5196f69 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceTests.java
@@ -22,7 +22,6 @@
import org.elasticsearch.features.FeatureService;
import org.elasticsearch.health.Diagnosis;
import org.elasticsearch.health.Diagnosis.Resource.Type;
-import org.elasticsearch.health.HealthFeatures;
import org.elasticsearch.health.HealthIndicatorDetails;
import org.elasticsearch.health.HealthIndicatorResult;
import org.elasticsearch.health.SimpleHealthIndicatorDetails;
@@ -37,7 +36,6 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.Set;
import java.util.stream.Stream;
import static org.elasticsearch.cluster.node.DiscoveryNode.DISCOVERY_NODE_COMPARATOR;
@@ -382,10 +380,7 @@ public void testMappedFieldsForTelemetry() {
}
private ClusterState createClusterStateWith(RepositoriesMetadata metadata) {
- var features = Set.of(HealthFeatures.SUPPORTS_EXTENDED_REPOSITORY_INDICATOR.id());
- var builder = ClusterState.builder(new ClusterName("test-cluster"))
- .nodes(DiscoveryNodes.builder().add(node1).add(node2).build())
- .nodeFeatures(Map.of(node1.getId(), features, node2.getId(), features));
+ var builder = ClusterState.builder(new ClusterName("test-cluster")).nodes(DiscoveryNodes.builder().add(node1).add(node2).build());
if (metadata != null) {
builder.metadata(Metadata.builder().putCustom(RepositoriesMetadata.TYPE, metadata));
}
@@ -399,7 +394,7 @@ private static RepositoryMetadata createRepositoryMetadata(String name, boolean
private RepositoryIntegrityHealthIndicatorService createRepositoryIntegrityHealthIndicatorService(ClusterState clusterState) {
var clusterService = mock(ClusterService.class);
when(clusterService.state()).thenReturn(clusterState);
- return new RepositoryIntegrityHealthIndicatorService(clusterService, featureService);
+ return new RepositoryIntegrityHealthIndicatorService(clusterService);
}
private SimpleHealthIndicatorDetails createDetails(int total, int corruptedCount, List corrupted, int unknown, int invalid) {
diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
index 7a07e407024ce..0fe886f37aa47 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java
@@ -241,9 +241,7 @@
import static org.hamcrest.Matchers.iterableWithSize;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.notNullValue;
-import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
public class SnapshotResiliencyTests extends ESTestCase {
@@ -2111,8 +2109,6 @@ protected void connectToNodesAndWait(ClusterState newClusterState) {
}
);
recoverySettings = new RecoverySettings(settings, clusterSettings);
- FeatureService mockFeatureService = mock(FeatureService.class);
- when(mockFeatureService.clusterHasFeature(any(), any())).thenReturn(true);
mockTransport = new DisruptableMockTransport(node, deterministicTaskQueue) {
@Override
protected ConnectionStatus getConnectionStatus(DiscoveryNode destination) {
@@ -2403,7 +2399,6 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() {
null,
FailureStoreMetrics.NOOP
),
- mockFeatureService,
client,
actionFilters,
indexNameExpressionResolver,
diff --git a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java
index 889afab00e830..c0185832d6122 100644
--- a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java
@@ -27,6 +27,7 @@
import static org.elasticsearch.common.bytes.ReleasableBytesReferenceStreamInputTests.wrapAsReleasable;
import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasItems;
import static org.hamcrest.Matchers.instanceOf;
@@ -182,7 +183,7 @@ public void testDecodePreHeaderSizeVariableInt() throws IOException {
}
}
- public void testDecodeHandshakeCompatibility() throws IOException {
+ public void testDecodeHandshakeV7Compatibility() throws IOException {
String action = "test-request";
long requestId = randomNonNegativeLong();
final String headerKey = randomAlphaOfLength(10);
@@ -223,6 +224,55 @@ public void testDecodeHandshakeCompatibility() throws IOException {
}
+ public void testDecodeHandshakeV8Compatibility() throws IOException {
+ doHandshakeCompatibilityTest(TransportHandshaker.REQUEST_HANDSHAKE_VERSION, null);
+ doHandshakeCompatibilityTest(TransportHandshaker.REQUEST_HANDSHAKE_VERSION, Compression.Scheme.DEFLATE);
+ }
+
+ public void testDecodeHandshakeV9Compatibility() throws IOException {
+ doHandshakeCompatibilityTest(TransportHandshaker.V9_HANDSHAKE_VERSION, null);
+ doHandshakeCompatibilityTest(TransportHandshaker.V9_HANDSHAKE_VERSION, Compression.Scheme.DEFLATE);
+ }
+
+ private void doHandshakeCompatibilityTest(TransportVersion transportVersion, Compression.Scheme compressionScheme) throws IOException {
+ String action = "test-request";
+ long requestId = randomNonNegativeLong();
+ final String headerKey = randomAlphaOfLength(10);
+ final String headerValue = randomAlphaOfLength(20);
+ threadContext.putHeader(headerKey, headerValue);
+ OutboundMessage message = new OutboundMessage.Request(
+ threadContext,
+ new TestRequest(randomAlphaOfLength(100)),
+ transportVersion,
+ action,
+ requestId,
+ true,
+ compressionScheme
+ );
+
+ try (RecyclerBytesStreamOutput os = new RecyclerBytesStreamOutput(recycler)) {
+ final BytesReference bytes = message.serialize(os);
+ int totalHeaderSize = TcpHeader.headerSize(transportVersion);
+
+ InboundDecoder decoder = new InboundDecoder(recycler);
+ final ArrayList