diff --git a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle index 60ae4d58f343e..90a4f74b5e9f4 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.ide.gradle @@ -10,6 +10,8 @@ import org.elasticsearch.gradle.util.Pair import org.elasticsearch.gradle.util.GradleUtils import org.elasticsearch.gradle.internal.test.TestUtil +import org.elasticsearch.gradle.internal.idea.EnablePreviewFeaturesTask +import org.elasticsearch.gradle.internal.idea.IdeaXmlUtil import org.jetbrains.gradle.ext.JUnit import java.nio.file.Files @@ -144,19 +146,10 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { } // modifies the idea module config to enable preview features on ':libs:native' module - tasks.register("enablePreviewFeatures") { + tasks.register("enablePreviewFeatures", EnablePreviewFeaturesTask) { group = 'ide' description = 'Enables preview features on native library module' dependsOn tasks.named("enableExternalConfiguration") - -// ext { - def enablePreview = { moduleFile, languageLevel -> - IdeaXmlUtil.modifyXml(moduleFile) { xml -> - xml.component.find { it.'@name' == 'NewModuleRootManager' }?.'@LANGUAGE_LEVEL' = languageLevel - } - } -// } - doLast { enablePreview('.idea/modules/libs/native/elasticsearch.libs.native.main.iml', 'JDK_21_PREVIEW') enablePreview('.idea/modules/libs/native/elasticsearch.libs.native.test.iml', 'JDK_21_PREVIEW') @@ -277,46 +270,6 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') { } } -/** - * Parses a given XML file, applies a set of changes, and writes those changes back to the original file. - * - * @param path Path to existing XML file - * @param action Action to perform on parsed XML document - * @param preface optional front matter to add after the XML declaration - * but before the XML document, e.g. 
a doctype or comment - */ - -class IdeaXmlUtil { - static Node parseXml(Object xmlPath) { - File xmlFile = new File(xmlPath) - XmlParser xmlParser = new XmlParser(false, true, true) - xmlParser.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false) - Node xml = xmlParser.parse(xmlFile) - return xml - } - - static void modifyXml(Object xmlPath, Action action, String preface = null) { - File xmlFile = new File(xmlPath) - if (xmlFile.exists()) { - Node xml = parseXml(xmlPath) - action.execute(xml) - - xmlFile.withPrintWriter { writer -> - def printer = new XmlNodePrinter(writer) - printer.namespaceAware = true - printer.preserveWhitespace = true - writer.write("\n") - - if (preface != null) { - writer.write(preface) - } - printer.print(xml) - } - } - } -} - - Pair locateElasticsearchWorkspace(Gradle gradle) { if (gradle.parent == null) { // See if any of these included builds is the Elasticsearch gradle diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/EnablePreviewFeaturesTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/EnablePreviewFeaturesTask.java new file mode 100644 index 0000000000000..f8c8b5127827f --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/EnablePreviewFeaturesTask.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.gradle.internal.idea; + +import groovy.util.Node; +import groovy.util.NodeList; + +import org.gradle.api.DefaultTask; +import org.xml.sax.SAXException; + +import java.io.IOException; + +import javax.xml.parsers.ParserConfigurationException; + +public class EnablePreviewFeaturesTask extends DefaultTask { + + public void enablePreview(String moduleFile, String languageLevel) throws IOException, ParserConfigurationException, SAXException { + IdeaXmlUtil.modifyXml(moduleFile, xml -> { + // Find the 'component' node + NodeList nodes = (NodeList) xml.depthFirst(); + Node componentNode = null; + for (Object node : nodes) { + Node currentNode = (Node) node; + if ("component".equals(currentNode.name()) && "NewModuleRootManager".equals(currentNode.attribute("name"))) { + componentNode = currentNode; + break; + } + } + + // Add the attribute to the 'component' node + if (componentNode != null) { + componentNode.attributes().put("LANGUAGE_LEVEL", languageLevel); + } + }); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/IdeaXmlUtil.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/IdeaXmlUtil.java new file mode 100644 index 0000000000000..b7cc2862a0af1 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/idea/IdeaXmlUtil.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.gradle.internal.idea; + +import groovy.util.Node; +import groovy.util.XmlParser; +import groovy.xml.XmlNodePrinter; + +import org.gradle.api.Action; +import org.xml.sax.SAXException; + +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; + +import javax.xml.parsers.ParserConfigurationException; + +public class IdeaXmlUtil { + + static Node parseXml(String xmlPath) throws IOException, SAXException, ParserConfigurationException { + File xmlFile = new File(xmlPath); + XmlParser xmlParser = new XmlParser(false, true, true); + xmlParser.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false); + Node xml = xmlParser.parse(xmlFile); + return xml; + } + + /** + * Parses a given XML file, applies a set of changes, and writes those changes back to the original file. + * + * @param path Path to existing XML file + * @param action Action to perform on parsed XML document + * but before the XML document, e.g. a doctype or comment + */ + static void modifyXml(String xmlPath, Action action) throws IOException, ParserConfigurationException, SAXException { + modifyXml(xmlPath, action, null); + } + + /** + * Parses a given XML file, applies a set of changes, and writes those changes back to the original file. + * + * @param path Path to existing XML file + * @param action Action to perform on parsed XML document + * @param preface optional front matter to add after the XML declaration + * but before the XML document, e.g. 
a doctype or comment + */ + static void modifyXml(String xmlPath, Action action, String preface) throws IOException, ParserConfigurationException, + SAXException { + File xmlFile = new File(xmlPath); + if (xmlFile.exists()) { + Node xml = parseXml(xmlPath); + action.execute(xml); + + try (PrintWriter writer = new PrintWriter(xmlFile)) { + var printer = new XmlNodePrinter(writer); + printer.setNamespaceAware(true); + printer.setPreserveWhitespace(true); + writer.write("\n"); + if (preface != null) { + writer.write(preface); + } + printer.print(xml); + } + } + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java index 265bd52eabe83..916823fd91b61 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java @@ -89,7 +89,6 @@ public static void stopHttpServers() throws IOException { } public void testBuilderUsesDefaultSSLContext() throws Exception { - assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm()); final SSLContext defaultSSLContext = SSLContext.getDefault(); try { try (RestClient client = buildRestClient()) { @@ -97,10 +96,15 @@ public void testBuilderUsesDefaultSSLContext() throws Exception { client.performRequest(new Request("GET", "/")); fail("connection should have been rejected due to SSL handshake"); } catch (Exception e) { - assertThat(e, instanceOf(SSLHandshakeException.class)); + if (inFipsJvm()) { + // Bouncy Castle throw a different exception + assertThat(e, instanceOf(IOException.class)); + assertThat(e.getCause(), instanceOf(javax.net.ssl.SSLException.class)); + } else { + assertThat(e, instanceOf(SSLHandshakeException.class)); + } } } - SSLContext.setDefault(getSslContext()); try (RestClient client = buildRestClient()) { Response response = 
client.performRequest(new Request("GET", "/")); @@ -112,7 +116,6 @@ public void testBuilderUsesDefaultSSLContext() throws Exception { } public void testBuilderSetsThreadName() throws Exception { - assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm()); final SSLContext defaultSSLContext = SSLContext.getDefault(); try { SSLContext.setDefault(getSslContext()); diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java index ce951715939f0..8b3977fe66428 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java @@ -71,7 +71,7 @@ static List systemJvmOptions(Settings nodeSettings, final Map s).toList(); } @@ -140,7 +140,7 @@ private static Stream maybeWorkaroundG1Bug() { } @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) - private static Stream maybeAllowSecurityManager() { + private static Stream maybeAllowSecurityManager(boolean useEntitlements) { if (RuntimeVersionFeature.isSecurityManagerAvailable()) { // Will become conditional on useEntitlements once entitlements can run without SM return Stream.of("-Djava.security.manager=allow"); diff --git a/docs/changelog/118324.yaml b/docs/changelog/118324.yaml new file mode 100644 index 0000000000000..729ff56f6a253 --- /dev/null +++ b/docs/changelog/118324.yaml @@ -0,0 +1,6 @@ +pr: 118324 +summary: Allow the data type of `null` in filters +area: ES|QL +type: bug +issues: + - 116351 diff --git a/docs/changelog/118652.yaml b/docs/changelog/118652.yaml new file mode 100644 index 0000000000000..0b08686230405 --- /dev/null +++ b/docs/changelog/118652.yaml @@ -0,0 +1,5 @@ +pr: 118652 +summary: Add Jina AI API to do inference for Embedding and Rerank models +area: Machine Learning +type: enhancement +issues: [] 
diff --git a/docs/changelog/118871.yaml b/docs/changelog/118871.yaml new file mode 100644 index 0000000000000..3c1a06d450f39 --- /dev/null +++ b/docs/changelog/118871.yaml @@ -0,0 +1,5 @@ +pr: 118871 +summary: "[Elastic Inference Service] Add ElasticInferenceService Unified ChatCompletions Integration" +area: Inference +type: enhancement +issues: [] diff --git a/docs/changelog/118919.yaml b/docs/changelog/118919.yaml new file mode 100644 index 0000000000000..832fd86fe08ba --- /dev/null +++ b/docs/changelog/118919.yaml @@ -0,0 +1,5 @@ +pr: 118919 +summary: Remove unsupported timeout from rest-api-spec license API +area: License +type: bug +issues: [] diff --git a/docs/changelog/118921.yaml b/docs/changelog/118921.yaml new file mode 100644 index 0000000000000..bd341616d8a14 --- /dev/null +++ b/docs/changelog/118921.yaml @@ -0,0 +1,5 @@ +pr: 118921 +summary: Add missing timeouts to rest-api-spec shutdown APIs +area: Infra/Node Lifecycle +type: bug +issues: [] diff --git a/docs/changelog/118938.yaml b/docs/changelog/118938.yaml new file mode 100644 index 0000000000000..395da7912fd4b --- /dev/null +++ b/docs/changelog/118938.yaml @@ -0,0 +1,5 @@ +pr: 118938 +summary: Hash functions +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/118954.yaml b/docs/changelog/118954.yaml new file mode 100644 index 0000000000000..ab2f2cda5c11e --- /dev/null +++ b/docs/changelog/118954.yaml @@ -0,0 +1,5 @@ +pr: 118954 +summary: Add missing parameter to `xpack.info` rest-api-spec +area: Infra/REST API +type: bug +issues: [] diff --git a/docs/changelog/119067.yaml b/docs/changelog/119067.yaml new file mode 100644 index 0000000000000..c7ddd570bea18 --- /dev/null +++ b/docs/changelog/119067.yaml @@ -0,0 +1,5 @@ +pr: 119067 +summary: Metrics for indexing failures due to version conflicts +area: CRUD +type: feature +issues: [] diff --git a/docs/changelog/119250.yaml b/docs/changelog/119250.yaml new file mode 100644 index 0000000000000..9db36957d8050 --- /dev/null +++ 
b/docs/changelog/119250.yaml @@ -0,0 +1,5 @@ +pr: 119250 +summary: Add rest endpoint for `create_from_source_index` +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/119389.yaml b/docs/changelog/119389.yaml new file mode 100644 index 0000000000000..267eaa345b9fd --- /dev/null +++ b/docs/changelog/119389.yaml @@ -0,0 +1,5 @@ +pr: 119389 +summary: Restrict Connector APIs to manage/monitor_connector privileges +area: Extract&Transform +type: feature +issues: [] diff --git a/docs/changelog/119449.yaml b/docs/changelog/119449.yaml new file mode 100644 index 0000000000000..f02bfa6d16d60 --- /dev/null +++ b/docs/changelog/119449.yaml @@ -0,0 +1,5 @@ +pr: 119449 +summary: Add missing traces ilm policy for OTel traces data streams +area: Data streams +type: bug +issues: [] diff --git a/docs/changelog/119504.yaml b/docs/changelog/119504.yaml new file mode 100644 index 0000000000000..f63e422face10 --- /dev/null +++ b/docs/changelog/119504.yaml @@ -0,0 +1,5 @@ +pr: 119504 +summary: Optimized index sorting for OTel logs +area: Data streams +type: enhancement +issues: [] diff --git a/docs/changelog/119542.yaml b/docs/changelog/119542.yaml new file mode 100644 index 0000000000000..aaf26c7dc4b0f --- /dev/null +++ b/docs/changelog/119542.yaml @@ -0,0 +1,5 @@ +pr: 119542 +summary: Wait while index is blocked +area: Transform +type: enhancement +issues: [] diff --git a/docs/changelog/119543.yaml b/docs/changelog/119543.yaml new file mode 100644 index 0000000000000..7027ea2a49672 --- /dev/null +++ b/docs/changelog/119543.yaml @@ -0,0 +1,7 @@ +pr: 119543 +summary: "[Inference API] Fix unique ID message for inference ID matches trained model\ + \ ID" +area: Machine Learning +type: bug +issues: + - 111312 diff --git a/docs/changelog/119691.yaml b/docs/changelog/119691.yaml new file mode 100644 index 0000000000000..186944394908d --- /dev/null +++ b/docs/changelog/119691.yaml @@ -0,0 +1,6 @@ +pr: 119691 +summary: Fix `bbq_hnsw` merge file cleanup on random IO 
exceptions +area: Vector Search +type: bug +issues: + - 119392 diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc index c52315423f87e..a5a813e8d37d5 100644 --- a/docs/reference/cat/nodes.asciidoc +++ b/docs/reference/cat/nodes.asciidoc @@ -239,6 +239,9 @@ Number of indexing operations, such as `1`. `indexing.index_failed`, `iif`, `indexingIndexFailed`:: Number of failed indexing operations, such as `0`. +`indexing.index_failed_due_to_version_conflict`, `iifvc`, `indexingIndexFailedDueToVersionConflict`:: +Number of failed indexing operations due to version conflict, such as `0`. + `merges.current`, `mc`, `mergesCurrent`:: Number of current merge operations, such as `0`. diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc index f73ac6e263cd2..2d3859e74c87e 100644 --- a/docs/reference/cat/shards.asciidoc +++ b/docs/reference/cat/shards.asciidoc @@ -162,6 +162,9 @@ Number of indexing operations, such as `1`. `indexing.index_failed`, `iif`, `indexingIndexFailed`:: Number of failed indexing operations, such as `0`. +`indexing.index_failed_due_to_version_conflict`, `iifvc`, `indexingIndexFailedDueToVersionConflict`:: +Number of failed indexing operations due to version conflict, such as `0`. + `merges.current`, `mc`, `mergesCurrent`:: Number of current merge operations, such as `0`. diff --git a/docs/reference/esql/functions/description/md5.asciidoc b/docs/reference/esql/functions/description/md5.asciidoc new file mode 100644 index 0000000000000..2ad847c0ce0e3 --- /dev/null +++ b/docs/reference/esql/functions/description/md5.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Computes the MD5 hash of the input. 
diff --git a/docs/reference/esql/functions/description/sha1.asciidoc b/docs/reference/esql/functions/description/sha1.asciidoc new file mode 100644 index 0000000000000..5bc29f86cc591 --- /dev/null +++ b/docs/reference/esql/functions/description/sha1.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Computes the SHA1 hash of the input. diff --git a/docs/reference/esql/functions/description/sha256.asciidoc b/docs/reference/esql/functions/description/sha256.asciidoc new file mode 100644 index 0000000000000..b2a7ef01e1069 --- /dev/null +++ b/docs/reference/esql/functions/description/sha256.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Computes the SHA256 hash of the input. diff --git a/docs/reference/esql/functions/examples/hash.asciidoc b/docs/reference/esql/functions/examples/hash.asciidoc new file mode 100644 index 0000000000000..492e466eb395e --- /dev/null +++ b/docs/reference/esql/functions/examples/hash.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/hash.csv-spec[tag=hash] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/hash.csv-spec[tag=hash-result] +|=== + diff --git a/docs/reference/esql/functions/examples/md5.asciidoc b/docs/reference/esql/functions/examples/md5.asciidoc new file mode 100644 index 0000000000000..0b43bc5b791c9 --- /dev/null +++ b/docs/reference/esql/functions/examples/md5.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/hash.csv-spec[tag=md5] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/hash.csv-spec[tag=md5-result] +|=== + diff --git a/docs/reference/esql/functions/examples/sha1.asciidoc b/docs/reference/esql/functions/examples/sha1.asciidoc new file mode 100644 index 0000000000000..77786431a738a --- /dev/null +++ b/docs/reference/esql/functions/examples/sha1.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/hash.csv-spec[tag=sha1] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/hash.csv-spec[tag=sha1-result] +|=== + diff --git a/docs/reference/esql/functions/examples/sha256.asciidoc b/docs/reference/esql/functions/examples/sha256.asciidoc new file mode 100644 index 0000000000000..801c36d8effc8 --- /dev/null +++ b/docs/reference/esql/functions/examples/sha256.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/hash.csv-spec[tag=sha256] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/hash.csv-spec[tag=sha256-result] +|=== + diff --git a/docs/reference/esql/functions/kibana/definition/hash.json b/docs/reference/esql/functions/kibana/definition/hash.json index 17a60cf45acfe..dbf4a2542afc5 100644 --- a/docs/reference/esql/functions/kibana/definition/hash.json +++ b/docs/reference/esql/functions/kibana/definition/hash.json @@ -77,6 +77,9 @@ "returnType" : "keyword" } ], + "examples" : [ + "FROM sample_data \n| WHERE message != \"Connection error\"\n| EVAL md5 = hash(\"md5\", message), sha256 = hash(\"sha256\", message) \n| KEEP message, md5, sha256;" + ], "preview" : false, "snapshot_only" : false } diff --git a/docs/reference/esql/functions/kibana/definition/md5.json b/docs/reference/esql/functions/kibana/definition/md5.json new file mode 100644 index 0000000000000..4d3a88e123ff4 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/md5.json @@ -0,0 +1,37 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "md5", + "description" : "Computes the MD5 hash of the input.", + "signatures" : [ + { + "params" : [ + { + "name" : "input", + "type" : "keyword", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "input", + "type" : "text", + "optional" : false, + "description" : "Input to hash." 
+ } + ], + "variadic" : false, + "returnType" : "keyword" + } + ], + "examples" : [ + "FROM sample_data \n| WHERE message != \"Connection error\"\n| EVAL md5 = md5(message)\n| KEEP message, md5;" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/sha1.json b/docs/reference/esql/functions/kibana/definition/sha1.json new file mode 100644 index 0000000000000..a6abb31368bb3 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/sha1.json @@ -0,0 +1,37 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "sha1", + "description" : "Computes the SHA1 hash of the input.", + "signatures" : [ + { + "params" : [ + { + "name" : "input", + "type" : "keyword", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "input", + "type" : "text", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + } + ], + "examples" : [ + "FROM sample_data \n| WHERE message != \"Connection error\"\n| EVAL sha1 = sha1(message)\n| KEEP message, sha1;" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/sha256.json b/docs/reference/esql/functions/kibana/definition/sha256.json new file mode 100644 index 0000000000000..700425d485b61 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/sha256.json @@ -0,0 +1,37 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "sha256", + "description" : "Computes the SHA256 hash of the input.", + "signatures" : [ + { + "params" : [ + { + "name" : "input", + "type" : "keyword", + "optional" : false, + "description" : "Input to hash." 
+ } + ], + "variadic" : false, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "input", + "type" : "text", + "optional" : false, + "description" : "Input to hash." + } + ], + "variadic" : false, + "returnType" : "keyword" + } + ], + "examples" : [ + "FROM sample_data \n| WHERE message != \"Connection error\"\n| EVAL sha256 = sha256(message)\n| KEEP message, sha256;" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/docs/hash.md b/docs/reference/esql/functions/kibana/docs/hash.md index 9826e80ec5bec..4e937778ba67a 100644 --- a/docs/reference/esql/functions/kibana/docs/hash.md +++ b/docs/reference/esql/functions/kibana/docs/hash.md @@ -5,3 +5,9 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ ### HASH Computes the hash of the input using various algorithms such as MD5, SHA, SHA-224, SHA-256, SHA-384, SHA-512. +``` +FROM sample_data +| WHERE message != "Connection error" +| EVAL md5 = hash("md5", message), sha256 = hash("sha256", message) +| KEEP message, md5, sha256; +``` diff --git a/docs/reference/esql/functions/kibana/docs/md5.md b/docs/reference/esql/functions/kibana/docs/md5.md new file mode 100644 index 0000000000000..aacb8a3960165 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/md5.md @@ -0,0 +1,13 @@ + + +### MD5 +Computes the MD5 hash of the input. + +``` +FROM sample_data +| WHERE message != "Connection error" +| EVAL md5 = md5(message) +| KEEP message, md5; +``` diff --git a/docs/reference/esql/functions/kibana/docs/sha1.md b/docs/reference/esql/functions/kibana/docs/sha1.md new file mode 100644 index 0000000000000..a940aa133f06e --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/sha1.md @@ -0,0 +1,13 @@ + + +### SHA1 +Computes the SHA1 hash of the input. 
+ +``` +FROM sample_data +| WHERE message != "Connection error" +| EVAL sha1 = sha1(message) +| KEEP message, sha1; +``` diff --git a/docs/reference/esql/functions/kibana/docs/sha256.md b/docs/reference/esql/functions/kibana/docs/sha256.md new file mode 100644 index 0000000000000..fbe576c7c20d6 --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/sha256.md @@ -0,0 +1,13 @@ + + +### SHA256 +Computes the SHA256 hash of the input. + +``` +FROM sample_data +| WHERE message != "Connection error" +| EVAL sha256 = sha256(message) +| KEEP message, sha256; +``` diff --git a/docs/reference/esql/functions/layout/hash.asciidoc b/docs/reference/esql/functions/layout/hash.asciidoc index 27c55ada6319b..daf7fbf1170b2 100644 --- a/docs/reference/esql/functions/layout/hash.asciidoc +++ b/docs/reference/esql/functions/layout/hash.asciidoc @@ -12,3 +12,4 @@ image::esql/functions/signature/hash.svg[Embedded,opts=inline] include::../parameters/hash.asciidoc[] include::../description/hash.asciidoc[] include::../types/hash.asciidoc[] +include::../examples/hash.asciidoc[] diff --git a/docs/reference/esql/functions/layout/md5.asciidoc b/docs/reference/esql/functions/layout/md5.asciidoc new file mode 100644 index 0000000000000..82d3031d6bdfd --- /dev/null +++ b/docs/reference/esql/functions/layout/md5.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +[discrete] +[[esql-md5]] +=== `MD5` + +*Syntax* + +[.text-center] +image::esql/functions/signature/md5.svg[Embedded,opts=inline] + +include::../parameters/md5.asciidoc[] +include::../description/md5.asciidoc[] +include::../types/md5.asciidoc[] +include::../examples/md5.asciidoc[] diff --git a/docs/reference/esql/functions/layout/sha1.asciidoc b/docs/reference/esql/functions/layout/sha1.asciidoc new file mode 100644 index 0000000000000..23e1e0e9ac2ab --- /dev/null +++ b/docs/reference/esql/functions/layout/sha1.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-sha1]] +=== `SHA1` + +*Syntax* + +[.text-center] +image::esql/functions/signature/sha1.svg[Embedded,opts=inline] + +include::../parameters/sha1.asciidoc[] +include::../description/sha1.asciidoc[] +include::../types/sha1.asciidoc[] +include::../examples/sha1.asciidoc[] diff --git a/docs/reference/esql/functions/layout/sha256.asciidoc b/docs/reference/esql/functions/layout/sha256.asciidoc new file mode 100644 index 0000000000000..d36a1345271f5 --- /dev/null +++ b/docs/reference/esql/functions/layout/sha256.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-sha256]] +=== `SHA256` + +*Syntax* + +[.text-center] +image::esql/functions/signature/sha256.svg[Embedded,opts=inline] + +include::../parameters/sha256.asciidoc[] +include::../description/sha256.asciidoc[] +include::../types/sha256.asciidoc[] +include::../examples/sha256.asciidoc[] diff --git a/docs/reference/esql/functions/parameters/md5.asciidoc b/docs/reference/esql/functions/parameters/md5.asciidoc new file mode 100644 index 0000000000000..99eba4dc2cb3d --- /dev/null +++ b/docs/reference/esql/functions/parameters/md5.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it. + +*Parameters* + +`input`:: +Input to hash. diff --git a/docs/reference/esql/functions/parameters/sha1.asciidoc b/docs/reference/esql/functions/parameters/sha1.asciidoc new file mode 100644 index 0000000000000..99eba4dc2cb3d --- /dev/null +++ b/docs/reference/esql/functions/parameters/sha1.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`input`:: +Input to hash. diff --git a/docs/reference/esql/functions/parameters/sha256.asciidoc b/docs/reference/esql/functions/parameters/sha256.asciidoc new file mode 100644 index 0000000000000..99eba4dc2cb3d --- /dev/null +++ b/docs/reference/esql/functions/parameters/sha256.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`input`:: +Input to hash. diff --git a/docs/reference/esql/functions/signature/md5.svg b/docs/reference/esql/functions/signature/md5.svg new file mode 100644 index 0000000000000..419af764a212e --- /dev/null +++ b/docs/reference/esql/functions/signature/md5.svg @@ -0,0 +1 @@ +MD5(input) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/sha1.svg b/docs/reference/esql/functions/signature/sha1.svg new file mode 100644 index 0000000000000..bab03a7eb88c8 --- /dev/null +++ b/docs/reference/esql/functions/signature/sha1.svg @@ -0,0 +1 @@ +SHA1(input) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/sha256.svg b/docs/reference/esql/functions/signature/sha256.svg new file mode 100644 index 0000000000000..b77126bbefbd8 --- /dev/null +++ b/docs/reference/esql/functions/signature/sha256.svg @@ -0,0 +1 @@ +SHA256(input) \ No newline at end of file diff --git a/docs/reference/esql/functions/string-functions.asciidoc b/docs/reference/esql/functions/string-functions.asciidoc index 
da9580a55151a..dd10e4c77581e 100644 --- a/docs/reference/esql/functions/string-functions.asciidoc +++ b/docs/reference/esql/functions/string-functions.asciidoc @@ -18,11 +18,14 @@ * <> * <> * <> +* <> * <> * <> * <> * <> * <> +* <> +* <> * <> * <> * <> @@ -43,11 +46,14 @@ include::layout/left.asciidoc[] include::layout/length.asciidoc[] include::layout/locate.asciidoc[] include::layout/ltrim.asciidoc[] +include::layout/md5.asciidoc[] include::layout/repeat.asciidoc[] include::layout/replace.asciidoc[] include::layout/reverse.asciidoc[] include::layout/right.asciidoc[] include::layout/rtrim.asciidoc[] +include::layout/sha1.asciidoc[] +include::layout/sha256.asciidoc[] include::layout/space.asciidoc[] include::layout/split.asciidoc[] include::layout/starts_with.asciidoc[] diff --git a/docs/reference/esql/functions/types/md5.asciidoc b/docs/reference/esql/functions/types/md5.asciidoc new file mode 100644 index 0000000000000..049a553397bbd --- /dev/null +++ b/docs/reference/esql/functions/types/md5.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +input | result +keyword | keyword +text | keyword +|=== diff --git a/docs/reference/esql/functions/types/sha1.asciidoc b/docs/reference/esql/functions/types/sha1.asciidoc new file mode 100644 index 0000000000000..049a553397bbd --- /dev/null +++ b/docs/reference/esql/functions/types/sha1.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +input | result +keyword | keyword +text | keyword +|=== diff --git a/docs/reference/esql/functions/types/sha256.asciidoc b/docs/reference/esql/functions/types/sha256.asciidoc new file mode 100644 index 0000000000000..049a553397bbd --- /dev/null +++ b/docs/reference/esql/functions/types/sha256.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +input | result +keyword | keyword +text | keyword +|=== diff --git a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc index e3c292cc534bf..30a1039f93db0 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc @@ -557,4 +557,3 @@ The API returns the following results: // TESTRESPONSE[s/"job_version" : "8.4.0"/"job_version" : $body.job_version/] // TESTRESPONSE[s/1656087283340/$body.$_path/] // TESTRESPONSE[s/"superuser"/"_es_test_root"/] -// TESTRESPONSE[s/"ignore_throttled" : true/"ignore_throttled" : true,"failure_store":"exclude"/] diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index 9b71fe9220385..446d3a409234d 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -350,7 +350,7 @@ include::repository-shared-settings.asciidoc[] will disable retries altogether. Note that if retries are enabled in the Azure client, each of these retries comprises that many client-level retries. -`get_register_retry_delay` +`get_register_retry_delay`:: (<>) Sets the time to wait before trying again if an attempt to read a <> fails. Defaults to `5s`. 
diff --git a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java index 66ef1f69c8c3a..8b03aeb178587 100644 --- a/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java +++ b/libs/entitlement/bridge/src/main/java/org/elasticsearch/entitlement/bridge/EntitlementChecker.java @@ -37,6 +37,8 @@ public interface EntitlementChecker { void check$java_lang_Runtime$halt(Class callerClass, Runtime runtime, int status); + void check$java_lang_System$$exit(Class callerClass, int status); + //////////////////// // // ClassLoader ctor diff --git a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java index 3c12b2f6bc62b..9869af4d85251 100644 --- a/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java +++ b/libs/entitlement/qa/common/src/main/java/org/elasticsearch/entitlement/qa/common/RestEntitlementsCheckAction.java @@ -83,6 +83,7 @@ static CheckAction alwaysDenied(Runnable action) { private static final Map checkActions = Map.ofEntries( entry("runtime_exit", deniedToPlugins(RestEntitlementsCheckAction::runtimeExit)), entry("runtime_halt", deniedToPlugins(RestEntitlementsCheckAction::runtimeHalt)), + entry("system_exit", deniedToPlugins(RestEntitlementsCheckAction::systemExit)), entry("create_classloader", forPlugins(RestEntitlementsCheckAction::createClassLoader)), entry("processBuilder_start", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_start)), entry("processBuilder_startPipeline", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_startPipeline)), @@ -153,6 +154,11 @@ private static void runtimeHalt() { 
Runtime.getRuntime().halt(123); } + @SuppressForbidden(reason = "Specifically testing System.exit") + private static void systemExit() { + System.exit(123); + } + private static void createClassLoader() { try (var classLoader = new URLClassLoader("test", new URL[0], RestEntitlementsCheckAction.class.getClassLoader())) { logger.info("Created URLClassLoader [{}]", classLoader.getName()); diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java index c0a047dc1a458..686fb73e10bc2 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java @@ -51,6 +51,11 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) { policyManager.checkExitVM(callerClass); } + @Override + public void check$java_lang_System$$exit(Class callerClass, int status) { + policyManager.checkExitVM(callerClass); + } + @Override public void check$java_lang_ClassLoader$(Class callerClass) { policyManager.checkCreateClassLoader(callerClass); diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java index 99b2a4510bf93..f66683a787bc0 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java @@ -27,7 +27,7 @@ import java.util.Set; import java.util.function.Function; -import static org.elasticsearch.common.settings.Setting.Property.DeprecatedWarning; +import static org.elasticsearch.common.settings.Setting.Property.Deprecated; import static 
org.elasticsearch.common.settings.Setting.Property.NodeScope; import static org.elasticsearch.common.settings.Setting.Property.OperatorDynamic; @@ -250,7 +250,7 @@ private static Setting concreteAgentSetting(String namespace, String qua TELEMETRY_SETTING_PREFIX + "agent.", LEGACY_TRACING_APM_SETTING_PREFIX + "agent.", (namespace, qualifiedKey) -> qualifiedKey.startsWith(LEGACY_TRACING_APM_SETTING_PREFIX) - ? concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic, DeprecatedWarning) + ? concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic, Deprecated) : concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic) ); @@ -262,7 +262,7 @@ private static Setting concreteAgentSetting(String namespace, String qua LEGACY_TRACING_APM_SETTING_PREFIX + "names.include", OperatorDynamic, NodeScope, - DeprecatedWarning + Deprecated ); public static final Setting> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.listSetting( @@ -281,7 +281,7 @@ private static Setting concreteAgentSetting(String namespace, String qua LEGACY_TRACING_APM_SETTING_PREFIX + "names.exclude", OperatorDynamic, NodeScope, - DeprecatedWarning + Deprecated ); public static final Setting> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.listSetting( @@ -314,7 +314,7 @@ private static Setting concreteAgentSetting(String namespace, String qua ), OperatorDynamic, NodeScope, - DeprecatedWarning + Deprecated ); public static final Setting> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.listSetting( @@ -334,7 +334,7 @@ private static Setting concreteAgentSetting(String namespace, String qua false, OperatorDynamic, NodeScope, - DeprecatedWarning + Deprecated ); public static final Setting TELEMETRY_TRACING_ENABLED_SETTING = Setting.boolSetting( @@ -358,7 +358,7 @@ private static Setting concreteAgentSetting(String namespace, String qua public static final Setting TRACING_APM_SECRET_TOKEN_SETTING = SecureSetting.secureString( LEGACY_TRACING_APM_SETTING_PREFIX + 
"secret_token", null, - DeprecatedWarning + Deprecated ); public static final Setting TELEMETRY_SECRET_TOKEN_SETTING = SecureSetting.secureString( @@ -373,7 +373,7 @@ private static Setting concreteAgentSetting(String namespace, String qua public static final Setting TRACING_APM_API_KEY_SETTING = SecureSetting.secureString( LEGACY_TRACING_APM_SETTING_PREFIX + "api_key", null, - DeprecatedWarning + Deprecated ); public static final Setting TELEMETRY_API_KEY_SETTING = SecureSetting.secureString( diff --git a/modules/data-streams/build.gradle b/modules/data-streams/build.gradle index 97a5fabd79f4c..60bc8d1dc6a92 100644 --- a/modules/data-streams/build.gradle +++ b/modules/data-streams/build.gradle @@ -20,6 +20,7 @@ restResources { dependencies { testImplementation project(path: ':test:test-clusters') + testImplementation project(":modules:mapper-extras") internalClusterTestImplementation project(":modules:mapper-extras") } @@ -70,4 +71,16 @@ tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("data_stream/200_rollover_failure_store/Lazily roll over a data stream's failure store after a shard failure", "Configuring the failure store via data stream templates is not supported anymore.") task.skipTest("data_stream/200_rollover_failure_store/Don't roll over a data stream's failure store when conditions aren't met", "Configuring the failure store via data stream templates is not supported anymore.") task.skipTest("data_stream/200_rollover_failure_store/Roll over a data stream's failure store with conditions", "Configuring the failure store via data stream templates is not supported anymore.") + + task.skipTest("data_stream/200_rollover_failure_store/Rolling over a failure store on a data stream without the failure store enabled should work", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/200_rollover_failure_store/Rolling over an uninitialized failure store should initialize it", 
"Rolling over a data stream using target_failure_store is no longer supported.") + + task.skipTest("data_stream/210_rollover_failure_store/A failure store marked for lazy rollover should only be rolled over when there is a failure", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Don't roll over a data stream's failure store when conditions aren't met", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Rolling over a failure store on a data stream without the failure store enabled should work", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Rolling over an uninitialized failure store should initialize it", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Roll over a data stream's failure store with conditions", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Lazily roll over a data stream's failure store after an ingest failure", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Lazily roll over a data stream's failure store after a shard failure", "Rolling over a data stream using target_failure_store is no longer supported.") + task.skipTest("data_stream/210_rollover_failure_store/Roll over a data stream's failure store without conditions", "Rolling over a data stream using target_failure_store is no longer supported.") }) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java 
b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java index ac73385a97d70..91f18ad3573fd 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamAutoshardingIT.java @@ -493,7 +493,7 @@ private static ShardStats getShardStats(IndexMetadata indexMeta, int shardIndex, CommonStats stats = new CommonStats(); stats.docs = new DocsStats(100, 0, randomByteSizeValue().getBytes()); stats.store = new StoreStats(); - stats.indexing = new IndexingStats(new IndexingStats.Stats(1, 1, 1, 1, 1, 1, 1, 1, false, 1, targetWriteLoad, 1)); + stats.indexing = new IndexingStats(new IndexingStats.Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, false, 1, targetWriteLoad, 1)); return new ShardStats(shardRouting, new ShardPath(false, path, path, shardId), stats, null, null, null, false, 0); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index 32d080ccc46b1..ac828630b0463 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -31,11 +31,13 @@ import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.datastreams.DeleteDataStreamAction; import org.elasticsearch.action.datastreams.GetDataStreamAction; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.DataStream; 
import org.elasticsearch.cluster.metadata.DataStreamAlias; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.index.Index; @@ -136,10 +138,7 @@ public void setup() throws Exception { assertTrue(response.isAcknowledged()); // Initialize the failure store. - RolloverRequest rolloverRequest = new RolloverRequest("with-fs", null); - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() - ); + RolloverRequest rolloverRequest = new RolloverRequest("with-fs::failures", null); response = client.execute(RolloverAction.INSTANCE, rolloverRequest).get(); assertTrue(response.isAcknowledged()); @@ -345,7 +344,7 @@ public void testFailureStoreSnapshotAndRestore() throws Exception { .cluster() .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) - .setIndices(dataStreamName) + .setIndices(IndexNameExpressionResolver.combineSelector(dataStreamName, IndexComponentSelector.ALL_APPLICABLE)) .setIncludeGlobalState(false) .get(); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java index e9eaf7b5faddb..bee3989d20ff0 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java @@ -20,11 +20,12 @@ import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.IndicesOptions; +import 
org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.core.Strings; @@ -194,9 +195,9 @@ public void testRejectionFromFailureStore() throws IOException { createDataStream(); // Initialize failure store. - var rolloverRequest = new RolloverRequest(dataStream, null); - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() + var rolloverRequest = new RolloverRequest( + IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES), + null ); var rolloverResponse = client().execute(RolloverAction.INSTANCE, rolloverRequest).actionGet(); var failureStoreIndex = rolloverResponse.getNewIndex(); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java index 482867d072fc2..54e21d5155ed1 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/DataStreamOptionsIT.java @@ -60,7 +60,7 @@ public void setup() throws IOException { assertOK(client().performRequest(new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME))); // Initialize the failure store. 
- assertOK(client().performRequest(new Request("POST", DATA_STREAM_NAME + "/_rollover?target_failure_store"))); + assertOK(client().performRequest(new Request("POST", DATA_STREAM_NAME + "::failures/_rollover"))); ensureGreen(DATA_STREAM_NAME); final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + DATA_STREAM_NAME)); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java deleted file mode 100644 index 85b914be30b2c..0000000000000 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.datastreams; - -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; -import org.junit.Before; - -import java.io.IOException; -import java.util.List; -import java.util.Map; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; - -/** - * This should be a yaml test, but in order to write one we would need to expose the new parameter in the rest-api-spec. - * We do not want to do that until the feature flag is removed. For this reason, we temporarily, test the affected APIs here. - * Please convert this to a yaml test when the feature flag is removed. 
- */ -public class FailureStoreQueryParamIT extends DisabledSecurityDataStreamTestCase { - - private static final String DATA_STREAM_NAME = "failure-data-stream"; - private String backingIndex; - private String failureStoreIndex; - - @SuppressWarnings("unchecked") - @Before - public void setup() throws IOException { - Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/ds-template"); - putComposableIndexTemplateRequest.setJsonEntity(""" - { - "index_patterns": ["failure-data-stream"], - "template": { - "settings": { - "number_of_replicas": 0 - }, - "data_stream_options": { - "failure_store": { - "enabled": true - } - } - }, - "data_stream": { - } - } - """); - assertOK(client().performRequest(putComposableIndexTemplateRequest)); - - assertOK(client().performRequest(new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME))); - // Initialize the failure store. - assertOK(client().performRequest(new Request("POST", DATA_STREAM_NAME + "/_rollover?target_failure_store"))); - ensureGreen(DATA_STREAM_NAME); - - final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + DATA_STREAM_NAME)); - List dataStreams = (List) entityAsMap(dataStreamResponse).get("data_streams"); - assertThat(dataStreams.size(), is(1)); - Map dataStream = (Map) dataStreams.get(0); - assertThat(dataStream.get("name"), equalTo(DATA_STREAM_NAME)); - List backingIndices = getIndices(dataStream); - assertThat(backingIndices.size(), is(1)); - List failureStore = getFailureStore(dataStream); - assertThat(failureStore.size(), is(1)); - backingIndex = backingIndices.get(0); - failureStoreIndex = failureStore.get(0); - } - - public void testGetIndexApi() throws IOException { - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME)); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(2)); - assertThat(indices.containsKey(backingIndex), is(true)); - 
assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "?failure_store=exclude")); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(backingIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "?failure_store=only")); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - } - - @SuppressWarnings("unchecked") - public void testGetIndexStatsApi() throws IOException { - { - final Response statsResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_stats")); - Map indices = (Map) entityAsMap(statsResponse).get("indices"); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(backingIndex), is(true)); - } - { - final Response statsResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_stats?failure_store=include") - ); - Map indices = (Map) entityAsMap(statsResponse).get("indices"); - assertThat(indices.size(), is(2)); - assertThat(indices.containsKey(backingIndex), is(true)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - { - final Response statsResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_stats?failure_store=only") - ); - Map indices = (Map) entityAsMap(statsResponse).get("indices"); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - } - - public void testGetIndexSettingsApi() throws IOException { - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_settings")); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - 
assertThat(indices.containsKey(backingIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_settings?failure_store=include") - ); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(2)); - assertThat(indices.containsKey(backingIndex), is(true)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_settings?failure_store=only") - ); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - } - - public void testGetIndexMappingApi() throws IOException { - { - final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping")); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(backingIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=include") - ); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(2)); - assertThat(indices.containsKey(backingIndex), is(true)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - { - final Response indicesResponse = client().performRequest( - new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=only") - ); - Map indices = entityAsMap(indicesResponse); - assertThat(indices.size(), is(1)); - assertThat(indices.containsKey(failureStoreIndex), is(true)); - } - } - - @SuppressWarnings("unchecked") - public void testPutIndexMappingApi() throws IOException { - { - final Request mappingRequest = new Request("PUT", "/" + DATA_STREAM_NAME + "/_mapping"); - mappingRequest.setJsonEntity(""" - { - "properties": { - "email": { - "type": "keyword" - 
} - } - } - """); - assertAcknowledged(client().performRequest(mappingRequest)); - } - { - final Request mappingRequest = new Request("PUT", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=include"); - mappingRequest.setJsonEntity(""" - { - "properties": { - "email": { - "type": "keyword" - } - } - } - """); - ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest(mappingRequest)); - Map response = entityAsMap(responseException.getResponse()); - assertThat(((Map) response.get("error")).get("reason"), is("failure index not supported")); - } - } - - @SuppressWarnings("unchecked") - private List getFailureStore(Map response) { - var failureStore = (Map) response.get("failure_store"); - return getIndices(failureStore); - - } - - @SuppressWarnings("unchecked") - private List getIndices(Map response) { - List> indices = (List>) response.get("indices"); - return indices.stream().map(index -> index.get("index_name")).toList(); - } -} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java index 1d3b1b676282a..cc5e00d8283ad 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.datastreams.DataStreamsStatsAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -102,10 +103,11 @@ 
protected ClusterBlockException checkRequestBlock( @Override protected String[] resolveConcreteIndexNames(ClusterState clusterState, DataStreamsStatsAction.Request request) { - return DataStreamsActionUtil.resolveConcreteIndexNames( + return DataStreamsActionUtil.resolveConcreteIndexNamesWithSelector( indexNameExpressionResolver, clusterState, request.indices(), + IndexComponentSelector.ALL_APPLICABLE, request.indicesOptions() ).toArray(String[]::new); } @@ -163,13 +165,17 @@ protected DataStreamsStatsAction.DataStreamShardStats readShardResult(StreamInpu request.indicesOptions(), request.indices() ); - for (String abstractionName : abstractionNames) { - IndexAbstraction indexAbstraction = indicesLookup.get(abstractionName); + for (String abstraction : abstractionNames) { + IndexAbstraction indexAbstraction = indicesLookup.get(abstraction); assert indexAbstraction != null; if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) { DataStream dataStream = (DataStream) indexAbstraction; AggregatedStats stats = aggregatedDataStreamsStats.computeIfAbsent(dataStream.getName(), s -> new AggregatedStats()); - dataStream.getIndices().stream().map(Index::getName).forEach(index -> { + dataStream.getBackingIndices().getIndices().stream().map(Index::getName).forEach(index -> { + stats.backingIndices.add(index); + allBackingIndices.add(index); + }); + dataStream.getFailureIndices().getIndices().stream().map(Index::getName).forEach(index -> { stats.backingIndices.add(index); allBackingIndices.add(index); }); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 7d2828e30d5ab..7de3f180753f8 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ 
b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -33,7 +33,7 @@ import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; @@ -49,6 +49,9 @@ import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; @@ -944,11 +947,6 @@ private Set maybeExecuteForceMerge(ClusterState state, List indice if ((configuredFloorSegmentMerge == null || configuredFloorSegmentMerge.equals(targetMergePolicyFloorSegment) == false) || (configuredMergeFactor == null || configuredMergeFactor.equals(targetMergePolicyFactor) == false)) { UpdateSettingsRequest updateMergePolicySettingsRequest = new UpdateSettingsRequest(); - updateMergePolicySettingsRequest.indicesOptions( - IndicesOptions.builder(updateMergePolicySettingsRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) - .build() - ); updateMergePolicySettingsRequest.indices(indexName); updateMergePolicySettingsRequest.settings( Settings.builder() @@ -998,8 
+996,11 @@ private Set maybeExecuteForceMerge(ClusterState state, List indice private void rolloverDataStream(String writeIndexName, RolloverRequest rolloverRequest, ActionListener listener) { // "saving" the rollover target name here so we don't capture the entire request - String rolloverTarget = rolloverRequest.getRolloverTarget(); - logger.trace("Data stream lifecycle issues rollover request for data stream [{}]", rolloverTarget); + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + logger.trace("Data stream lifecycle issues rollover request for data stream [{}]", rolloverRequest.getRolloverTarget()); client.admin().indices().rolloverIndex(rolloverRequest, new ActionListener<>() { @Override public void onResponse(RolloverResponse rolloverResponse) { @@ -1014,7 +1015,7 @@ public void onResponse(RolloverResponse rolloverResponse) { logger.info( "Data stream lifecycle successfully rolled over datastream [{}] due to the following met rollover " + "conditions {}. 
The new index is [{}]", - rolloverTarget, + rolloverRequest.getRolloverTarget(), metConditions, rolloverResponse.getNewIndex() ); @@ -1024,7 +1025,7 @@ public void onResponse(RolloverResponse rolloverResponse) { @Override public void onFailure(Exception e) { - DataStream dataStream = clusterService.state().metadata().dataStreams().get(rolloverTarget); + DataStream dataStream = clusterService.state().metadata().dataStreams().get(resolvedRolloverTarget.resource()); if (dataStream == null || dataStream.getWriteIndex().getName().equals(writeIndexName) == false) { // the data stream has another write index so no point in recording an error for the previous write index we were // attempting to roll over @@ -1407,9 +1408,7 @@ static RolloverRequest getDefaultRolloverRequest( ) { RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null).masterNodeTimeout(TimeValue.MAX_VALUE); if (rolloverFailureStore) { - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build() - ); + rolloverRequest.setRolloverTarget(IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES)); } rolloverRequest.setConditions(rolloverConfiguration.resolveRolloverConditions(dataRetention)); return rolloverRequest; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java index 1595348649528..7992362d791b1 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java @@ -50,7 +50,7 @@ public static final class Request extends AcknowledgedRequest implement 
.allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(false) + .allowSelectors(false) .build() ) .build(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/DeleteDataStreamOptionsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/DeleteDataStreamOptionsAction.java index 98a29dd636ddf..860bcb5bf2fbe 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/DeleteDataStreamOptionsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/DeleteDataStreamOptionsAction.java @@ -39,7 +39,9 @@ public static final class Request extends AcknowledgedRequest implement .wildcardOptions( IndicesOptions.WildcardOptions.builder().matchOpen(true).matchClosed(true).allowEmptyExpressions(true).resolveAliases(false) ) - .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true)) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true).allowSelectors(false) + ) .build(); public Request(StreamInput in) throws IOException { diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/GetDataStreamOptionsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/GetDataStreamOptionsAction.java index c1354da1129ca..45bda1abd5c02 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/GetDataStreamOptionsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/GetDataStreamOptionsAction.java @@ -50,7 +50,9 @@ public static class Request extends MasterNodeReadRequest implements In .wildcardOptions( 
IndicesOptions.WildcardOptions.builder().matchOpen(true).matchClosed(true).allowEmptyExpressions(true).resolveAliases(false) ) - .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true)) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true).allowSelectors(false) + ) .build(); private boolean includeDefaults = false; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/PutDataStreamOptionsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/PutDataStreamOptionsAction.java index d055a6972312a..d66b45665d4e2 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/PutDataStreamOptionsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/PutDataStreamOptionsAction.java @@ -71,7 +71,9 @@ public static Request parseRequest(XContentParser parser, Factory factory) { .wildcardOptions( IndicesOptions.WildcardOptions.builder().matchOpen(true).matchClosed(true).allowEmptyExpressions(true).resolveAliases(false) ) - .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true)) + .gatekeeperOptions( + IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true).allowSelectors(false) + ) .build(); private final DataStreamOptions options; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java index b61e38297397d..be157608b1c3f 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java +++ 
b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.datastreams.GetDataStreamAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.set.Sets; @@ -42,8 +41,7 @@ public class RestGetDataStreamsAction extends BaseRestHandler { IndicesOptions.WildcardOptions.ALLOW_NO_INDICES, IndicesOptions.GatekeeperOptions.IGNORE_THROTTLED, "verbose" - ), - DataStream.isFailureStoreFeatureFlagEnabled() ? Set.of(IndicesOptions.FAILURE_STORE_QUERY_PARAM) : Set.of() + ) ) ); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java index d5c5193948213..e32636fe40d84 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.datastreams.DataStreamsStatsAction; import org.elasticsearch.action.datastreams.DeleteDataStreamAction; @@ -22,8 +23,12 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.health.ClusterHealthStatus; import 
org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.DataStreamFailureStore; +import org.elasticsearch.cluster.metadata.DataStreamOptions; +import org.elasticsearch.cluster.metadata.ResettableValue; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xcontent.json.JsonXContent; @@ -40,12 +45,14 @@ import static java.lang.Math.max; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; public class DataStreamsStatsTests extends ESSingleNodeTestCase { @Override protected Collection> getPlugins() { - return List.of(DataStreamsPlugin.class); + return List.of(DataStreamsPlugin.class, MapperExtrasPlugin.class); } private final Set createdDataStreams = new HashSet<>(); @@ -107,8 +114,30 @@ public void testStatsExistingDataStream() throws Exception { assertEquals(stats.getTotalStoreSize().getBytes(), stats.getDataStreams()[0].getStoreSize().getBytes()); } + public void testStatsExistingDataStreamWithFailureStores() throws Exception { + String dataStreamName = createDataStream(false, true); + createFailedDocument(dataStreamName); + + DataStreamsStatsAction.Response stats = getDataStreamsStats(); + + assertEquals(2, stats.getSuccessfulShards()); + assertEquals(0, stats.getFailedShards()); + assertEquals(1, stats.getDataStreamCount()); + assertEquals(2, stats.getBackingIndices()); + assertNotEquals(0L, stats.getTotalStoreSize().getBytes()); + assertEquals(1, stats.getDataStreams().length); + assertEquals(dataStreamName, stats.getDataStreams()[0].getDataStream()); + assertEquals(2, stats.getDataStreams()[0].getBackingIndices()); + // The timestamp is going to not be 
something we can validate because + // it captures the time of failure which is uncontrolled in the test + // Just make sure it exists by ensuring it isn't zero + assertThat(stats.getDataStreams()[0].getMaximumTimestamp(), is(greaterThan(0L))); + assertNotEquals(0L, stats.getDataStreams()[0].getStoreSize().getBytes()); + assertEquals(stats.getTotalStoreSize().getBytes(), stats.getDataStreams()[0].getStoreSize().getBytes()); + } + public void testStatsExistingHiddenDataStream() throws Exception { - String dataStreamName = createDataStream(true); + String dataStreamName = createDataStream(true, false); long timestamp = createDocument(dataStreamName); DataStreamsStatsAction.Response stats = getDataStreamsStats(true); @@ -221,14 +250,19 @@ public void testStatsMultipleDataStreams() throws Exception { } private String createDataStream() throws Exception { - return createDataStream(false); + return createDataStream(false, false); } - private String createDataStream(boolean hidden) throws Exception { + private String createDataStream(boolean hidden, boolean failureStore) throws Exception { String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.getDefault()); + ResettableValue failureStoreOptions = failureStore == false + ? 
ResettableValue.undefined() + : ResettableValue.create( + new DataStreamOptions.Template(ResettableValue.create(new DataStreamFailureStore.Template(ResettableValue.create(true)))) + ); Template idxTemplate = new Template(null, new CompressedXContent(""" {"properties":{"@timestamp":{"type":"date"},"data":{"type":"keyword"}}} - """), null); + """), null, null, failureStoreOptions); ComposableIndexTemplate template = ComposableIndexTemplate.builder() .indexPatterns(List.of(dataStreamName + "*")) .template(idxTemplate) @@ -269,6 +303,27 @@ private long createDocument(String dataStreamName) throws Exception { return timestamp; } + private long createFailedDocument(String dataStreamName) throws Exception { + // Get some randomized but reasonable timestamps on the data since not all of it is guaranteed to arrive in order. + long timeSeed = System.currentTimeMillis(); + long timestamp = randomLongBetween(timeSeed - TimeUnit.HOURS.toMillis(5), timeSeed); + client().bulk( + new BulkRequest(dataStreamName).add( + new IndexRequest().opType(DocWriteRequest.OpType.CREATE) + .source( + JsonXContent.contentBuilder() + .startObject() + .field("@timestamp", timestamp) + .object("data", b -> b.field("garbage", randomAlphaOfLength(25))) + .endObject() + ) + ) + ).get(); + indicesAdmin().refresh(new RefreshRequest(".fs-" + dataStreamName + "*").indicesOptions(IndicesOptions.lenientExpandOpenHidden())) + .get(); + return timestamp; + } + private DataStreamsStatsAction.Response getDataStreamsStats() throws Exception { return getDataStreamsStats(false); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index 698ab427ab040..ac7dabd868a3f 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ 
b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; @@ -46,6 +46,7 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -225,11 +226,12 @@ public void testOperationsExecutedOnce() { assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class)); RolloverRequest rolloverBackingIndexRequest = (RolloverRequest) clientSeenRequests.get(0); assertThat(rolloverBackingIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA)); assertThat(clientSeenRequests.get(1), instanceOf(RolloverRequest.class)); RolloverRequest rolloverFailureIndexRequest = (RolloverRequest) clientSeenRequests.get(1); - assertThat(rolloverFailureIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.FAILURES)); + assertThat( + rolloverFailureIndexRequest.getRolloverTarget(), + is(IndexNameExpressionResolver.combineSelector(dataStreamName, 
IndexComponentSelector.FAILURES)) + ); List deleteRequests = clientSeenRequests.subList(2, 5) .stream() .map(transportRequest -> (DeleteIndexRequest) transportRequest) @@ -1546,11 +1548,12 @@ public void testFailureStoreIsManagedEvenWhenDisabled() { assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class)); RolloverRequest rolloverBackingIndexRequest = (RolloverRequest) clientSeenRequests.get(0); assertThat(rolloverBackingIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA)); assertThat(clientSeenRequests.get(1), instanceOf(RolloverRequest.class)); RolloverRequest rolloverFailureIndexRequest = (RolloverRequest) clientSeenRequests.get(1); - assertThat(rolloverFailureIndexRequest.getRolloverTarget(), is(dataStreamName)); - assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.FAILURES)); + assertThat( + rolloverFailureIndexRequest.getRolloverTarget(), + is(IndexNameExpressionResolver.combineSelector(dataStreamName, IndexComponentSelector.FAILURES)) + ); assertThat( ((DeleteIndexRequest) clientSeenRequests.get(2)).indices()[0], is(dataStream.getFailureIndices().getIndices().get(0).getName()) diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml index 13f79e95d99f4..f439cf59bf2d3 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml @@ -148,8 +148,7 @@ # rollover data stream to create new failure store index - do: indices.rollover: - alias: "data-stream-for-modification" - target_failure_store: true + alias: 
"data-stream-for-modification::failures" - is_true: acknowledged # save index names for later use diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml index cc3a11ffde5e8..51a1e96b1e937 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/210_rollover_failure_store.yml @@ -9,7 +9,7 @@ setup: capabilities: [ 'failure_store_in_template' ] - method: POST path: /{index}/_rollover - capabilities: [ 'lazy-rollover-failure-store' ] + capabilities: [ 'lazy-rollover-failure-store', 'index-expression-selectors' ] - do: allowed_warnings: @@ -58,8 +58,7 @@ teardown: - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" - match: { acknowledged: true } - match: { old_index: "/\\.fs-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } @@ -92,8 +91,7 @@ teardown: - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" body: conditions: max_docs: 1 @@ -130,8 +128,7 @@ teardown: - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" body: conditions: max_primary_shard_docs: 2 @@ -165,8 +162,7 @@ teardown: # Mark the failure store for lazy rollover - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" lazy: true - match: { acknowledged: true } @@ -263,8 +259,7 @@ teardown: # Mark the failure store for lazy rollover - do: indices.rollover: - alias: data-stream-for-lazy-rollover - target_failure_store: true + alias: 
data-stream-for-lazy-rollover::failures lazy: true - match: { acknowledged: true } @@ -332,8 +327,7 @@ teardown: # Mark the failure store for lazy rollover - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" lazy: true - match: { acknowledged: true } @@ -377,16 +371,14 @@ teardown: - do: catch: /Rolling over\/initializing an empty failure store is only supported without conditions\./ indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" body: conditions: max_docs: 1 - do: indices.rollover: - alias: "data-stream-for-rollover" - target_failure_store: true + alias: "data-stream-for-rollover::failures" - match: { acknowledged: true } - match: { old_index: "_none_" } @@ -424,8 +416,7 @@ teardown: # Initializing should work - do: indices.rollover: - alias: "other-data-stream-for-rollover" - target_failure_store: true + alias: "other-data-stream-for-rollover::failures" - match: { acknowledged: true } - match: { old_index: "_none_" } @@ -448,8 +439,7 @@ teardown: # And "regular" rollover should work - do: indices.rollover: - alias: "other-data-stream-for-rollover" - target_failure_store: true + alias: "other-data-stream-for-rollover::failures" - match: { acknowledged: true } - match: { old_index: "/\\.fs-other-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java index 990a92fb0dbf8..19069c876e7e7 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequestTests.java @@ -13,7 +13,6 @@ import 
org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.StreamsUtils; @@ -102,7 +101,7 @@ public void testMultiSearchTemplateToJson() throws Exception { String[] indices = { "test" }; SearchRequest searchRequest = new SearchRequest(indices); // scroll is not supported in the current msearch or msearchtemplate api, so unset it: - searchRequest.scroll((Scroll) null); + searchRequest.scroll(null); // batched reduce size is currently not set-able on a per-request basis as it is a query string parameter only searchRequest.setBatchedReduceSize(SearchRequest.DEFAULT_BATCHED_REDUCE_SIZE); SearchTemplateRequest searchTemplateRequest = new SearchTemplateRequest(searchRequest); diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java index fe591387e9b35..de90ff97e6a95 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java @@ -43,7 +43,6 @@ import org.elasticsearch.script.Metadata; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -212,7 +211,7 @@ static > SearchRequest prep if (mainRequest.getMaxDocs() != MAX_DOCS_ALL_MATCHES && mainRequest.getMaxDocs() <= preparedSearchRequest.source().size() && mainRequest.isAbortOnVersionConflict()) { - preparedSearchRequest.scroll((Scroll) null); + 
preparedSearchRequest.scroll(null); } return preparedSearchRequest; diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteRequestBuilders.java index ddf73e313e830..bb2073849edc3 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteRequestBuilders.java @@ -53,7 +53,7 @@ static Request initialSearch(SearchRequest searchRequest, BytesReference query, Request request = new Request("POST", path.toString()); if (searchRequest.scroll() != null) { - TimeValue keepAlive = searchRequest.scroll().keepAlive(); + TimeValue keepAlive = searchRequest.scroll(); // V_5_0_0 if (remoteVersion.before(Version.fromId(5000099))) { /* Versions of Elasticsearch before 5.0 couldn't parse nanos or micros diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSource.java index 5c3db5aaa6cda..cc08357aa4081 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSource.java @@ -100,7 +100,7 @@ private void onStartResponse(RejectAwareActionListener searchListener, @Override protected void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, RejectAwareActionListener searchListener) { - TimeValue keepAlive = timeValueNanos(searchRequest.scroll().keepAlive().nanos() + extraKeepAlive.nanos()); + TimeValue keepAlive = timeValueNanos(searchRequest.scroll().nanos() + extraKeepAlive.nanos()); execute(RemoteRequestBuilders.scroll(scrollId, keepAlive, remoteVersion), RESPONSE_PARSER, searchListener); } diff --git 
a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java index 26b35a5bbd4b7..28f2eafc20a6e 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java @@ -597,7 +597,7 @@ protected RequestWrapper buildRequest(Hit doc) { capturedCommand.get().run(); // So the next request is going to have to wait an extra 100 seconds or so (base was 10 seconds, so 110ish) - assertThat(client.lastScroll.get().request.scroll().keepAlive().seconds(), either(equalTo(110L)).or(equalTo(109L))); + assertThat(client.lastScroll.get().request.scroll().seconds(), either(equalTo(110L)).or(equalTo(109L))); // Now we can simulate a response and check the delay that we used for the task if (randomBoolean()) { diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java index 1c104cbd08197..5f4e2b3a55156 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java @@ -155,10 +155,7 @@ public void testScrollKeepAlive() { ); hitSource.startNextScroll(timeValueSeconds(100)); - client.validateRequest( - TransportSearchScrollAction.TYPE, - (SearchScrollRequest r) -> assertEquals(r.scroll().keepAlive().seconds(), 110) - ); + client.validateRequest(TransportSearchScrollAction.TYPE, (SearchScrollRequest r) -> assertEquals(r.scroll().seconds(), 110)); } private SearchResponse createSearchResponse() { diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexRestClientSslTests.java 
b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexRestClientSslTests.java index 766c3ff695f84..bcc6177f8363c 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexRestClientSslTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexRestClientSslTests.java @@ -113,14 +113,20 @@ private static SSLContext buildServerSslContext() throws Exception { } public void testClientFailsWithUntrustedCertificate() throws IOException { - assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm()); final List threads = new ArrayList<>(); final Settings.Builder builder = Settings.builder().put("path.home", createTempDir()); final Settings settings = builder.build(); final Environment environment = TestEnvironment.newEnvironment(settings); final ReindexSslConfig ssl = new ReindexSslConfig(settings, environment, mock(ResourceWatcherService.class)); try (RestClient client = Reindexer.buildRestClient(getRemoteInfo(), ssl, 1L, threads)) { - expectThrows(SSLHandshakeException.class, () -> client.performRequest(new Request("GET", "/"))); + if (inFipsJvm()) { + // Bouncy Castle throws a different exception + IOException exception = expectThrows(IOException.class, () -> client.performRequest(new Request("GET", "/"))); + assertThat(exception.getCause(), Matchers.instanceOf(javax.net.ssl.SSLException.class)); + } else { + expectThrows(SSLHandshakeException.class, () -> client.performRequest(new Request("GET", "/"))); + + } } } diff --git a/muted-tests.yml b/muted-tests.yml index 4993c66d14716..b49cd0a32bfed 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -102,9 +102,6 @@ tests: - class: org.elasticsearch.xpack.shutdown.NodeShutdownIT method: testAllocationPreventedForRemoval issue: https://github.com/elastic/elasticsearch/issues/116363 -- class: org.elasticsearch.xpack.downsample.ILMDownsampleDisruptionIT - method: testILMDownsampleRollingRestart - issue: 
https://github.com/elastic/elasticsearch/issues/114233 - class: org.elasticsearch.reservedstate.service.RepositoriesFileSettingsIT method: testSettingsApplied issue: https://github.com/elastic/elasticsearch/issues/116694 @@ -236,44 +233,38 @@ tests: - class: org.elasticsearch.smoketest.MlWithSecurityIT method: test {yaml=ml/sparse_vector_search/Test sparse_vector search with query vector and pruning config} issue: https://github.com/elastic/elasticsearch/issues/119548 -- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT - method: testSearchableSnapshotUpgrade {p0=[9.0.0, 8.18.0, 8.18.0]} - issue: https://github.com/elastic/elasticsearch/issues/119549 -- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT - method: testMountSearchableSnapshot {p0=[9.0.0, 8.18.0, 8.18.0]} - issue: https://github.com/elastic/elasticsearch/issues/119550 -- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT - method: testMountSearchableSnapshot {p0=[9.0.0, 9.0.0, 8.18.0]} - issue: https://github.com/elastic/elasticsearch/issues/119551 - class: org.elasticsearch.index.engine.LuceneSyntheticSourceChangesSnapshotTests method: testSkipNonRootOfNestedDocuments issue: https://github.com/elastic/elasticsearch/issues/119553 -- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT - method: testSearchableSnapshotUpgrade {p0=[9.0.0, 9.0.0, 8.18.0]} - issue: https://github.com/elastic/elasticsearch/issues/119560 -- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT - method: testMountSearchableSnapshot {p0=[9.0.0, 9.0.0, 9.0.0]} - issue: https://github.com/elastic/elasticsearch/issues/119561 -- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT - method: testSearchableSnapshotUpgrade {p0=[9.0.0, 9.0.0, 9.0.0]} - issue: https://github.com/elastic/elasticsearch/issues/119562 - class: 
org.elasticsearch.xpack.ml.integration.ForecastIT method: testOverflowToDisk issue: https://github.com/elastic/elasticsearch/issues/117740 - class: org.elasticsearch.xpack.security.authc.ldap.MultiGroupMappingIT issue: https://github.com/elastic/elasticsearch/issues/119599 -- class: org.elasticsearch.lucene.FullClusterRestartSearchableSnapshotIndexCompatibilityIT - method: testSearchableSnapshotUpgrade {p0=8.18.0} - issue: https://github.com/elastic/elasticsearch/issues/119631 -- class: org.elasticsearch.lucene.FullClusterRestartSearchableSnapshotIndexCompatibilityIT - method: testSearchableSnapshotUpgrade {p0=9.0.0} - issue: https://github.com/elastic/elasticsearch/issues/119632 - class: org.elasticsearch.search.profile.dfs.DfsProfilerIT method: testProfileDfs issue: https://github.com/elastic/elasticsearch/issues/119711 -- class: org.elasticsearch.upgrades.DataStreamsUpgradeIT - method: testUpgradeDataStream - issue: https://github.com/elastic/elasticsearch/issues/119717 +- class: org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizerTests + method: testSingleMatchFunctionFilterPushdownWithStringValues {default} + issue: https://github.com/elastic/elasticsearch/issues/119720 +- class: org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizerTests + method: testSingleMatchFunctionPushdownWithCasting {default} + issue: https://github.com/elastic/elasticsearch/issues/119722 +- class: org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizerTests + method: testSingleMatchOperatorFilterPushdownWithStringValues {default} + issue: https://github.com/elastic/elasticsearch/issues/119721 +- class: org.elasticsearch.script.mustache.SearchTemplateRequestTests + method: testConcurrentSerialization + issue: https://github.com/elastic/elasticsearch/issues/119819 +- class: org.elasticsearch.script.mustache.SearchTemplateRequestTests + method: testEqualsAndHashcode + issue: https://github.com/elastic/elasticsearch/issues/119820 +- class: 
org.elasticsearch.script.mustache.SearchTemplateRequestTests + method: testConcurrentEquals + issue: https://github.com/elastic/elasticsearch/issues/119821 +- class: org.elasticsearch.script.mustache.SearchTemplateRequestTests + method: testSerialization + issue: https://github.com/elastic/elasticsearch/issues/119822 # Examples: # diff --git a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle index e63b1629db39c..5bbade8cf6fce 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle +++ b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle @@ -50,8 +50,6 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> nonInputProperties.systemProperty('tests.rest.cluster', localCluster.map(c -> c.allHttpSocketURI.join(","))) nonInputProperties.systemProperty('tests.rest.remote_cluster', remoteCluster.map(c -> c.allHttpSocketURI.join(","))) } - - onlyIf("FIPS mode disabled") { buildParams.inFipsJvm == false } } tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { diff --git a/qa/lucene-index-compatibility/build.gradle b/qa/lucene-index-compatibility/build.gradle index 37e5eea85a08b..3b2e69ec9859f 100644 --- a/qa/lucene-index-compatibility/build.gradle +++ b/qa/lucene-index-compatibility/build.gradle @@ -14,7 +14,9 @@ buildParams.bwcVersions.withLatestReadOnlyIndexCompatible { bwcVersion -> tasks.named("javaRestTest").configure { systemProperty("tests.minimum.index.compatible", bwcVersion) usesBwcDistribution(bwcVersion) - enabled = true + + // Tests rely on unreleased code in 8.18 branch + enabled = buildParams.isSnapshotBuild() } } @@ -22,4 +24,3 @@ tasks.withType(Test).configureEach { // CI doesn't like it when there's multiple clusters running at once maxParallelForks = 1 } - diff --git a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java 
b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java index 8c9a42dc926e9..13c647983fad5 100644 --- a/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java +++ b/qa/lucene-index-compatibility/src/javaRestTest/java/org/elasticsearch/lucene/AbstractIndexCompatibilityTestCase.java @@ -207,7 +207,7 @@ protected static void updateRandomIndexSettings(String indexName) throws IOExcep switch (i) { case 0 -> settings.putList(IndexSettings.DEFAULT_FIELD_SETTING.getKey(), "field_" + randomInt(2)); case 1 -> settings.put(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), randomIntBetween(1, 100)); - case 2 -> settings.put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), randomLongBetween(0L, 1000L)); + case 2 -> settings.put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), randomLongBetween(100L, 1000L)); case 3 -> settings.put(IndexSettings.MAX_SLICES_PER_SCROLL.getKey(), randomIntBetween(1, 1024)); default -> throw new IllegalStateException(); } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json index 299c24f987d8d..47a1bee665506 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.rollover.json @@ -63,12 +63,6 @@ "type":"boolean", "default":"false", "description":"If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. Only allowed on data streams." 
- }, - "target_failure_store":{ - "type":"boolean", - "description":"If set to true, the rollover action will be applied on the failure store of the data stream.", - "visibility": "feature_flag", - "feature_flag": "es.failure_store_feature_flag_enabled" } }, "body":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/license.post_start_trial.json b/rest-api-spec/src/main/resources/rest-api-spec/api/license.post_start_trial.json index 986040d69cb4f..9fb85807d611f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/license.post_start_trial.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/license.post_start_trial.json @@ -31,10 +31,6 @@ "master_timeout": { "type": "time", "description": "Timeout for processing on master node" - }, - "timeout": { - "type": "time", - "description": "Timeout for acknowledgement of update from all nodes in cluster" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.create_from.json b/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.create_from.json new file mode 100644 index 0000000000000..e17a69a77b252 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/migrate.create_from.json @@ -0,0 +1,37 @@ +{ + "migrate.create_from":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex.html", + "description":"This API creates a destination from a source index. It copies the mappings and settings from the source index while allowing request settings and mappings to override the source values." 
+ }, + "stability":"experimental", + "visibility":"private", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_create_from/{source}/{dest}", + "methods":[ "PUT", "POST"], + "parts":{ + "source":{ + "type":"string", + "description":"The source index name" + }, + "dest":{ + "type":"string", + "description":"The destination index name" + } + } + } + ] + }, + "body":{ + "description":"The body contains the fields `mappings_override` and `settings_override`.", + "required":false + } + } +} + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.delete_node.json b/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.delete_node.json index d990d1da1f144..6f1ec484e94d0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.delete_node.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.delete_node.json @@ -26,6 +26,15 @@ } ] }, - "params":{} + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.put_node.json b/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.put_node.json index bf20cf3b70bac..90b19557f5fb2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.put_node.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/shutdown.put_node.json @@ -26,7 +26,16 @@ } ] }, - "params":{}, + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + } + }, "body":{ "description":"The shutdown type definition to register", "required": true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/xpack.info.json 
b/rest-api-spec/src/main/resources/rest-api-spec/api/xpack.info.json index 68b2a5d2c2c8b..35895f0ddb581 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/xpack.info.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/xpack.info.json @@ -20,6 +20,12 @@ ] }, "params":{ + "human":{ + "type":"boolean", + "required":false, + "description":"Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line. The default value is true.", + "default":true + }, "categories":{ "type":"list", "description":"Comma-separated list of info categories. Can be any of: build, license, features" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml index 03d8b2068d23e..45f381eab80b1 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -45,6 +45,7 @@ indexing.index_time .+ \n indexing.index_total .+ \n indexing.index_failed .+ \n + indexing.index_failed_due_to_version_conflict .+ \n merges.current .+ \n merges.current_docs .+ \n merges.current_size .+ \n diff --git a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/IndicesMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/IndicesMetricsIT.java index 31c7720ffec1c..fee2c0494365e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/IndicesMetricsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/IndicesMetricsIT.java @@ -30,6 +30,7 @@ import java.io.IOException; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -76,6 +77,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { static final 
String STANDARD_INDEXING_COUNT = "es.indices.standard.indexing.total"; static final String STANDARD_INDEXING_TIME = "es.indices.standard.indexing.time"; static final String STANDARD_INDEXING_FAILURE = "es.indices.standard.indexing.failure.total"; + static final String STANDARD_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT = "es.indices.standard.indexing.failure.version_conflict.total"; static final String TIME_SERIES_INDEX_COUNT = "es.indices.time_series.total"; static final String TIME_SERIES_BYTES_SIZE = "es.indices.time_series.size"; @@ -89,6 +91,8 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { static final String TIME_SERIES_INDEXING_COUNT = "es.indices.time_series.indexing.total"; static final String TIME_SERIES_INDEXING_TIME = "es.indices.time_series.indexing.time"; static final String TIME_SERIES_INDEXING_FAILURE = "es.indices.time_series.indexing.failure.total"; + static final String TIME_SERIES_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT = + "es.indices.time_series.indexing.failure.version_conflict.total"; static final String LOGSDB_INDEX_COUNT = "es.indices.logsdb.total"; static final String LOGSDB_BYTES_SIZE = "es.indices.logsdb.size"; @@ -102,6 +106,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { static final String LOGSDB_INDEXING_COUNT = "es.indices.logsdb.indexing.total"; static final String LOGSDB_INDEXING_TIME = "es.indices.logsdb.indexing.time"; static final String LOGSDB_INDEXING_FAILURE = "es.indices.logsdb.indexing.failure.total"; + static final String LOGSDB_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT = "es.indices.logsdb.indexing.failure.version_conflict.total"; public void testIndicesMetrics() { String indexNode = internalCluster().startNode(); @@ -132,7 +137,9 @@ public void testIndicesMetrics() { STANDARD_INDEXING_TIME, greaterThanOrEqualTo(0L), STANDARD_INDEXING_FAILURE, - equalTo(indexing1.getIndexFailedCount() - indexing0.getIndexCount()) + equalTo(indexing1.getIndexFailedCount() - 
indexing0.getIndexFailedCount()), + STANDARD_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, + equalTo(indexing1.getIndexFailedDueToVersionConflictCount() - indexing0.getIndexFailedDueToVersionConflictCount()) ) ); @@ -155,7 +162,9 @@ public void testIndicesMetrics() { TIME_SERIES_INDEXING_TIME, greaterThanOrEqualTo(0L), TIME_SERIES_INDEXING_FAILURE, - equalTo(indexing2.getIndexFailedCount() - indexing1.getIndexFailedCount()) + equalTo(indexing1.getIndexFailedCount() - indexing0.getIndexFailedCount()), + TIME_SERIES_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, + equalTo(indexing1.getIndexFailedDueToVersionConflictCount() - indexing0.getIndexFailedDueToVersionConflictCount()) ) ); @@ -177,13 +186,14 @@ public void testIndicesMetrics() { LOGSDB_INDEXING_TIME, greaterThanOrEqualTo(0L), LOGSDB_INDEXING_FAILURE, - equalTo(indexing3.getIndexFailedCount() - indexing2.getIndexFailedCount()) + equalTo(indexing3.getIndexFailedCount() - indexing2.getIndexFailedCount()), + LOGSDB_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, + equalTo(indexing3.getIndexFailedDueToVersionConflictCount() - indexing2.getIndexFailedDueToVersionConflictCount()) ) ); // already collected indexing stats - collectThenAssertMetrics( - telemetry, - 4, + Map> zeroMatchers = new HashMap<>(); + zeroMatchers.putAll( Map.of( STANDARD_INDEXING_COUNT, equalTo(0L), @@ -191,22 +201,35 @@ public void testIndicesMetrics() { equalTo(0L), STANDARD_INDEXING_FAILURE, equalTo(0L), - + STANDARD_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, + equalTo(0L) + ) + ); + zeroMatchers.putAll( + Map.of( TIME_SERIES_INDEXING_COUNT, equalTo(0L), TIME_SERIES_INDEXING_TIME, equalTo(0L), TIME_SERIES_INDEXING_FAILURE, equalTo(0L), - + TIME_SERIES_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, + equalTo(0L) + ) + ); + zeroMatchers.putAll( + Map.of( LOGSDB_INDEXING_COUNT, equalTo(0L), LOGSDB_INDEXING_TIME, equalTo(0L), LOGSDB_INDEXING_FAILURE, + equalTo(0L), + LOGSDB_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT, equalTo(0L) ) ); + 
collectThenAssertMetrics(telemetry, 4, zeroMatchers); String searchNode = internalCluster().startDataOnlyNode(); indicesService = internalCluster().getInstance(IndicesService.class, searchNode); telemetry = internalCluster().getInstance(PluginsService.class, searchNode) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java index 1635a08e1768b..04130d176b9e5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/monitor/metrics/NodeIndexingMetricsIT.java @@ -9,14 +9,17 @@ package org.elasticsearch.monitor.metrics; +import org.apache.lucene.analysis.TokenStream; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.bulk.IncrementalBulkService; import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -25,6 +28,13 @@ import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; +import org.elasticsearch.index.analysis.TokenFilterFactory; +import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.mapper.MapperParsingException; 
+import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.rest.RestStatus; @@ -43,13 +53,16 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; +import static java.util.Collections.singletonMap; import static org.elasticsearch.index.IndexingPressure.MAX_COORDINATING_BYTES; import static org.elasticsearch.index.IndexingPressure.MAX_PRIMARY_BYTES; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) @@ -66,7 +79,7 @@ public List> getSettings() { @Override protected Collection> nodePlugins() { - return List.of(TestTelemetryPlugin.class, TestAPMInternalSettings.class); + return List.of(TestTelemetryPlugin.class, TestAPMInternalSettings.class, TestAnalysisPlugin.class); } @Override @@ -77,6 +90,197 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { .build(); } + public void testZeroMetricsForVersionConflictsForNonIndexingOperations() { + final String dataNode = internalCluster().startNode(); + ensureStableCluster(1); + + final TestTelemetryPlugin plugin = internalCluster().getInstance(PluginsService.class, dataNode) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + plugin.resetMeter(); + + assertAcked(prepareCreate("index_no_refresh", Settings.builder().put("index.refresh_interval", "-1"))); + 
assertAcked(prepareCreate("index_with_default_refresh")); + + for (String indexName : List.of("index_no_refresh", "index_with_default_refresh")) { + String docId = randomUUID(); + client(dataNode).index(new IndexRequest(indexName).id(docId).source(Map.of())).actionGet(); + // test version conflicts are counted when getting from the translog + if (randomBoolean()) { + // this get has the side effect of tracking translog location in the live version map, + // which potentially influences the engine conflict exception path + client(dataNode).get(new GetRequest(indexName, docId).realtime(randomBoolean())).actionGet(); + } + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).get( + new GetRequest(indexName, docId).version(10).versionType(randomFrom(VersionType.EXTERNAL, VersionType.EXTERNAL_GTE)) + ).actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + if (randomBoolean()) { + client(dataNode).get(new GetRequest(indexName, docId).realtime(false)).actionGet(); + } + client(dataNode).admin().indices().prepareRefresh(indexName).get(); + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).get( + new GetRequest(indexName, docId).version(5) + .versionType(randomFrom(VersionType.EXTERNAL, VersionType.EXTERNAL_GTE)) + .realtime(false) + ).actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + // updates + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).update( + new UpdateRequest(indexName, docId).setIfPrimaryTerm(1) + .setIfSeqNo(randomIntBetween(2, 5)) + .doc(Map.of(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10))) + ).actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + // deletes 
+ { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).delete( + new DeleteRequest(indexName, docId).setIfPrimaryTerm(randomIntBetween(2, 5)).setIfSeqNo(0) + ).actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + } + + // simulate async apm `polling` call for metrics + plugin.collect(); + + // there are no indexing (version conflict) failures reported because only gets/updates/deletes generated the conflicts + // and those are not "indexing" operations + var indexingFailedTotal = getSingleRecordedMetric(plugin::getLongAsyncCounterMeasurement, "es.indexing.indexing.failed.total"); + assertThat(indexingFailedTotal.getLong(), equalTo(0L)); + var indexingFailedDueToVersionConflictTotal = getSingleRecordedMetric( + plugin::getLongAsyncCounterMeasurement, + "es.indexing.indexing.failed.version_conflict.total" + ); + assertThat(indexingFailedDueToVersionConflictTotal.getLong(), equalTo(0L)); + } + + public void testMetricsForIndexingVersionConflicts() { + final String dataNode = internalCluster().startNode(); + ensureStableCluster(1); + + final TestTelemetryPlugin plugin = internalCluster().getInstance(PluginsService.class, dataNode) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + plugin.resetMeter(); + + assertAcked( + prepareCreate( + "test", + Settings.builder() + .put("index.refresh_interval", "-1") + .put("index.analysis.analyzer.test_analyzer.type", "custom") + .put("index.analysis.analyzer.test_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.test_analyzer.filter", "test_token_filter") + ).setMapping(Map.of("properties", Map.of("test_field", Map.of("type", "text", "analyzer", "test_analyzer")))).get() + ); + + String docId = randomUUID(); + // successful index (with version) + client(dataNode).index( + new IndexRequest("test").id(docId) + .version(10) + 
.versionType(randomFrom(VersionType.EXTERNAL, VersionType.EXTERNAL_GTE)) + .source(Map.of()) + ).actionGet(); + // if_primary_term conflict + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).index(new IndexRequest("test").id(docId).source(Map.of()).setIfSeqNo(0).setIfPrimaryTerm(2)) + .actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + // if_seq_no conflict + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).index(new IndexRequest("test").id(docId).source(Map.of()).setIfSeqNo(1).setIfPrimaryTerm(1)) + .actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + // version conflict + { + var e = expectThrows( + VersionConflictEngineException.class, + () -> client(dataNode).index( + new IndexRequest("test").id(docId) + .source(Map.of()) + .version(3) + .versionType(randomFrom(VersionType.EXTERNAL, VersionType.EXTERNAL_GTE)) + ).actionGet() + ); + assertThat(e.getMessage(), containsString("version conflict")); + assertThat(e.status(), is(RestStatus.CONFLICT)); + } + // indexing failure that is NOT a version conflict + PluginsService pluginService = internalCluster().getInstance(PluginsService.class, dataNode); + pluginService.filterPlugins(TestAnalysisPlugin.class).forEach(p -> p.throwParsingError.set(true)); + { + var e = expectThrows( + MapperParsingException.class, + () -> client(dataNode).index(new IndexRequest("test").id(docId + "other").source(Map.of("test_field", "this will error"))) + .actionGet() + ); + assertThat(e.status(), is(RestStatus.BAD_REQUEST)); + } + + plugin.collect(); + + var indexingFailedTotal = getSingleRecordedMetric(plugin::getLongAsyncCounterMeasurement, "es.indexing.indexing.failed.total"); + assertThat(indexingFailedTotal.getLong(), equalTo(4L)); + var indexingFailedDueToVersionConflictTotal 
= getSingleRecordedMetric( + plugin::getLongAsyncCounterMeasurement, + "es.indexing.indexing.failed.version_conflict.total" + ); + assertThat(indexingFailedDueToVersionConflictTotal.getLong(), equalTo(3L)); + } + + public static final class TestAnalysisPlugin extends Plugin implements AnalysisPlugin { + final AtomicBoolean throwParsingError = new AtomicBoolean(false); + + @Override + public Map> getTokenFilters() { + return singletonMap("test_token_filter", (indexSettings, environment, name, settings) -> new AbstractTokenFilterFactory(name) { + @Override + public TokenStream create(TokenStream tokenStream) { + if (throwParsingError.get()) { + throw new MapperParsingException("simulate mapping parsing error"); + } + return tokenStream; + } + }); + } + } + public void testNodeIndexingMetricsArePublishing() { final String dataNode = internalCluster().startNode(); @@ -116,6 +320,11 @@ public void testNodeIndexingMetricsArePublishing() { var indexingFailedTotal = getSingleRecordedMetric(plugin::getLongAsyncCounterMeasurement, "es.indexing.indexing.failed.total"); assertThat(indexingFailedTotal.getLong(), equalTo(0L)); + var indexingFailedDueToVersionConflictTotal = getSingleRecordedMetric( + plugin::getLongAsyncCounterMeasurement, + "es.indexing.indexing.failed.version_conflict.total" + ); + assertThat(indexingFailedDueToVersionConflictTotal.getLong(), equalTo(0L)); var deletionTotal = getSingleRecordedMetric(plugin::getLongAsyncCounterMeasurement, "es.indexing.deletion.docs.total"); assertThat(deletionTotal.getLong(), equalTo((long) deletesCount)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java index e079994003751..37e299c816562 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java @@ -24,7 +24,6 
@@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.search.Scroll; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.sort.ShardDocSortField; @@ -97,14 +96,14 @@ public void testSearchSort() throws Exception { int fetchSize = randomIntBetween(10, 100); // test _doc sort SearchRequestBuilder request = prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .setSize(fetchSize) .addSort(SortBuilders.fieldSort("_doc")); assertSearchSlicesWithScroll(request, field, max, numDocs); // test numeric sort request = prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .addSort(SortBuilders.fieldSort("random_int")) .setSize(fetchSize); assertSearchSlicesWithScroll(request, field, max, numDocs); @@ -121,7 +120,7 @@ public void testWithPreferenceAndRoutings() throws Exception { int max = randomIntBetween(2, numShards * 3); int fetchSize = randomIntBetween(10, 100); SearchRequestBuilder request = prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .setSize(fetchSize) .setPreference("_shards:1,4") .addSort(SortBuilders.fieldSort("_doc")); @@ -133,7 +132,7 @@ public void testWithPreferenceAndRoutings() throws Exception { int max = randomIntBetween(2, numShards * 3); int fetchSize = randomIntBetween(10, 100); SearchRequestBuilder request = prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .setSize(fetchSize) .setRouting("foo", "bar") .addSort(SortBuilders.fieldSort("_doc")); @@ -151,7 +150,7 @@ 
public void testWithPreferenceAndRoutings() throws Exception { int max = randomIntBetween(2, numShards * 3); int fetchSize = randomIntBetween(10, 100); SearchRequestBuilder request = prepareSearch("alias1", "alias3").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .setSize(fetchSize) .addSort(SortBuilders.fieldSort("_doc")); assertSearchSlicesWithScroll(request, "_id", max, numDocs); @@ -176,7 +175,7 @@ private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String f searchResponse.decRef(); searchResponse = client().prepareSearchScroll("test") .setScrollId(scrollId) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .get(); scrollId = searchResponse.getScrollId(); totalResults += searchResponse.getHits().getHits().length; @@ -271,7 +270,7 @@ public void testInvalidFields() throws Exception { SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .slice(new SliceBuilder("invalid_random_int", 0, 10)) ); @@ -282,7 +281,7 @@ public void testInvalidFields() throws Exception { exc = expectThrows( SearchPhaseExecutionException.class, prepareSearch("test").setQuery(matchAllQuery()) - .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setScroll(TimeValue.timeValueSeconds(10)) .slice(new SliceBuilder("invalid_random_kw", 0, 10)) ); rootCause = findRootCause(exc); diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 22f69edc3a5f8..c5bb47ce1e4f7 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -151,6 +151,10 @@ static TransportVersion def(int id) { public 
static final TransportVersion ESQL_CCS_TELEMETRY_STATS = def(8_816_00_0); public static final TransportVersion TEXT_EMBEDDING_QUERY_VECTOR_BUILDER_INFER_MODEL_ID = def(8_817_00_0); public static final TransportVersion ESQL_ENABLE_NODE_LEVEL_REDUCTION = def(8_818_00_0); + public static final TransportVersion JINA_AI_INTEGRATION_ADDED = def(8_819_00_0); + public static final TransportVersion TRACK_INDEX_FAILED_DUE_TO_VERSION_CONFLICT_METRIC = def(8_820_00_0); + public static final TransportVersion REPLACE_FAILURE_STORE_OPTIONS_WITH_SELECTOR_SYNTAX = def(8_821_00_0); + public static final TransportVersion ELASTIC_INFERENCE_SERVICE_UNIFIED_CHAT_COMPLETIONS_INTEGRATION = def(8_822_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 03e05ca0e4247..24c427c32d69a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeRequest; -import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -44,9 +43,7 @@ public class RestoreSnapshotRequest extends MasterNodeRequest .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index 801dbbdee0858..be7aaeec8f69e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.info.ClusterInfoRequest; -import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ArrayUtils; @@ -95,13 +94,7 @@ public static Feature[] fromRequest(RestRequest request) { private transient boolean includeDefaults = false; public GetIndexRequest() { - super( - DataStream.isFailureStoreFeatureFlagEnabled() - ? IndicesOptions.builder(IndicesOptions.strictExpandOpen()) - .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) - .build() - : IndicesOptions.strictExpandOpen() - ); + super(IndicesOptions.strictExpandOpen()); } public GetIndexRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 7b782c6da5a84..05cc0d2cf05d8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -82,7 +82,7 @@ public class PutMappingRequest extends AcknowledgedRequest im .allowClosedIndices(true) .allowAliasToMultipleIndices(true) .ignoreThrottled(false) - .allowFailureIndices(false) + .allowSelectors(false) ) .build(); diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index 749470e181deb..24f8735b6bd7f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -20,6 +20,8 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetadataMappingService; import org.elasticsearch.cluster.service.ClusterService; @@ -40,6 +42,7 @@ import java.util.List; import java.util.Objects; import java.util.Optional; +import java.util.SortedMap; /** * Put mapping action. 
@@ -106,7 +109,14 @@ protected void masterOperation( return; } - final String message = checkForSystemIndexViolations(systemIndices, concreteIndices, request); + String message = checkForFailureStoreViolations(clusterService.state(), concreteIndices, request); + if (message != null) { + logger.warn(message); + listener.onFailure(new IllegalStateException(message)); + return; + } + + message = checkForSystemIndexViolations(systemIndices, concreteIndices, request); if (message != null) { logger.warn(message); listener.onFailure(new IllegalStateException(message)); @@ -172,6 +182,33 @@ static void performMappingUpdate( metadataMappingService.putMapping(updateRequest, wrappedListener); } + static String checkForFailureStoreViolations(ClusterState clusterState, Index[] concreteIndices, PutMappingRequest request) { + // Requests that a cluster generates itself are permitted to make changes to mappings + // so that rolling upgrade scenarios still work. We check this via the request's origin. + if (Strings.isNullOrEmpty(request.origin()) == false) { + return null; + } + + List violations = new ArrayList<>(); + SortedMap indicesLookup = clusterState.metadata().getIndicesLookup(); + for (Index index : concreteIndices) { + IndexAbstraction indexAbstraction = indicesLookup.get(index.getName()); + if (indexAbstraction != null) { + DataStream maybeDataStream = indexAbstraction.getParentDataStream(); + if (maybeDataStream != null && maybeDataStream.isFailureStoreIndex(index.getName())) { + violations.add(index.getName()); + } + } + } + + if (violations.isEmpty() == false) { + return "Cannot update mappings in " + + violations + + ": mappings for indices contained in data stream failure stores cannot be updated"; + } + return null; + } + static String checkForSystemIndexViolations(SystemIndices systemIndices, Index[] concreteIndices, PutMappingRequest request) { // Requests that a cluster generates itself are permitted to have a difference in mappings // so that rolling upgrade 
scenarios still work. We check this via the request's origin. diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java index f5c100b7884bb..4aa022aff1c80 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -59,6 +59,7 @@ import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; +import java.util.stream.Stream; import static org.elasticsearch.action.search.TransportSearchHelper.checkCCSVersionCompatibility; @@ -598,12 +599,13 @@ private static void mergeResults( private static void enrichIndexAbstraction( ClusterState clusterState, - ResolvedExpression indexAbstraction, + ResolvedExpression resolvedExpression, List indices, List aliases, List dataStreams ) { - IndexAbstraction ia = clusterState.metadata().getIndicesLookup().get(indexAbstraction.resource()); + SortedMap indicesLookup = clusterState.metadata().getIndicesLookup(); + IndexAbstraction ia = indicesLookup.get(resolvedExpression.resource()); if (ia != null) { switch (ia.getType()) { case CONCRETE_INDEX -> { @@ -632,13 +634,24 @@ private static void enrichIndexAbstraction( ); } case ALIAS -> { - String[] indexNames = ia.getIndices().stream().map(Index::getName).toArray(String[]::new); + String[] indexNames = getAliasIndexStream(resolvedExpression, ia, indicesLookup).map(Index::getName) + .toArray(String[]::new); Arrays.sort(indexNames); aliases.add(new ResolvedAlias(ia.getName(), indexNames)); } case DATA_STREAM -> { DataStream dataStream = (DataStream) ia; - String[] backingIndices = dataStream.getIndices().stream().map(Index::getName).toArray(String[]::new); + Stream dataStreamIndices = resolvedExpression.selector() == null + ? 
dataStream.getIndices().stream() + : switch (resolvedExpression.selector()) { + case DATA -> dataStream.getBackingIndices().getIndices().stream(); + case FAILURES -> dataStream.getFailureIndices().getIndices().stream(); + case ALL_APPLICABLE -> Stream.concat( + dataStream.getBackingIndices().getIndices().stream(), + dataStream.getFailureIndices().getIndices().stream() + ); + }; + String[] backingIndices = dataStreamIndices.map(Index::getName).toArray(String[]::new); dataStreams.add(new ResolvedDataStream(dataStream.getName(), backingIndices, DataStream.TIMESTAMP_FIELD_NAME)); } default -> throw new IllegalStateException("unknown index abstraction type: " + ia.getType()); @@ -646,6 +659,52 @@ private static void enrichIndexAbstraction( } } + private static Stream getAliasIndexStream( + ResolvedExpression resolvedExpression, + IndexAbstraction ia, + SortedMap indicesLookup + ) { + Stream aliasIndices; + if (resolvedExpression.selector() == null) { + aliasIndices = ia.getIndices().stream(); + } else { + aliasIndices = switch (resolvedExpression.selector()) { + case DATA -> ia.getIndices().stream(); + case FAILURES -> { + assert ia.isDataStreamRelated() : "Illegal selector [failures] used on non data stream alias"; + yield ia.getIndices() + .stream() + .map(Index::getName) + .map(indicesLookup::get) + .map(IndexAbstraction::getParentDataStream) + .filter(Objects::nonNull) + .distinct() + .map(DataStream::getFailureIndices) + .flatMap(failureIndices -> failureIndices.getIndices().stream()); + } + case ALL_APPLICABLE -> { + if (ia.isDataStreamRelated()) { + yield Stream.concat( + ia.getIndices().stream(), + ia.getIndices() + .stream() + .map(Index::getName) + .map(indicesLookup::get) + .map(IndexAbstraction::getParentDataStream) + .filter(Objects::nonNull) + .distinct() + .map(DataStream::getFailureIndices) + .flatMap(failureIndices -> failureIndices.getIndices().stream()) + ); + } else { + yield ia.getIndices().stream(); + } + } + }; + } + return aliasIndices; + } + 
enum Attribute { OPEN, CLOSED, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java index a677897d79633..7b28acdbd8f84 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/LazyRolloverAction.java @@ -21,6 +21,8 @@ import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; @@ -119,32 +121,38 @@ protected void masterOperation( : "The auto rollover action does not expect any other parameters in the request apart from the data stream name"; Metadata metadata = clusterState.metadata(); - DataStream dataStream = metadata.dataStreams().get(rolloverRequest.getRolloverTarget()); + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + boolean isFailureStoreRollover = resolvedRolloverTarget.selector() != null + && resolvedRolloverTarget.selector().shouldIncludeFailures(); + + DataStream dataStream = metadata.dataStreams().get(resolvedRolloverTarget.resource()); // Skip submitting the task if we detect that the lazy rollover has been already executed. 
- if (isLazyRolloverNeeded(dataStream, rolloverRequest.targetsFailureStore()) == false) { - DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(rolloverRequest.targetsFailureStore()); + if (isLazyRolloverNeeded(dataStream, isFailureStoreRollover) == false) { + DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(isFailureStoreRollover); listener.onResponse(noopLazyRolloverResponse(targetIndices)); return; } // We evaluate the names of the source index as well as what our newly created index would be. final MetadataRolloverService.NameResolution trialRolloverNames = MetadataRolloverService.resolveRolloverNames( clusterState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), - rolloverRequest.targetsFailureStore() + isFailureStoreRollover ); final String trialSourceIndexName = trialRolloverNames.sourceName(); final String trialRolloverIndexName = trialRolloverNames.rolloverName(); MetadataCreateIndexService.validateIndexName(trialRolloverIndexName, clusterState.metadata(), clusterState.routingTable()); - assert metadata.dataStreams().containsKey(rolloverRequest.getRolloverTarget()) : "Auto-rollover applies only to data streams"; + assert metadata.dataStreams().containsKey(resolvedRolloverTarget.resource()) : "Auto-rollover applies only to data streams"; String source = "lazy_rollover source [" + trialSourceIndexName + "] to target [" + trialRolloverIndexName + "]"; // We create a new rollover request to ensure that it doesn't contain any other parameters apart from the data stream name // This will provide a more resilient user experience - var newRolloverRequest = new RolloverRequest(rolloverRequest.getRolloverTarget(), null); - newRolloverRequest.setIndicesOptions(rolloverRequest.indicesOptions()); + var newRolloverRequest = new RolloverRequest(resolvedRolloverTarget.combined(), null); LazyRolloverTask rolloverTask = 
new LazyRolloverTask(newRolloverRequest, listener); lazyRolloverTaskQueue.submitTask(source, rolloverTask, rolloverRequest.masterNodeTimeout()); } @@ -223,12 +231,19 @@ public ClusterState executeTask( AllocationActionMultiListener allocationActionMultiListener ) throws Exception { + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + boolean isFailureStoreRollover = resolvedRolloverTarget.selector() != null + && resolvedRolloverTarget.selector().shouldIncludeFailures(); + // If the data stream has been rolled over since it was marked for lazy rollover, this operation is a noop - final DataStream dataStream = currentState.metadata().dataStreams().get(rolloverRequest.getRolloverTarget()); + final DataStream dataStream = currentState.metadata().dataStreams().get(resolvedRolloverTarget.resource()); assert dataStream != null; - if (isLazyRolloverNeeded(dataStream, rolloverRequest.targetsFailureStore()) == false) { - final DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(rolloverRequest.targetsFailureStore()); + if (isLazyRolloverNeeded(dataStream, isFailureStoreRollover) == false) { + final DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(isFailureStoreRollover); var noopResponse = noopLazyRolloverResponse(targetIndices); notifyAllListeners(rolloverTaskContexts, context -> context.getTask().listener.onResponse(noopResponse)); return currentState; @@ -237,7 +252,7 @@ public ClusterState executeTask( // Perform the actual rollover final var rolloverResult = rolloverService.rolloverClusterState( currentState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), List.of(), @@ -246,7 +261,7 @@ public ClusterState executeTask( false, null, null, - rolloverRequest.targetsFailureStore() + isFailureStoreRollover ); 
results.add(rolloverResult); logger.trace("lazy rollover result [{}]", rolloverResult); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 552ce727d4249..608d32d50a856 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -16,7 +16,8 @@ import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.mapper.MapperService; @@ -81,7 +82,7 @@ public class RolloverRequest extends AcknowledgedRequest implem private RolloverConditions conditions = new RolloverConditions(); // the index name "_na_" is never read back, what matters are settings, mappings and aliases private CreateIndexRequest createIndexRequest = new CreateIndexRequest("_na_"); - private IndicesOptions indicesOptions = IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + private IndicesOptions indicesOptions = IndicesOptions.strictSingleIndexNoExpandForbidClosedAllowSelectors(); public RolloverRequest(StreamInput in) throws IOException { super(in); @@ -125,12 +126,15 @@ public ActionRequestValidationException validate() { ); } - var selector = indicesOptions.selectorOptions().defaultSelector(); - if (selector == IndexComponentSelector.ALL_APPLICABLE) { - validationException = addValidationError( - "rollover cannot be 
applied to both regular and failure indices at the same time", - validationException - ); + if (rolloverTarget != null) { + ResolvedExpression resolvedExpression = SelectorResolver.parseExpression(rolloverTarget, indicesOptions); + IndexComponentSelector selector = resolvedExpression.selector(); + if (IndexComponentSelector.ALL_APPLICABLE.equals(selector)) { + validationException = addValidationError( + "rollover cannot be applied to both regular and failure indices at the same time", + validationException + ); + } } return validationException; @@ -162,13 +166,6 @@ public IndicesOptions indicesOptions() { return indicesOptions; } - /** - * @return true of the rollover request targets the failure store, false otherwise. - */ - public boolean targetsFailureStore() { - return DataStream.isFailureStoreFeatureFlagEnabled() && indicesOptions.includeFailureIndices(); - } - public void setIndicesOptions(IndicesOptions indicesOptions) { this.indicesOptions = indicesOptions; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index c5c874f9bcddf..4f0aa9c5bade4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -36,6 +36,8 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexMetadataStats; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import 
org.elasticsearch.cluster.metadata.MetadataDataStreamsService; @@ -149,8 +151,7 @@ protected ClusterBlockException checkBlock(RolloverRequest request, ClusterState .matchOpen(request.indicesOptions().expandWildcardsOpen()) .matchClosed(request.indicesOptions().expandWildcardsClosed()) .build(), - IndicesOptions.GatekeeperOptions.DEFAULT, - request.indicesOptions().selectorOptions() + IndicesOptions.GatekeeperOptions.DEFAULT ); return state.blocks() @@ -170,11 +171,18 @@ protected void masterOperation( assert task instanceof CancellableTask; Metadata metadata = clusterState.metadata(); + + // Parse the rollover request's target since the expression may contain a selector + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + boolean targetFailureStore = resolvedRolloverTarget.selector() != null && resolvedRolloverTarget.selector().shouldIncludeFailures(); + // We evaluate the names of the index for which we should evaluate conditions, as well as what our newly created index *would* be.
- boolean targetFailureStore = rolloverRequest.targetsFailureStore(); final MetadataRolloverService.NameResolution trialRolloverNames = MetadataRolloverService.resolveRolloverNames( clusterState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), targetFailureStore @@ -183,7 +191,7 @@ protected void masterOperation( final String trialRolloverIndexName = trialRolloverNames.rolloverName(); MetadataCreateIndexService.validateIndexName(trialRolloverIndexName, metadata, clusterState.routingTable()); - boolean isDataStream = metadata.dataStreams().containsKey(rolloverRequest.getRolloverTarget()); + boolean isDataStream = metadata.dataStreams().containsKey(resolvedRolloverTarget.resource()); if (rolloverRequest.isLazy()) { if (isDataStream == false || rolloverRequest.getConditions().hasConditions()) { String message; @@ -201,7 +209,7 @@ protected void masterOperation( } if (rolloverRequest.isDryRun() == false) { metadataDataStreamsService.setRolloverOnWrite( - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), true, targetFailureStore, rolloverRequest.ackTimeout(), @@ -225,7 +233,7 @@ protected void masterOperation( final IndexAbstraction rolloverTargetAbstraction = clusterState.metadata() .getIndicesLookup() - .get(rolloverRequest.getRolloverTarget()); + .get(resolvedRolloverTarget.resource()); if (rolloverTargetAbstraction.getType() == IndexAbstraction.Type.ALIAS && rolloverTargetAbstraction.isDataStreamRelated()) { listener.onFailure( new IllegalStateException("Aliases to data streams cannot be rolled over. 
Please rollover the data stream itself.") @@ -246,10 +254,10 @@ protected void masterOperation( final var statsIndicesOptions = new IndicesOptions( IndicesOptions.ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS, IndicesOptions.WildcardOptions.builder().matchClosed(true).allowEmptyExpressions(false).build(), - IndicesOptions.GatekeeperOptions.DEFAULT, - rolloverRequest.indicesOptions().selectorOptions() + IndicesOptions.GatekeeperOptions.DEFAULT ); - IndicesStatsRequest statsRequest = new IndicesStatsRequest().indices(rolloverRequest.getRolloverTarget()) + // Make sure to recombine any selectors on the stats request + IndicesStatsRequest statsRequest = new IndicesStatsRequest().indices(resolvedRolloverTarget.combined()) .clear() .indicesOptions(statsIndicesOptions) .docs(true) @@ -266,9 +274,7 @@ protected void masterOperation( listener.delegateFailureAndWrap((delegate, statsResponse) -> { AutoShardingResult rolloverAutoSharding = null; - final IndexAbstraction indexAbstraction = clusterState.metadata() - .getIndicesLookup() - .get(rolloverRequest.getRolloverTarget()); + final IndexAbstraction indexAbstraction = clusterState.metadata().getIndicesLookup().get(resolvedRolloverTarget.resource()); if (indexAbstraction.getType().equals(IndexAbstraction.Type.DATA_STREAM)) { DataStream dataStream = (DataStream) indexAbstraction; final Optional indexStats = Optional.ofNullable(statsResponse) @@ -492,14 +498,20 @@ public ClusterState executeTask( ) throws Exception { final var rolloverTask = rolloverTaskContext.getTask(); final var rolloverRequest = rolloverTask.rolloverRequest(); + ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression( + rolloverRequest.getRolloverTarget(), + rolloverRequest.indicesOptions() + ); + boolean targetFailureStore = resolvedRolloverTarget.selector() != null + && resolvedRolloverTarget.selector().shouldIncludeFailures(); // Regenerate the rollover names, as a rollover could have happened in between the pre-check and the 
cluster state update final var rolloverNames = MetadataRolloverService.resolveRolloverNames( currentState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), - rolloverRequest.targetsFailureStore() + targetFailureStore ); // Re-evaluate the conditions, now with our final source index name @@ -532,7 +544,7 @@ public ClusterState executeTask( final IndexAbstraction rolloverTargetAbstraction = currentState.metadata() .getIndicesLookup() - .get(rolloverRequest.getRolloverTarget()); + .get(resolvedRolloverTarget.resource()); final IndexMetadataStats sourceIndexStats = rolloverTargetAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM ? IndexMetadataStats.fromStatsResponse(rolloverSourceIndex, rolloverTask.statsResponse()) @@ -541,7 +553,7 @@ public ClusterState executeTask( // Perform the actual rollover final var rolloverResult = rolloverService.rolloverClusterState( currentState, - rolloverRequest.getRolloverTarget(), + resolvedRolloverTarget.resource(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), metConditions, @@ -550,7 +562,7 @@ public ClusterState executeTask( false, sourceIndexStats, rolloverTask.autoShardingResult(), - rolloverRequest.targetsFailureStore() + targetFailureStore ); results.add(rolloverResult); logger.trace("rollover result [{}]", rolloverResult); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index b137809047d18..dd473869fb2d9 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import 
org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.client.internal.node.NodeClient; @@ -216,11 +216,9 @@ private void rollOverFailureStores(Runnable runnable) { } try (RefCountingRunnable refs = new RefCountingRunnable(runnable)) { for (String dataStream : failureStoresToBeRolledOver) { - RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null); - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) - .build() + RolloverRequest rolloverRequest = new RolloverRequest( + IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES), + null ); // We are executing a lazy rollover because it is an action specialised for this situation, when we want an // unconditional and performant rollover. 
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 65264faf50129..2a6a789d9d312 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -25,7 +25,7 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; @@ -425,11 +425,7 @@ private void rollOverDataStreams( RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null); rolloverRequest.masterNodeTimeout(bulkRequest.timeout); if (targetFailureStore) { - rolloverRequest.setIndicesOptions( - IndicesOptions.builder(rolloverRequest.indicesOptions()) - .selectorOptions(IndicesOptions.SelectorOptions.FAILURES) - .build() - ); + rolloverRequest.setRolloverTarget(IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES)); } // We are executing a lazy rollover because it is an action specialised for this situation, when we want an // unconditional and performant rollover. @@ -438,9 +434,8 @@ private void rollOverDataStreams( @Override public void onResponse(RolloverResponse result) { logger.debug( - "Data stream{} {} has {} over, the latest index is {}", - rolloverRequest.targetsFailureStore() ? " failure store" : "", - dataStream, + "Data stream [{}] has {} over, the latest index is {}", + rolloverRequest.getRolloverTarget(), result.isRolledOver() ? 
"been successfully rolled" : "skipped rolling", result.getNewIndex() ); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsActionUtil.java b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsActionUtil.java index a0a05138406c5..62caba8f7ed96 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsActionUtil.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsActionUtil.java @@ -9,16 +9,18 @@ package org.elasticsearch.action.datastreams; +import org.elasticsearch.action.support.IndexComponentSelector; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.index.Index; +import java.util.ArrayList; import java.util.List; import java.util.SortedMap; -import java.util.stream.Stream; public class DataStreamsActionUtil { @@ -47,25 +49,79 @@ public static IndicesOptions updateIndicesOptions(IndicesOptions indicesOptions) return indicesOptions; } - public static Stream resolveConcreteIndexNames( + public static List resolveConcreteIndexNames( IndexNameExpressionResolver indexNameExpressionResolver, ClusterState clusterState, String[] names, IndicesOptions indicesOptions ) { - List abstractionNames = getDataStreamNames(indexNameExpressionResolver, clusterState, names, indicesOptions); + List abstractionNames = indexNameExpressionResolver.dataStreams( + clusterState, + updateIndicesOptions(indicesOptions), + names + ); SortedMap indicesLookup = clusterState.getMetadata().getIndicesLookup(); - return abstractionNames.stream().flatMap(abstractionName -> { + List results = new ArrayList<>(abstractionNames.size()); + for 
(ResolvedExpression abstractionName : abstractionNames) { + IndexAbstraction indexAbstraction = indicesLookup.get(abstractionName.resource()); + assert indexAbstraction != null; + if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) { + selectDataStreamIndicesNames( + (DataStream) indexAbstraction, + IndexComponentSelector.FAILURES.equals(abstractionName.selector()), + results + ); + } + } + return results; + } + + /** + * Resolves a list of expressions into data stream names and then collects the concrete indices + * that are applicable for those data streams based on the selector provided in the arguments. + * @param indexNameExpressionResolver resolver object + * @param clusterState state to query + * @param names data stream expressions + * @param selector which component indices of the data stream should be returned + * @param indicesOptions options for expression resolution + * @return A stream of concrete index names that belong to the components specified + * on the data streams returned from the expressions given + */ + public static List resolveConcreteIndexNamesWithSelector( + IndexNameExpressionResolver indexNameExpressionResolver, + ClusterState clusterState, + String[] names, + IndexComponentSelector selector, + IndicesOptions indicesOptions + ) { + assert indicesOptions.allowSelectors() == false : "If selectors are enabled, use resolveConcreteIndexNames instead"; + List abstractionNames = indexNameExpressionResolver.dataStreamNames( + clusterState, + updateIndicesOptions(indicesOptions), + names + ); + SortedMap indicesLookup = clusterState.getMetadata().getIndicesLookup(); + + List results = new ArrayList<>(abstractionNames.size()); + for (String abstractionName : abstractionNames) { IndexAbstraction indexAbstraction = indicesLookup.get(abstractionName); assert indexAbstraction != null; if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) { - DataStream dataStream = (DataStream) indexAbstraction; - List indices = 
dataStream.getIndices(); - return indices.stream().map(Index::getName); - } else { - return Stream.empty(); + if (selector.shouldIncludeData()) { + selectDataStreamIndicesNames((DataStream) indexAbstraction, false, results); + } + if (selector.shouldIncludeFailures()) { + selectDataStreamIndicesNames((DataStream) indexAbstraction, true, results); + } } - }); + } + return results; + } + + private static void selectDataStreamIndicesNames(DataStream indexAbstraction, boolean failureStore, List accumulator) { + for (Index index : indexAbstraction.getDataStreamIndices(failureStore).getIndices()) { + accumulator.add(index.getName()); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java index 9266bae439b73..82afeec752378 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DataStreamsStatsAction.java @@ -38,8 +38,6 @@ public DataStreamsStatsAction() { public static class Request extends BroadcastRequest { public Request() { - // this doesn't really matter since data stream name resolution isn't affected by IndicesOptions and - // a data stream's backing indices are retrieved from its metadata super( null, IndicesOptions.builder() @@ -58,10 +56,9 @@ public Request() { .allowAliasToMultipleIndices(true) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) - .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE) .build() ); } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java index 4f647d4f02884..640c88918ffc0 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java @@ -61,7 +61,7 @@ public static class Request extends MasterNodeRequest implements Indice .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 883fc543749c2..c55957787aee7 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -72,10 +72,11 @@ public static class Request extends MasterNodeReadRequest implements In .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) .build(); + private boolean includeDefaults = false; private boolean verbose = false; diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java index a43d29501a7ee..401bd7a27c6fa 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java @@ -63,7 +63,7 @@ public static class Request extends MasterNodeReadRequest implements In .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(true) + .allowSelectors(false) .build() ) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java 
b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java index b054d12890366..c2b7de8d5df8b 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java @@ -94,7 +94,7 @@ public static Request parseRequest(XContentParser parser, Factory factory) { .allowAliasToMultipleIndices(false) .allowClosedIndices(true) .ignoreThrottled(false) - .allowFailureIndices(false) + .allowSelectors(false) .build() ) .build(); diff --git a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java index 62771230636c1..cce01aca7685a 100644 --- a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java +++ b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java @@ -82,7 +82,7 @@ public String[] indices() { @Override public IndicesOptions indicesOptions() { - return IndicesOptions.STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED; + return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java index ba1afaf4678fb..7890a0f9f9738 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java @@ -21,7 +21,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.search.Scroll; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import 
org.elasticsearch.search.builder.SubSearchSourceBuilder; @@ -49,7 +48,8 @@ public class CanMatchNodeRequest extends TransportRequest implements IndicesRequ private final SearchType searchType; private final Boolean requestCache; private final boolean allowPartialSearchResults; - private final Scroll scroll; + @Nullable + private final TimeValue scroll; private final int numberOfShards; private final long nowInMillis; @Nullable @@ -195,7 +195,7 @@ public CanMatchNodeRequest(StreamInput in) throws IOException { ); } } - scroll = in.readOptionalWriteable(Scroll::new); + scroll = in.readOptionalTimeValue(); requestCache = in.readOptionalBoolean(); allowPartialSearchResults = in.readBoolean(); numberOfShards = in.readVInt(); @@ -216,7 +216,7 @@ public void writeTo(StreamOutput out) throws IOException { // types not supported so send an empty array to previous versions out.writeStringArray(Strings.EMPTY_ARRAY); } - out.writeOptionalWriteable(scroll); + out.writeOptionalTimeValue(scroll); out.writeOptionalBoolean(requestCache); out.writeBoolean(allowPartialSearchResults); out.writeVInt(numberOfShards); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 2e1d58e042f09..8b77ec7fb5463 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -24,7 +24,6 @@ import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.Rewriteable; -import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.SearchContext; @@ -82,7 +81,7 @@ public class SearchRequest extends ActionRequest implements IndicesRequest.Repla private Boolean 
allowPartialSearchResults; - private Scroll scroll; + private TimeValue scrollKeepAlive; private int batchedReduceSize = DEFAULT_BATCHED_REDUCE_SIZE; @@ -206,7 +205,7 @@ private SearchRequest( this.preFilterShardSize = searchRequest.preFilterShardSize; this.requestCache = searchRequest.requestCache; this.routing = searchRequest.routing; - this.scroll = searchRequest.scroll; + this.scrollKeepAlive = searchRequest.scrollKeepAlive; this.searchType = searchRequest.searchType; this.source = searchRequest.source; this.localClusterAlias = localClusterAlias; @@ -229,7 +228,7 @@ public SearchRequest(StreamInput in) throws IOException { indices = in.readStringArray(); routing = in.readOptionalString(); preference = in.readOptionalString(); - scroll = in.readOptionalWriteable(Scroll::new); + scrollKeepAlive = in.readOptionalTimeValue(); source = in.readOptionalWriteable(SearchSourceBuilder::new); if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { // types no longer relevant so ignore @@ -276,7 +275,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(indices); out.writeOptionalString(routing); out.writeOptionalString(preference); - out.writeOptionalWriteable(scroll); + out.writeOptionalTimeValue(scrollKeepAlive); out.writeOptionalWriteable(source); if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { // types not supported so send an empty array to previous versions @@ -525,23 +524,16 @@ public String[] indices() { /** * If set, will enable scrolling of the search request. */ - public Scroll scroll() { - return scroll; - } - - /** - * If set, will enable scrolling of the search request. - */ - public SearchRequest scroll(Scroll scroll) { - this.scroll = scroll; - return this; + public TimeValue scroll() { + return scrollKeepAlive; } /** * If set, will enable scrolling of the search request for the specified timeout. 
*/ public SearchRequest scroll(TimeValue keepAlive) { - return scroll(new Scroll(keepAlive)); + this.scrollKeepAlive = keepAlive; + return this; } /** @@ -681,7 +673,7 @@ public boolean hasKnnSearch() { } public int resolveTrackTotalHitsUpTo() { - return resolveTrackTotalHitsUpTo(scroll, source); + return resolveTrackTotalHitsUpTo(scrollKeepAlive, source); } /** @@ -731,7 +723,7 @@ public SearchRequest rewrite(QueryRewriteContext ctx) throws IOException { return hasChanged ? new SearchRequest(this).source(source) : this; } - public static int resolveTrackTotalHitsUpTo(Scroll scroll, SearchSourceBuilder source) { + public static int resolveTrackTotalHitsUpTo(TimeValue scroll, SearchSourceBuilder source) { if (scroll != null) { // no matter what the value of track_total_hits is return SearchContext.TRACK_TOTAL_HITS_ACCURATE; @@ -752,8 +744,8 @@ public final String buildDescription() { Strings.arrayToDelimitedString(indices, ",", sb); sb.append("]"); sb.append(", search_type[").append(searchType).append("]"); - if (scroll != null) { - sb.append(", scroll[").append(scroll.keepAlive()).append("]"); + if (scrollKeepAlive != null) { + sb.append(", scroll[").append(scrollKeepAlive).append("]"); } if (source != null) { sb.append(", source[").append(source.toString(FORMAT_PARAMS)).append("]"); @@ -784,7 +776,7 @@ public boolean equals(Object o) { && Objects.equals(preference, that.preference) && Objects.equals(source, that.source) && Objects.equals(requestCache, that.requestCache) - && Objects.equals(scroll, that.scroll) + && Objects.equals(scrollKeepAlive, that.scrollKeepAlive) && Objects.equals(batchedReduceSize, that.batchedReduceSize) && Objects.equals(maxConcurrentShardRequests, that.maxConcurrentShardRequests) && Objects.equals(preFilterShardSize, that.preFilterShardSize) @@ -805,7 +797,7 @@ public int hashCode() { preference, source, requestCache, - scroll, + scrollKeepAlive, indicesOptions, batchedReduceSize, maxConcurrentShardRequests, @@ -836,7 +828,7 @@ public 
String toString() { + ", requestCache=" + requestCache + ", scroll=" - + scroll + + scrollKeepAlive + ", maxConcurrentShardRequests=" + maxConcurrentShardRequests + ", batchedReduceSize=" diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 2927c394da3d4..d309ef3a7498a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -16,7 +16,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.script.Script; -import org.elasticsearch.search.Scroll; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.builder.PointInTimeBuilder; @@ -62,14 +61,6 @@ public SearchRequestBuilder setSearchType(SearchType searchType) { return this; } - /** - * If set, will enable scrolling of the search request. - */ - public SearchRequestBuilder setScroll(Scroll scroll) { - request.scroll(scroll); - return this; - } - /** * If set, will enable scrolling of the search request for the specified timeout. */ diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 18edc83bf3dcc..6074a591a9762 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -343,8 +343,7 @@ public ShardSearchFailure[] getShardFailures() { } /** - * If scrolling was enabled ({@link SearchRequest#scroll(org.elasticsearch.search.Scroll)}, the - * scroll id that can be used to continue scrolling. 
+ * If scrolling was enabled ({@link SearchRequest#scroll(TimeValue)}, the scroll id that can be used to continue scrolling. */ public String getScrollId() { return scrollId; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java index 71b88b03a5463..1d34288665ae4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequest.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.search.Scroll; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xcontent.ToXContentObject; @@ -30,7 +29,7 @@ public class SearchScrollRequest extends ActionRequest implements ToXContentObject { private String scrollId; - private Scroll scroll; + private TimeValue scroll; public SearchScrollRequest() {} @@ -41,14 +40,14 @@ public SearchScrollRequest(String scrollId) { public SearchScrollRequest(StreamInput in) throws IOException { super(in); scrollId = in.readString(); - scroll = in.readOptionalWriteable(Scroll::new); + scroll = in.readOptionalTimeValue(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(scrollId); - out.writeOptionalWriteable(scroll); + out.writeOptionalTimeValue(scroll); } @Override @@ -79,23 +78,16 @@ public ParsedScrollId parseScrollId() { /** * If set, will enable scrolling of the search request. */ - public Scroll scroll() { + public TimeValue scroll() { return scroll; } - /** - * If set, will enable scrolling of the search request. 
- */ - public SearchScrollRequest scroll(Scroll scroll) { - this.scroll = scroll; - return this; - } - /** * If set, will enable scrolling of the search request for the specified timeout. */ public SearchScrollRequest scroll(TimeValue keepAlive) { - return scroll(new Scroll(keepAlive)); + this.scroll = keepAlive; + return this; } @Override @@ -135,7 +127,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field("scroll_id", scrollId); if (scroll != null) { - builder.field("scroll", scroll.keepAlive().getStringRep()); + builder.field("scroll", scroll.getStringRep()); } builder.endObject(); return builder; @@ -157,7 +149,7 @@ public void fromXContent(XContentParser parser) throws IOException { } else if ("scroll_id".equals(currentFieldName) && token == XContentParser.Token.VALUE_STRING) { scrollId(parser.text()); } else if ("scroll".equals(currentFieldName) && token == XContentParser.Token.VALUE_STRING) { - scroll(new Scroll(TimeValue.parseTimeValue(parser.text(), null, "scroll"))); + scroll(TimeValue.parseTimeValue(parser.text(), null, "scroll")); } else { throw new IllegalArgumentException( "Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] " diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java index 24dac98166ce0..57a6e44f4c2b0 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollRequestBuilder.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.search.Scroll; /** * A search scroll action request builder. 
@@ -35,14 +34,6 @@ public SearchScrollRequestBuilder setScrollId(String scrollId) { return this; } - /** - * If set, will enable scrolling of the search request. - */ - public SearchScrollRequestBuilder setScroll(Scroll scroll) { - request.scroll(scroll); - return this; - } - /** * If set, will enable scrolling of the search request for the specified timeout. */ diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index ebbd47336e3da..4231d598b2d70 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -13,7 +13,6 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.core.Nullable; @@ -47,37 +46,13 @@ * @param gatekeeperOptions, applies to all the resolved indices and defines if throttled will be included and if certain type of * aliases or indices are allowed, or they will throw an error. It acts as a gatekeeper when an action * does not support certain options. - * @param selectorOptions, applies to all resolved expressions, and it specifies the index component that should be included, if there - * is no index component defined on the expression level. 
*/ public record IndicesOptions( ConcreteTargetOptions concreteTargetOptions, WildcardOptions wildcardOptions, - GatekeeperOptions gatekeeperOptions, - SelectorOptions selectorOptions + GatekeeperOptions gatekeeperOptions ) implements ToXContentFragment { - /** - * @deprecated this query param will be replaced by the selector `::` on the expression level - */ - @Deprecated - public static final String FAILURE_STORE_QUERY_PARAM = "failure_store"; - /** - * @deprecated this value will be replaced by the selector `::*` on the expression level - */ - @Deprecated - public static final String INCLUDE_ALL = "include"; - /** - * @deprecated this value will be replaced by the selector `::data` on the expression level - */ - @Deprecated - public static final String INCLUDE_ONLY_REGULAR_INDICES = "exclude"; - /** - * @deprecated this value will be replaced by the selector `::failures` on the expression level - */ - @Deprecated - public static final String INCLUDE_ONLY_FAILURE_INDICES = "only"; - public static IndicesOptions.Builder builder() { return new Builder(); } @@ -324,14 +299,14 @@ public static Builder builder(WildcardOptions wildcardOptions) { * - The ignoreThrottled flag, which is a deprecated flag that will filter out frozen indices. * @param allowAliasToMultipleIndices, allow aliases to multiple indices, true by default. * @param allowClosedIndices, allow closed indices, true by default. - * @param allowFailureIndices, allow failure indices in the response, true by default + * @param allowSelectors, allow selectors within index expressions, true by default. * @param ignoreThrottled, filters out throttled (aka frozen indices), defaults to true. This is deprecated and the only one * that only filters and never throws an error. 
*/ public record GatekeeperOptions( boolean allowAliasToMultipleIndices, boolean allowClosedIndices, - boolean allowFailureIndices, + boolean allowSelectors, @Deprecated boolean ignoreThrottled ) implements ToXContentFragment { @@ -355,7 +330,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public static class Builder { private boolean allowAliasToMultipleIndices; private boolean allowClosedIndices; - private boolean allowFailureIndices; + private boolean allowSelectors; private boolean ignoreThrottled; public Builder() { @@ -365,7 +340,7 @@ public Builder() { Builder(GatekeeperOptions options) { allowAliasToMultipleIndices = options.allowAliasToMultipleIndices; allowClosedIndices = options.allowClosedIndices; - allowFailureIndices = options.allowFailureIndices; + allowSelectors = options.allowSelectors; ignoreThrottled = options.ignoreThrottled; } @@ -388,11 +363,12 @@ public Builder allowClosedIndices(boolean allowClosedIndices) { } /** - * Failure indices are accepted when true, otherwise the resolution will throw an error. + * Selectors are allowed within index expressions when true, otherwise the resolution will treat their presence as a syntax + * error when resolving index expressions. * Defaults to true. 
*/ - public Builder allowFailureIndices(boolean allowFailureIndices) { - this.allowFailureIndices = allowFailureIndices; + public Builder allowSelectors(boolean allowSelectors) { + this.allowSelectors = allowSelectors; return this; } @@ -405,7 +381,7 @@ public Builder ignoreThrottled(boolean ignoreThrottled) { } public GatekeeperOptions build() { - return new GatekeeperOptions(allowAliasToMultipleIndices, allowClosedIndices, allowFailureIndices, ignoreThrottled); + return new GatekeeperOptions(allowAliasToMultipleIndices, allowClosedIndices, allowSelectors, ignoreThrottled); } } @@ -418,50 +394,6 @@ public static Builder builder(GatekeeperOptions gatekeeperOptions) { } } - /** - * Defines which selectors should be used by default for an index operation in the event that no selectors are provided. - */ - public record SelectorOptions(IndexComponentSelector defaultSelector) implements Writeable { - - public static final SelectorOptions ALL_APPLICABLE = new SelectorOptions(IndexComponentSelector.ALL_APPLICABLE); - public static final SelectorOptions DATA = new SelectorOptions(IndexComponentSelector.DATA); - public static final SelectorOptions FAILURES = new SelectorOptions(IndexComponentSelector.FAILURES); - /** - * Default instance. Uses
::data
as the default selector if none are present in an index expression. - */ - public static final SelectorOptions DEFAULT = DATA; - - public static SelectorOptions read(StreamInput in) throws IOException { - if (in.getTransportVersion().before(TransportVersions.INTRODUCE_ALL_APPLICABLE_SELECTOR)) { - EnumSet set = in.readEnumSet(IndexComponentSelector.class); - if (set.isEmpty() || set.size() == 2) { - assert set.contains(IndexComponentSelector.DATA) && set.contains(IndexComponentSelector.FAILURES) - : "The enum set only supported ::data and ::failures"; - return SelectorOptions.ALL_APPLICABLE; - } else if (set.contains(IndexComponentSelector.DATA)) { - return SelectorOptions.DATA; - } else { - return SelectorOptions.FAILURES; - } - } else { - return new SelectorOptions(IndexComponentSelector.read(in)); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().before(TransportVersions.INTRODUCE_ALL_APPLICABLE_SELECTOR)) { - switch (defaultSelector) { - case ALL_APPLICABLE -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.DATA, IndexComponentSelector.FAILURES)); - case DATA -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.DATA)); - case FAILURES -> out.writeEnumSet(EnumSet.of(IndexComponentSelector.FAILURES)); - } - } else { - defaultSelector.writeTo(out); - } - } - } - /** * This class is maintained for backwards compatibility and performance purposes. We use it for serialisation along with {@link Option}. 
*/ @@ -497,7 +429,8 @@ private enum Option { ERROR_WHEN_CLOSED_INDICES, IGNORE_THROTTLED, - ALLOW_FAILURE_INDICES // Added in 8.14 + ALLOW_FAILURE_INDICES, // Added in 8.14, Removed in 8.18 + ALLOW_SELECTORS // Added in 8.18 } private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(IndicesOptions.class); @@ -510,8 +443,7 @@ private enum Option { public static final IndicesOptions DEFAULT = new IndicesOptions( ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS, WildcardOptions.DEFAULT, - GatekeeperOptions.DEFAULT, - SelectorOptions.DEFAULT + GatekeeperOptions.DEFAULT ); public static final IndicesOptions STRICT_EXPAND_OPEN = IndicesOptions.builder() @@ -528,10 +460,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -547,10 +478,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -566,10 +496,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_NO_SELECTORS = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -585,7 +514,7 @@ 
private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(false) + .allowSelectors(false) .ignoreThrottled(false) ) .build(); @@ -603,10 +532,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -622,10 +550,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -636,10 +563,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_HIDDEN_NO_SELECTOR = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS) @@ -650,7 +576,7 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(false) + .allowSelectors(false) .ignoreThrottled(false) ) .build(); @@ -668,10 +594,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static 
final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -682,10 +607,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN_NO_SELECTORS = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -696,7 +620,7 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(false) + .allowSelectors(false) .ignoreThrottled(false) ) .build(); @@ -714,10 +638,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -728,10 +651,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ALL_APPLICABLE) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_FAILURE_STORE = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -747,10 +669,9 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(true) .allowClosedIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.ALL_APPLICABLE) 
.build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -766,10 +687,9 @@ private enum Option { GatekeeperOptions.builder() .allowClosedIndices(false) .allowAliasToMultipleIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -785,10 +705,9 @@ private enum Option { GatekeeperOptions.builder() .allowClosedIndices(false) .allowAliasToMultipleIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED_IGNORE_THROTTLED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -804,10 +723,9 @@ private enum Option { GatekeeperOptions.builder() .ignoreThrottled(true) .allowClosedIndices(false) - .allowFailureIndices(true) + .allowSelectors(true) .allowAliasToMultipleIndices(true) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -823,10 +741,27 @@ private enum Option { GatekeeperOptions.builder() .allowAliasToMultipleIndices(false) .allowClosedIndices(false) - .allowFailureIndices(true) + .allowSelectors(false) + .ignoreThrottled(false) + ) + .build(); + public static final IndicesOptions STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED_ALLOW_SELECTORS = IndicesOptions.builder() + .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) + .wildcardOptions( + 
WildcardOptions.builder() + .matchOpen(false) + .matchClosed(false) + .includeHidden(false) + .allowEmptyExpressions(true) + .resolveAliases(true) + ) + .gatekeeperOptions( + GatekeeperOptions.builder() + .allowAliasToMultipleIndices(false) + .allowClosedIndices(false) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); public static final IndicesOptions STRICT_NO_EXPAND_FORBID_CLOSED = IndicesOptions.builder() .concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS) @@ -842,10 +777,9 @@ private enum Option { GatekeeperOptions.builder() .allowClosedIndices(false) .allowAliasToMultipleIndices(true) - .allowFailureIndices(true) + .allowSelectors(true) .ignoreThrottled(false) ) - .selectorOptions(SelectorOptions.DATA) .build(); /** @@ -903,10 +837,10 @@ public boolean forbidClosedIndices() { } /** - * @return Whether execution on failure indices is allowed. + * @return Whether selectors (::) are allowed in the index expression. */ - public boolean allowFailureIndices() { - return gatekeeperOptions.allowFailureIndices(); + public boolean allowSelectors() { + return DataStream.isFailureStoreFeatureFlagEnabled() && gatekeeperOptions.allowSelectors(); } /** @@ -930,20 +864,6 @@ public boolean ignoreThrottled() { return gatekeeperOptions().ignoreThrottled(); } - /** - * @return whether regular indices (stand-alone or backing indices) will be included in the response - */ - public boolean includeRegularIndices() { - return selectorOptions().defaultSelector().shouldIncludeData(); - } - - /** - * @return whether failure indices (only supported by certain data streams) will be included in the response - */ - public boolean includeFailureIndices() { - return selectorOptions().defaultSelector().shouldIncludeFailures(); - } - public void writeIndicesOptions(StreamOutput out) throws IOException { EnumSet