diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 46da86dd4d1fd..c5414fa994330 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -60,6 +60,7 @@ BWC_VERSION: - "7.17.9" - "7.17.10" - "7.17.11" + - "7.17.12" - "8.0.0" - "8.0.1" - "8.1.0" @@ -90,5 +91,6 @@ BWC_VERSION: - "8.8.0" - "8.8.1" - "8.8.2" + - "8.8.3" - "8.9.0" - "8.10.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index f6efee522355e..a59023ae923da 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,5 @@ BWC_VERSION: - - "7.17.11" - - "8.8.2" + - "7.17.12" + - "8.8.3" - "8.9.0" - "8.10.0" diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt index 64f89d6c57edf..a8f380195e16c 100644 --- a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt +++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt @@ -150,3 +150,6 @@ org.elasticsearch.cluster.service.ClusterService#submitUnbatchedStateUpdateTask( @defaultMessage Reacting to the published cluster state is an obstruction to batching cluster state tasks which leads to performance and stability bugs. Use the variants that accept a Runnable instead. org.elasticsearch.cluster.ClusterStateTaskExecutor$TaskContext#success(java.util.function.Consumer) org.elasticsearch.cluster.ClusterStateTaskExecutor$TaskContext#success(java.util.function.Consumer, org.elasticsearch.cluster.ClusterStateAckListener) + +@defaultMessage ClusterState#transportVersions are for internal use only. Use ClusterState#getMinTransportVersion or a different version. See TransportVersion javadocs for more info. +org.elasticsearch.cluster.ClusterState#transportVersions() diff --git a/docs/changelog/93545.yaml b/docs/changelog/93545.yaml new file mode 100644 index 0000000000000..4367e44024e58 --- /dev/null +++ b/docs/changelog/93545.yaml @@ -0,0 +1,5 @@ +pr: 93545 +summary: Improve error message when aggregation doesn't support counter field +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/96161.yaml b/docs/changelog/96161.yaml index a255a4e71083e..66368c403a94d 100644 --- a/docs/changelog/96161.yaml +++ b/docs/changelog/96161.yaml @@ -4,3 +4,28 @@ area: "Search" type: enhancement issues: - 95541 +highlight: + title: Better indexing and search performance under concurrent indexing and search + body: "When a query like a match phrase query or a terms query targets a constant keyword field we can skip\ query execution on shards where the query is rewritten to match no documents. We take advantage of index mappings\ including constant keyword fields and rewrite queries in such a way that, if a constant keyword field does not\ match the value defined in the index mapping, we rewrite the query to match no document. This causes the\ shard-level request to return immediately, before the query is executed on the data node, skipping\ the shard completely. We leverage this ability to skip shards whenever possible to avoid unnecessary shard\ refreshes and improve query latency, since the search thread no longer needs to wait on those refreshes or perform any\ search-related I/O for skipped shards. Shards\ not matching the query criteria will remain in a search-idle state and indexing throughput will not be negatively\ affected by a refresh.
Before introducing this change, a query hitting multiple shards, including those with no\ documents matching the search criteria (think about using index patterns or data streams with many backing indices),\ would potentially result in a \"shard refresh storm\", increasing query latency as a result of the search thread\ waiting on all shard refreshes to complete before being able to initiate and carry out the search operation.\ After introducing this change, the search thread only needs to wait for refreshes to complete on shards\ containing relevant data. Note that execution of the shard pre-filter and the corresponding \"can match\" phase, where\ rewriting happens, depends on the overall number of shards involved and on whether there is at least one of them\ returning a non-empty result (see the 'pre_filter_shard_size' setting to understand how to control this behaviour).\ Elasticsearch does the rewrite operation on the data node in the so-called \"can match\" phase, taking advantage of\ the fact that, at that moment, we can access index mappings and extract information about constant keyword fields\ and their values. This means we still \"fan out\" search queries from the coordinator node to involved data nodes.\ Rewriting queries based on index mappings is not possible on the coordinator node because the coordinator node is\ missing index mapping information." + notable: true diff --git a/docs/changelog/96243.yaml b/docs/changelog/96243.yaml deleted file mode 100644 index fc89aa67ce1c8..0000000000000 --- a/docs/changelog/96243.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 96243 -summary: Support dotted field notations in the reroute processor -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/96251.yaml b/docs/changelog/96251.yaml deleted file mode 100644 index 8405e710a2ad6..0000000000000 --- a/docs/changelog/96251.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 96251 -summary: Avoiding running IO on scheduler thread in `ResourceWatcherService` -area: Watcher -type: bug -issues: [] diff --git a/docs/changelog/96540.yaml b/docs/changelog/96540.yaml deleted file mode 100644 index 4021ede176923..0000000000000 --- a/docs/changelog/96540.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 96540 -summary: Fix translation of queries involving Version vals -area: SQL -type: bug -issues: - - 96509 diff --git a/docs/changelog/96551.yaml b/docs/changelog/96551.yaml deleted file mode 100644 index 3184ea120869b..0000000000000 --- a/docs/changelog/96551.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 96551 -summary: Make cluster health API cancellable -area: Distributed -type: bug -issues: [] diff --git a/docs/changelog/96606.yaml b/docs/changelog/96606.yaml deleted file mode 100644 index 4ff1f498b2661..0000000000000 --- a/docs/changelog/96606.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 96606 -summary: The get data stream api incorrectly prints warning log for upgraded tsdb - data streams -area: TSDB -type: bug -issues: [] diff --git a/docs/changelog/96668.yaml b/docs/changelog/96668.yaml deleted file mode 100644 index 483c0f462743f..0000000000000 --- a/docs/changelog/96668.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 96668 -summary: Fix iteration of empty percentiles throwing Null Pointer Exception -area: Aggregations -type: bug -issues: - - 96626 diff --git a/docs/changelog/96738.yaml b/docs/changelog/96738.yaml deleted file mode 100644 index f4fb71d42f812..0000000000000 --- a/docs/changelog/96738.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 96738 -summary: Ensure NLP model inference queue is always
cleared after shutdown or failure -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/96782.yaml b/docs/changelog/96782.yaml deleted file mode 100644 index a3bb799d63403..0000000000000 --- a/docs/changelog/96782.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 96782 -summary: Increase concurrent request of opening point-in-time -area: Search -type: bug -issues: [] diff --git a/docs/changelog/96785.yaml b/docs/changelog/96785.yaml deleted file mode 100644 index 9918cbae46633..0000000000000 --- a/docs/changelog/96785.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 96785 -summary: Adding null check to fix potential NPE -area: Transform -type: enhancement -issues: - - 96781 diff --git a/docs/changelog/96821.yaml b/docs/changelog/96821.yaml deleted file mode 100644 index 60cc5cc35c0e5..0000000000000 --- a/docs/changelog/96821.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 96821 -summary: Change rollup thread pool settings -area: TSDB -type: enhancement -issues: - - 96758 diff --git a/docs/changelog/96843.yaml b/docs/changelog/96843.yaml deleted file mode 100644 index c1f4439bc65ab..0000000000000 --- a/docs/changelog/96843.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 96843 -summary: Uses `ClusterSettings` instead of Node `Settings` in `HealthMetadataService` -area: Health -type: bug -issues: - - 96219 diff --git a/docs/changelog/97041.yaml b/docs/changelog/97041.yaml new file mode 100644 index 0000000000000..6bd6f642be26b --- /dev/null +++ b/docs/changelog/97041.yaml @@ -0,0 +1,5 @@ +pr: 97041 +summary: Introduce downsampling configuration for data stream lifecycle +area: Data streams +type: feature +issues: [] diff --git a/docs/changelog/97159.yaml b/docs/changelog/97159.yaml new file mode 100644 index 0000000000000..ddd7bb928d7b6 --- /dev/null +++ b/docs/changelog/97159.yaml @@ -0,0 +1,5 @@ +pr: 97159 +summary: Improve exists query rewrite +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/97203.yaml b/docs/changelog/97203.yaml new file mode 100644 index 0000000000000..56d9ddd446b7d --- /dev/null +++ b/docs/changelog/97203.yaml @@ -0,0 +1,5 @@ +pr: 97203 +summary: Fix possible NPE when transportversion is null in `MainResponse` +area: Infra/REST API +type: bug +issues: [] diff --git a/docs/changelog/97224.yaml b/docs/changelog/97224.yaml new file mode 100644 index 0000000000000..50605bd6ad67f --- /dev/null +++ b/docs/changelog/97224.yaml @@ -0,0 +1,5 @@ +pr: 97224 +summary: Remove exception wrapping in `BatchedRerouteService` +area: Allocation +type: bug +issues: [] diff --git a/docs/changelog/97234.yaml b/docs/changelog/97234.yaml new file mode 100644 index 0000000000000..c4326fcfcc1ca --- /dev/null +++ b/docs/changelog/97234.yaml @@ -0,0 +1,5 @@ +pr: 97234 +summary: Add "operator" field to authenticate response +area: Authorization +type: enhancement +issues: [] diff --git a/docs/plugins/analysis-nori.asciidoc b/docs/plugins/analysis-nori.asciidoc index a28c55553d697..1a3153fa3bea5 100644 --- a/docs/plugins/analysis-nori.asciidoc +++ b/docs/plugins/analysis-nori.asciidoc @@ -305,7 +305,7 @@ Which responds with: The `nori_part_of_speech` token filter removes tokens that match a set of part-of-speech tags. 
The list of supported tags and their meanings can be found here: -{lucene-core-javadoc}/../analyzers-nori/org/apache/lucene/analysis/ko/POS.Tag.html[Part of speech tags] +{lucene-core-javadoc}/../analysis/nori/org/apache/lucene/analysis/ko/POS.Tag.html[Part of speech tags] It accepts the following setting: diff --git a/docs/reference/data-management.asciidoc b/docs/reference/data-management.asciidoc index fa7d10c37863e..f189ef4e4e965 100644 --- a/docs/reference/data-management.asciidoc +++ b/docs/reference/data-management.asciidoc @@ -20,17 +20,32 @@ so you can move it to less expensive, less performant hardware. For your oldest data, what matters is that you have access to the data. It's ok if queries take longer to complete. -To help you manage your data, {es} enables you to: +To help you manage your data, {es} offers you: +* <> ({ilm-init}), which manages both indices and data streams and is fully customisable, and +* <>, the built-in lifecycle of data streams, which addresses the most +common lifecycle management needs. + +preview::["The built-in data stream lifecycle is in technical preview and may be changed or removed in a future release. Elastic will apply best effort to fix any issues, but this feature is not subject to the support SLA of official GA features."] + +**{ilm-init}** can be used to manage both indices and data streams. It allows you to: + +* Define the retention period of your data. The retention period is the minimum time your data will be stored in {es}. +Data older than this period can be deleted by {es}. * Define <> of data nodes with different performance characteristics. -* Automatically transition indices through the data tiers according to your performance needs and retention policies -with <> ({ilm-init}). +* Automatically transition indices through the data tiers according to your performance needs and retention policies. * Leverage <> stored in a remote repository to provide resiliency for your older indices while reducing operating costs and maintaining search performance. * Perform <> of data stored on less-performant hardware. + +**Data stream lifecycle** is less feature-rich but is focused on simplicity, so it allows you to easily: + +* Define the retention period of your data, as shown in the example below. The retention period is the minimum time your data will be stored in {es}. +Data older than this period can be deleted by {es} at a later time. +* Improve the performance of your data stream by performing background operations that will optimise the way your data +stream is stored.
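For illustration, a minimal sketch of configuring that retention with the data stream lifecycle API introduced later in this change (the stream name `my-data-stream` is a placeholder):

[source,console]
----
PUT _data_stream/my-data-stream/_lifecycle
{
  "data_retention": "7d"
}
----

With this lifecycle in place, {es} is allowed to delete data older than seven days at its own discretion.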
-- include::ilm/index.asciidoc[] include::datatiers.asciidoc[] - diff --git a/docs/reference/data-streams/data-stream-apis.asciidoc b/docs/reference/data-streams/data-stream-apis.asciidoc index 42b42ead04ab7..b21a08aa853b2 100644 --- a/docs/reference/data-streams/data-stream-apis.asciidoc +++ b/docs/reference/data-streams/data-stream-apis.asciidoc @@ -12,6 +12,14 @@ The following APIs are available for managing <>: * <> * <> +[[data-stream-lifecycle-api]] +The following APIs are available for managing the built-in lifecycle of data streams: + +* <> preview:[] +* <> preview:[] +* <> preview:[] +* <> preview:[] + The following API is available for <>: * <> @@ -33,4 +41,12 @@ include::{es-repo-dir}/data-streams/promote-data-stream-api.asciidoc[] include::{es-repo-dir}/data-streams/modify-data-streams-api.asciidoc[] +include::{es-repo-dir}/data-streams/lifecycle/apis/put-lifecycle.asciidoc[] + +include::{es-repo-dir}/data-streams/lifecycle/apis/get-lifecycle.asciidoc[] + +include::{es-repo-dir}/data-streams/lifecycle/apis/delete-lifecycle.asciidoc[] + +include::{es-repo-dir}/data-streams/lifecycle/apis/explain-lifecycle.asciidoc[] + include::{es-repo-dir}/indices/downsample-data-stream.asciidoc[] diff --git a/docs/reference/data-streams/data-streams.asciidoc b/docs/reference/data-streams/data-streams.asciidoc index 9c8864f42c24d..307930d64c4fb 100644 --- a/docs/reference/data-streams/data-streams.asciidoc +++ b/docs/reference/data-streams/data-streams.asciidoc @@ -135,3 +135,4 @@ include::set-up-a-data-stream.asciidoc[] include::use-a-data-stream.asciidoc[] include::change-mappings-and-settings.asciidoc[] include::tsds.asciidoc[] +include::lifecycle/index.asciidoc[] diff --git a/docs/reference/dlm/apis/delete-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc similarity index 91% rename from docs/reference/dlm/apis/delete-lifecycle.asciidoc rename to docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc index f01992d522267..fd481d7ca4815 100644 --- a/docs/reference/dlm/apis/delete-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc @@ -1,10 +1,10 @@ -[[dlm-delete-lifecycle]] +[[data-streams-delete-lifecycle]] === Delete the lifecycle of a data stream ++++ Delete Data Stream Lifecycle ++++ -experimental::[] +preview::[] Deletes the lifecycle from a set of data streams. @@ -14,18 +14,18 @@ Deletes the lifecycle from a set of data streams. * If the {es} {security-features} are enabled, you must have the `manage_data_stream_lifecycle` index privilege or higher to use this API. For more information, see <>. -[[dlm-delete-lifecycle-request]] +[[data-streams-delete-lifecycle-request]] ==== {api-request-title} `DELETE _data_stream//_lifecycle` -[[dlm-delete-lifecycle-desc]] +[[data-streams-delete-lifecycle-desc]] ==== {api-description-title} Deletes the lifecycle from the specified data streams. If multiple data streams are provided but at least one of them does not exist, then the deletion of the lifecycle will fail for all of them and the API will respond with `404`. -[[dlm-delete-lifecycle-path-params]] +[[data-streams-delete-lifecycle-path-params]] ==== {api-path-parms-title} ``:: @@ -41,7 +41,7 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ds-expand-wildcards] + Defaults to `open`. 
-[[dlm-delete-lifecycle-example]] +[[data-streams-delete-lifecycle-example]] ==== {api-examples-title} //// diff --git a/docs/reference/dlm/apis/explain-data-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc similarity index 83% rename from docs/reference/dlm/apis/explain-data-lifecycle.asciidoc rename to docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc index 25b75e1a53aff..bb685fa10b2ba 100644 --- a/docs/reference/dlm/apis/explain-data-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc @@ -1,41 +1,39 @@ -[[dlm-explain-lifecycle]] -=== Explain Lifecycle API +[[data-streams-explain-lifecycle]] +=== Explain data stream lifecycle ++++ -Explain Data Lifecycle +Explain Data Stream Lifecycle ++++ -experimental::[] +preview::[] Retrieves the current data lifecycle status for one or more data stream backing indices. [[explain-lifecycle-api-prereqs]] ==== {api-prereq-title} -* Nit: would rephrase as: - If the {es} {security-features} are enabled, you must have at least the `manage_data_stream_lifecycle` index privilege or `view_index_metadata` index privilege to use this API. For more information, see <>. -[[dlm-explain-lifecycle-request]] +[[data-streams-explain-lifecycle-request]] ==== {api-request-title} `GET /_lifecycle/explain` -[[dlm-explain-lifecycle-desc]] +[[data-streams-explain-lifecycle-desc]] ==== {api-description-title} -Retrieves information about the index's current DLM lifecycle state, such as +Retrieves information about the index or data stream's current data stream lifecycle state, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any error that {es} might've encountered during the lifecycle execution. -[[dlm-explain-lifecycle-path-params]] +[[data-streams-explain-lifecycle-path-params]] ==== {api-path-parms-title} ``:: (Required, string) Comma-separated list of indices. -[[dlm-explain-lifecycle-query-params]] +[[data-streams-explain-lifecycle-query-params]] ==== {api-query-parms-title} `include_defaults`:: @@ -44,7 +42,7 @@ execution. include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] -[[dlm-explain-lifecycle-example]] +[[data-streams-explain-lifecycle-example]] ==== {api-examples-title} The following example retrieves the lifecycle state of the index `.ds-metrics-2023.03.22-000001`: @@ -53,9 +51,9 @@ The following example retrieves the lifecycle state of the index `.ds-metrics-20 -------------------------------------------------- GET .ds-metrics-2023.03.22-000001/_lifecycle/explain -------------------------------------------------- -// TEST[skip:we're not setting up DLM in these tests] +// TEST[skip:we're not setting up data stream lifecycle in these tests] -If the index is managed by DLM `explain` will show the `managed_by_lifecycle` field +If the index is managed by a data stream lifecycle `explain` will show the `managed_by_lifecycle` field set to `true` and the rest of the response will contain information about the lifecycle execution status for this index: @@ -77,8 +75,8 @@ lifecycle execution status for this index: -------------------------------------------------- // TESTRESPONSE[skip:the result is for illustrating purposes only] -<1> Shows if the index is being managed by DLM. If the index is not managed by -DLM the other fields will not be shown +<1> Shows if the index is being managed by data stream lifecycle. 
If the index is not managed by +a data stream lifecycle the other fields will not be shown <2> When the index was created, this timestamp is used to determine when to rollover <3> The time since the index creation (used for calculating when to rollover diff --git a/docs/reference/dlm/apis/get-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc similarity index 91% rename from docs/reference/dlm/apis/get-lifecycle.asciidoc rename to docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc index 5332e2a293b7a..64c8dab90ef61 100644 --- a/docs/reference/dlm/apis/get-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc @@ -1,10 +1,10 @@ -[[dlm-get-lifecycle]] +[[data-streams-get-lifecycle]] === Get the lifecycle of a data stream ++++ Get Data Stream Lifecycle ++++ -experimental::[] +preview::[] Gets the lifecycle of a set of data streams. @@ -15,12 +15,12 @@ Gets the lifecycle of a set of data streams. <>, the `manage_data_stream_lifecycle` index privilege, or the `view_index_metadata` privilege to use this API. For more information, see <>. -[[dlm-get-lifecycle-request]] +[[data-streams-get-lifecycle-request]] ==== {api-request-title} `GET _data_stream//_lifecycle` -[[dlm-get-lifecycle-desc]] +[[data-streams-get-lifecycle-desc]] ==== {api-description-title} Gets the lifecycle of the specified data streams. If multiple data streams are requested but at least one of them @@ -28,7 +28,7 @@ does not exist, then the API will respond with `404` since at least one of the r If the requested data streams do not have a lifecycle configured they will still be included in the API response but the `lifecycle` key will be missing. -[[dlm-get-lifecycle-path-params]] +[[data-streams-get-lifecycle-path-params]] ==== {api-path-parms-title} ``:: @@ -75,12 +75,12 @@ duration the document could be deleted. When undefined, every document in this d `rollover`:: (Optional, object) The conditions which will trigger the rollover of a backing index as configured by the cluster setting -`cluster.lifecycle.default.rollover`. This property is an implementation detail and it will only be retrieved when the query -param `include_defaults` is set to `true`. The contents of this field are subject to change. +`cluster.lifecycle.default.rollover`. This property is an implementation detail and it will only be retrieved +when the query param `include_defaults` is set to `true`. The contents of this field are subject to change. ===== ==== -[[dlm-get-lifecycle-example]] +[[data-streams-get-lifecycle-example]] ==== {api-examples-title} //// diff --git a/docs/reference/dlm/apis/put-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc similarity index 91% rename from docs/reference/dlm/apis/put-lifecycle.asciidoc rename to docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc index 3a77ae984b264..1fe0bdb3ee9c4 100644 --- a/docs/reference/dlm/apis/put-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc @@ -1,10 +1,10 @@ -[[dlm-put-lifecycle]] +[[data-streams-put-lifecycle]] === Set the lifecycle of a data stream ++++ Put Data Stream Lifecycle ++++ -experimental::[] +preview::[] Configures the data lifecycle for the targeted data streams. @@ -14,18 +14,18 @@ Configures the data lifecycle for the targeted data streams. If the {es} {security-features} are enabled, you must have the `manage_data_stream_lifecycle` index privilege or higher to use this API. 
For more information, see <>. -[[dlm-put-lifecycle-request]] +[[data-streams-put-lifecycle-request]] ==== {api-request-title} `PUT _data_stream//_lifecycle` -[[dlm-put-lifecycle-desc]] +[[data-streams-put-lifecycle-desc]] ==== {api-description-title} Configures the data lifecycle for the targeted data streams. If multiple data streams are provided but at least one of them does not exist, then the update of the lifecycle will fail for all of them and the API will respond with `404`. -[[dlm-put-lifecycle-path-params]] +[[data-streams-put-lifecycle-path-params]] ==== {api-path-parms-title} ``:: @@ -55,7 +55,7 @@ If defined, every document added to this data stream will be stored at least for duration the document could be deleted. When empty, every document in this data stream will be stored indefinitely. ==== -[[dlm-put-lifecycle-example]] +[[data-streams-put-lifecycle-example]] ==== {api-examples-title} The following example sets the lifecycle of `my-data-stream`: diff --git a/docs/reference/data-streams/lifecycle/index.asciidoc b/docs/reference/data-streams/lifecycle/index.asciidoc new file mode 100644 index 0000000000000..9aacf14d8e61a --- /dev/null +++ b/docs/reference/data-streams/lifecycle/index.asciidoc @@ -0,0 +1,64 @@ +[role="xpack"] +[[data-stream-lifecycle]] +== Data stream lifecycle + +preview::[] + +A data stream lifecycle is the built-in mechanism data streams use to manage their lifecycle. It enables you to easily +automate the management of your data streams according to your retention requirements. For example, you could configure +the lifecycle to: + +* Ensure that data indexed in the data stream will be kept for at least the retention time you defined. +* Ensure that data older than the retention period will be deleted automatically by {es} at a later time. + +To achieve that, it supports: + +* Automatic <>, which chunks your incoming data into smaller pieces to facilitate better performance +and backwards-incompatible mapping changes. +* Configurable retention, which allows you to configure the time period for which your data is guaranteed to be stored. +{es} may delete data older than this time period at a later time. + +[discrete] +[[data-streams-lifecycle-how-it-works]] +=== How does it work? + +In intervals configured by <>, {es} goes over +each data stream and performs the following steps: + +1. Checks if the data stream has a data lifecycle configured, skipping any indices not part of a managed data stream. +2. Rolls over the write index of the data stream, if it fulfills the conditions defined by +<>. +3. Applies retention to the remaining backing indices. This means deleting the backing indices whose +`generation_time` is longer than the configured retention period. The `generation_time` is only applicable to rolled over backing +indices and it is either the time since the backing index got rolled over, or the time optionally configured in the +<> setting. + +IMPORTANT: We use the `generation_time` instead of the creation time because this ensures that all data in the backing +index has passed the retention period. As a result, the retention period is not the exact time data gets deleted, but +the minimum time data will be stored. + +NOTE: Steps `2` and `3` apply only to backing indices that are not already managed by {ilm-init}, meaning that these indices either do +not have an {ilm-init} policy defined, or if they do, they have <> +set to `false`.
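A minimal sketch of that hand-over, using the `index.lifecycle.prefer_ilm` index setting documented later in this change (the backing index name is a placeholder):

[source,console]
----
PUT .ds-my-data-stream-2023.04.19-000001/_settings
{
  "index.lifecycle.prefer_ilm": false
}
----

With `prefer_ilm` set to `false`, a backing index that has both an {ilm-init} policy and a data stream lifecycle is managed by the data stream lifecycle instead of {ilm-init}.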
+ +[discrete] +[[data-stream-lifecycle-configuration]] +=== Configuring data stream lifecycle + +Since the lifecycle is configured on the data stream level, the process to configure a lifecycle on a new data stream and +on an existing one differs. + +The following sections walk through these tutorials: + +* To create a new data stream with a lifecycle, you need to add the data lifecycle as part of the index template +that matches the name of your data stream (see <>). When a write operation +with the name of your data stream reaches {es}, the data stream will be created with the respective data lifecycle. +* To update the lifecycle of an existing data stream, you need to use the <> +to edit the lifecycle on the data stream itself (see <>). + +NOTE: Updating the data lifecycle of an existing data stream is different from updating the settings or the mapping, +because it is applied on the data stream level and not on the individual backing indices. + +include::tutorial-manage-new-data-stream.asciidoc[] + +include::tutorial-manage-existing-data-stream.asciidoc[] \ No newline at end of file diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-existing-data-stream.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-existing-data-stream.asciidoc new file mode 100644 index 0000000000000..31d6fce2c5295 --- /dev/null +++ b/docs/reference/data-streams/lifecycle/tutorial-manage-existing-data-stream.asciidoc @@ -0,0 +1,136 @@ +[role="xpack"] +[[tutorial-manage-existing-data-stream]] +=== Tutorial: Update existing data stream + +preview::[] + +To update the lifecycle of an existing data stream, perform the following actions: + +. <> +. <> + +[discrete] +[[set-lifecycle]] +==== Set a data stream's lifecycle + +To add or change the retention period of your data stream, you can use the <>. + +* You can set an infinite retention period, meaning that your data should never be deleted. For example: ++ +[source,console] +---- +PUT _data_stream/my-data-stream/_lifecycle +{ } <1> +---- +// TEST[setup:my_data_stream] +<1> An empty payload means that your data stream is still managed but the data will never be deleted. Managing a time +series data stream such as logs or metrics enables {es} to better store your data even if you do not use a retention period. + +* Or you can set the retention period of your choice. For example: ++ +[source,console] +---- +PUT _data_stream/my-data-stream/_lifecycle +{ + "data_retention": "30d" <1> +} +---- +// TEST[continued] +<1> The retention period of this data stream is set to 30 days. This means that {es} is allowed to delete data that is +older than 30 days at its own discretion. + +The changes in the lifecycle are applied to all backing indices of the data stream.
You can see the effect of the change +via the <>: + +[source,console] +-------------------------------------------------- +GET .ds-my-data-stream-*/_lifecycle/explain +-------------------------------------------------- +// TEST[continued] + +The response will look like: + +[source,console-result] +-------------------------------------------------- +{ + "indices": { + ".ds-my-data-stream-2023.04.19-000002": { + "index": ".ds-my-data-stream-2023.04.19-000002", <1> + "managed_by_lifecycle": true, <2> + "index_creation_date_millis": 1681919221417, + "time_since_index_creation": "6.85s", <3> + "lifecycle": { + "data_retention": "30d" <4> + } + }, + ".ds-my-data-stream-2023.04.17-000001": { + "index": ".ds-my-data-stream-2023.04.17-000001", <5> + "managed_by_lifecycle": true, <6> + "index_creation_date_millis": 1681745209501, + "time_since_index_creation": "48d", <7> + "rollover_date_millis": 1681919221419, + "time_since_rollover": "6.84s", <8> + "generation_time": "6.84s", <9> + "lifecycle": { + "data_retention": "30d" <10> + } + } + } +} +-------------------------------------------------- +// TEST[continued] +// TESTRESPONSE[skip:the result is for illustrating purposes only] +<1> The name of the backing index. +<2> This index is managed by a data stream lifecycle. +<3> The time that has passed since this index was created. +<4> The data retention for this index is at least 30 days, as it was recently updated. +<5> The name of the backing index. +<6> This index is managed by the built-in data stream lifecycle. +<7> The time that has passed since this index was created. +<8> The time that has passed since this index was <>. +<9> The time that will be used to determine when it's safe to delete this index and all its data. +<10> The data retention for this index is likewise at least 30 days, as it was recently updated. + +[discrete] +[[delete-lifecycle]] +==== Remove lifecycle for a data stream + +To remove the lifecycle of a data stream, you can use the <>. As a consequence, +the maintenance operations that were applied by the lifecycle will no longer be applied to the data stream and all its +backing indices. For example: + +[source,console] +-------------------------------------------------- +DELETE _data_stream/my-data-stream/_lifecycle +-------------------------------------------------- +// TEST[continued] + +You can then use the <> again to see that the indices are no longer managed. + +[source,console] +-------------------------------------------------- +GET .ds-my-data-stream-*/_lifecycle/explain +-------------------------------------------------- +// TEST[continued] +// TEST[teardown:data_stream_cleanup] + +[source,console-result] +-------------------------------------------------- +{ + "indices": { + ".ds-my-data-stream-2023.04.19-000002": { + "index": ".ds-my-data-stream-2023.04.19-000002", <1> + "managed_by_lifecycle": false <2> + }, + ".ds-my-data-stream-2023.04.17-000001": { + "index": ".ds-my-data-stream-2023.04.17-000001", <3> + "managed_by_lifecycle": false <4> + } + } +} +-------------------------------------------------- +// TESTRESPONSE[skip:the result is for illustrating purposes only] +<1> The name of the backing index. +<2> Indication that the index is not managed by the data stream lifecycle. +<3> The name of another backing index. +<4> Indication that the index is not managed by the data stream lifecycle.
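You can also confirm the removal with the get-lifecycle API described earlier in this change; a data stream without a configured lifecycle is still listed, but the `lifecycle` key is absent (response shape illustrative):

[source,console]
----
GET _data_stream/my-data-stream/_lifecycle
----

[source,console-result]
----
{
  "data_streams": [
    {
      "name": "my-data-stream"
    }
  ]
}
----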
diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc new file mode 100644 index 0000000000000..5d72709e47667 --- /dev/null +++ b/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc @@ -0,0 +1,148 @@ +[role="xpack"] +[[tutorial-manage-new-data-stream]] +=== Tutorial: Create a data stream with a lifecycle + +preview::[] + +To create a data stream with a built-in lifecycle, follow these steps: + +. <> +. <> +. <> + +[discrete] +[[create-index-template-with-lifecycle]] +==== Create an index template + +A data stream requires a matching <>. You can configure the data stream lifecycle by +setting the `lifecycle` field in the index template the same as you do for mappings and index settings. You can define an +index template that sets a lifecycle as follows: + +* Include the `data_stream` object to enable data streams. + +* Define the lifecycle in the template section or include a composable template that defines the lifecycle. + +* Use a priority higher than `200` to avoid collisions with built-in templates. +See <>. + +You can use the <> to create it: + +[source,console] +-------------------------------------------------- +PUT _index_template/my-index-template +{ + "index_patterns": ["my-data-stream*"], + "data_stream": { }, + "priority": 500, + "template": { + "lifecycle": { + "data_retention": "7d" + } + }, + "_meta": { + "description": "Template with data stream lifecycle" + } +} +-------------------------------------------------- + +[discrete] +[[create-data-stream-with-lifecycle]] +==== Create a data stream + +You can create a data stream in two ways: + +. By manually creating the stream using the <>. The stream's name must +still match one of your template's index patterns. ++ +[source,console] +-------------------------------------------------- +PUT _data_stream/my-data-stream +-------------------------------------------------- +// TEST[continued] + +. By <> that +target the stream's name. This name must match one of your index template's index patterns. ++ +[source,console] +-------------------------------------------------- +PUT my-data-stream/_bulk +{ "create":{ } } +{ "@timestamp": "2099-05-06T16:21:15.000Z", "message": "192.0.2.42 - - [06/May/2099:16:21:15 +0000] \"GET /images/bg.jpg HTTP/1.0\" 200 24736" } +{ "create":{ } } +{ "@timestamp": "2099-05-06T16:25:42.000Z", "message": "192.0.2.255 - - [06/May/2099:16:25:42 +0000] \"GET /favicon.ico HTTP/1.0\" 200 3638" } +-------------------------------------------------- +// TEST[continued] + +[discrete] +[[retrieve-lifecycle-information]] +==== Retrieve lifecycle information + +You can use the <> to see the data lifecycle of your data stream and +the <> to see the exact state of each backing index. + +[source,console] +-------------------------------------------------- +GET _data_stream/my-data-stream/_lifecycle +-------------------------------------------------- +// TEST[continued] + +The result will look like this: + +[source,console-result] +-------------------------------------------------- +{ + "data_streams": [ + { + "name": "my-data-stream",<1> + "lifecycle": { + "data_retention": "7d" <2> + } + } + ] +} +-------------------------------------------------- +<1> The name of your data stream. +<2> The retention period of the data indexed in this data stream; this means that the data in this data stream will +be kept for at least 7 days. After that, {es} can delete it at its own discretion.
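Before drilling into individual backing indices, you can list them with the get data stream API (documented elsewhere in this change):

[source,console]
----
GET _data_stream/my-data-stream
----

Its response includes the stream's backing indices, whose per-index lifecycle state the explain API below reports.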
+ +If you want to see more information about how the data stream lifecycle is applied on individual backing indices, use the +<>: + +[source,console] +-------------------------------------------------- +GET .ds-my-data-stream-*/_lifecycle/explain +-------------------------------------------------- +// TEST[continued] The result will look like this: + +[source,console-result] +-------------------------------------------------- +{ + "indices": { + ".ds-my-data-stream-2023.04.19-000001": { + "index": ".ds-my-data-stream-2023.04.19-000001", <1> + "managed_by_lifecycle": true, <2> + "index_creation_date_millis": 1681918009501, + "time_since_index_creation": "1.6m", <3> + "lifecycle": { <4> + "data_retention": "7d" + } + } + } +} +-------------------------------------------------- +// TESTRESPONSE[skip:the result is for illustrating purposes only] +<1> The name of the backing index. +<2> Whether it is managed by the built-in data stream lifecycle. +<3> Time since the index was created. +<4> The lifecycle configuration that is applied on this backing index. + +////////////////////////// +[source,console] +-------------------------------------------------- +DELETE _data_stream/my-data-stream +DELETE _index_template/my-index-template +-------------------------------------------------- +// TEST[continued] + +////////////////////////// \ No newline at end of file diff --git a/docs/reference/dlm/apis/dlm-api.asciidoc b/docs/reference/dlm/apis/dlm-api.asciidoc deleted file mode 100644 index f2e4eaa343361..0000000000000 --- a/docs/reference/dlm/apis/dlm-api.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -[[data-lifecycle-management-api]] -== Data Lifecycle Management APIs - -You use the following APIs to configure the data lifecycle management for data streams -and to retrieve lifecycle information for backing indices. - -[discrete] -[[dlm-api-management-endpoint]] -=== Operation management APIs - -* <> -* <> -* <> -* <> - -include::put-lifecycle.asciidoc[] -include::get-lifecycle.asciidoc[] -include::delete-lifecycle.asciidoc[] -include::explain-data-lifecycle.asciidoc[] - diff --git a/docs/reference/ilm/index-rollover.asciidoc b/docs/reference/ilm/index-rollover.asciidoc index 3755619a6f15a..a1616807c9ea6 100644 --- a/docs/reference/ilm/index-rollover.asciidoc +++ b/docs/reference/ilm/index-rollover.asciidoc @@ -42,7 +42,7 @@ On each rollover, the new index becomes the write index. [[ilm-automatic-rollover]] === Automatic rollover -{ilm-init} enables you to automatically roll over to a new index based +{ilm-init} and the data stream lifecycle (in preview:[]) enable you to automatically roll over to a new index based on conditions like the index size, document count, or age. When a rollover is triggered, a new index is created, the write alias is updated to point to the new index, and all subsequent updates are written to the new index. diff --git a/docs/reference/indices/get-component-template.asciidoc b/docs/reference/indices/get-component-template.asciidoc index 9e2ac1a798f8b..f3073406be2b1 100644 --- a/docs/reference/indices/get-component-template.asciidoc +++ b/docs/reference/indices/get-component-template.asciidoc @@ -72,7 +72,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=local] include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout] `include_defaults`:: -(Optional, Boolean) Functionality in experimental:[]. If `true`, return all default settings in the response. +(Optional, Boolean) Functionality in preview:[]. If `true`, return all default settings in the response.
Defaults to `false`. [[get-component-template-api-example]] diff --git a/docs/reference/indices/get-data-stream.asciidoc b/docs/reference/indices/get-data-stream.asciidoc index 255d2c8039848..2b7a1646b800b 100644 --- a/docs/reference/indices/get-data-stream.asciidoc +++ b/docs/reference/indices/get-data-stream.asciidoc @@ -100,7 +100,7 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ds-expand-wildcards] Defaults to `open`. `include_defaults`:: -(Optional, Boolean) Functionality in experimental:[]. If `true`, return all default settings in the response. +(Optional, Boolean) Functionality in preview:[]. If `true`, return all default settings in the response. Defaults to `false`. [role="child_attributes"] @@ -223,7 +223,7 @@ cluster can not write into this data stream or change its mappings. `lifecycle`:: (object) -Functionality in experimental:[]. Contains the configuration for the data lifecycle management of this data stream. +Functionality in preview:[]. Contains the configuration for the data lifecycle management of this data stream. + .Properties of `lifecycle` [%collapsible%open] diff --git a/docs/reference/indices/get-index-template.asciidoc b/docs/reference/indices/get-index-template.asciidoc index 1752b19e59d9c..9ae8af6f8441b 100644 --- a/docs/reference/indices/get-index-template.asciidoc +++ b/docs/reference/indices/get-index-template.asciidoc @@ -64,7 +64,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=local] include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout] `include_defaults`:: -(Optional, Boolean) Functionality in experimental:[]. If `true`, return all default settings in the response. +(Optional, Boolean) Functionality in preview:[]. If `true`, return all default settings in the response. Defaults to `false`. [[get-template-api-example]] diff --git a/docs/reference/indices/shard-stores.asciidoc b/docs/reference/indices/shard-stores.asciidoc index 316eb45866f44..79394fccd046c 100644 --- a/docs/reference/indices/shard-stores.asciidoc +++ b/docs/reference/indices/shard-stores.asciidoc @@ -172,8 +172,8 @@ The API returns the following response: "attributes": {}, "roles": [...], "version": "8.10.0", - "minIndexVersion": "7000099", - "maxIndexVersion": "8100099" + "min_index_version": 7000099, + "max_index_version": 8100099 }, "allocation_id": "2iNySv_OQVePRX-yaRH_lQ", <4> "allocation" : "primary|replica|unused" <5> diff --git a/docs/reference/indices/simulate-index.asciidoc b/docs/reference/indices/simulate-index.asciidoc index d4c446a58eeca..5e5709a2d82fc 100644 --- a/docs/reference/indices/simulate-index.asciidoc +++ b/docs/reference/indices/simulate-index.asciidoc @@ -63,7 +63,7 @@ Name of the index to simulate. include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] `include_defaults`:: -(Optional, Boolean) Functionality in experimental:[]. If `true`, return all default settings in the response. +(Optional, Boolean) Functionality in preview:[]. If `true`, return all default settings in the response. Defaults to `false`. [role="child_attributes"] diff --git a/docs/reference/indices/simulate-template.asciidoc b/docs/reference/indices/simulate-template.asciidoc index e876b2b9c519f..404aa70d72e70 100644 --- a/docs/reference/indices/simulate-template.asciidoc +++ b/docs/reference/indices/simulate-template.asciidoc @@ -95,7 +95,7 @@ Defaults to `false`. include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] `include_defaults`:: -(Optional, Boolean) Functionality in experimental:[]. 
If `true`, return all default settings in the response. +(Optional, Boolean) Functionality in preview:[]. If `true`, return all default settings in the response. Defaults to `false`. [role="child_attributes"] diff --git a/docs/reference/migration/apis/feature-migration.asciidoc b/docs/reference/migration/apis/feature-migration.asciidoc index ab314ff79eabb..87903fbb7758e 100644 --- a/docs/reference/migration/apis/feature-migration.asciidoc +++ b/docs/reference/migration/apis/feature-migration.asciidoc @@ -56,85 +56,85 @@ Example response: "features" : [ { "feature_name" : "async_search", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "enrich", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "ent_search", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "fleet", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "geoip", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "kibana", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "logstash_management", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "machine_learning", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "searchable_snapshots", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "security", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "synonyms", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "tasks", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "transform", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "watcher", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] } @@ -142,7 +142,7 @@ Example response: "migration_status" : "NO_MIGRATION_NEEDED" } -------------------------------------------------- - +// TESTRESPONSE[s/"minimum_index_version" : "8100099"/"minimum_index_version" : $body.$_path/] When you submit a POST request to the `_migration/system_features` endpoint to start the migration process, the response indicates what features will be diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 9fc19576a9ddd..9feffa1793787 
100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -10,10 +10,12 @@ _leader_ index is replicated to one or more read-only _follower_ indices on your configure disaster recovery, bring data closer to your users, or establish a centralized reporting cluster to process reports locally. -<> enables you to run a search request -against one or more remote clusters. This capability provides each region -with a global view of all clusters, allowing you to send a search request from -a local cluster and return results from all connected remote clusters. +<> enables you to run a search request +against one or more remote clusters. This capability provides each region with a +global view of all clusters, allowing you to send a search request from a local +cluster and return results from all connected remote clusters. For full {ccs} +capabilities, the local and remote clusters must be on the same +{subscriptions}[subscription level]. Enabling and configuring security is important on both local and remote clusters. When connecting a local cluster to remote clusters, an {es} superuser diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 7000818aa9346..47140e93ad980 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -1882,6 +1882,26 @@ Refer to <> for other Watcher examples. Refer to <>. +[role="exclude",id="dlm-delete-lifecycle"] +=== Delete the lifecycle of a data stream + +Refer to <>. + +[role="exclude",id="dlm-explain-lifecycle"] +=== Explain the lifecycle of a data stream + +Refer to <>. + +[role="exclude",id="dlm-get-lifecycle"] +=== Get the lifecycle of a data stream + +Refer to <>. + +[role="exclude",id="dlm-put-lifecycle"] +=== Update the lifecycle of a data stream + +Refer to <>. + [role="exclude",id="get-synonym-rule"] === Get synonym rule API diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index c67f83aa5f995..6245df668665a 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -20,7 +20,6 @@ not be included yet. * <> * <> * <> -* <> * <> * <> * <> @@ -98,4 +97,3 @@ include::{es-repo-dir}/transform/apis/index.asciidoc[] include::usage.asciidoc[] include::{xes-repo-dir}/rest-api/watcher.asciidoc[] include::defs.asciidoc[] -include::{es-repo-dir}/dlm/apis/dlm-api.asciidoc[] diff --git a/docs/reference/search-application/apis/get-search-application.asciidoc b/docs/reference/search-application/apis/get-search-application.asciidoc index 7c809e6c0ecad..0feba6145640e 100644 --- a/docs/reference/search-application/apis/get-search-application.asciidoc +++ b/docs/reference/search-application/apis/get-search-application.asciidoc @@ -8,7 +8,10 @@ beta::[] Get Search Application ++++ -Retrieves information about a Search Application. +Retrieves information about a search application. + +If the search application has an inconsistent state between its alias and configured indices, a warning header will be returned with the response. +To resolve this inconsistent state, issue an updated <> command.
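For illustration, a sketch of such an update, assuming a search application named `my-app` and placeholder index names (see the put-search-application documentation for the full request body):

[source,console]
----
PUT _application/search_application/my-app
{
  "indices": ["my-index-1", "my-index-2"]
}
----

Re-issuing the configuration recreates the alias so that it is consistent with the configured indices again.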
[[get-search-application-request]] ==== {api-request-title} diff --git a/docs/reference/search-application/apis/search-application-search.asciidoc b/docs/reference/search-application/apis/search-application-search.asciidoc index 8c9d750d089c2..eca006353e948 100644 --- a/docs/reference/search-application/apis/search-application-search.asciidoc +++ b/docs/reference/search-application/apis/search-application-search.asciidoc @@ -11,6 +11,9 @@ beta::[] Given specified query parameters, creates an Elasticsearch query to run. Any unspecified template parameters will be assigned their default values if applicable. +If the search application has an inconsistent state between its alias and configured indices, a warning header will be returned with the response. +To resolve this inconsistent state, issue an updated <> command. + [[search-application-search-request]] ==== {api-request-title} diff --git a/docs/reference/search/rrf.asciidoc b/docs/reference/search/rrf.asciidoc index b2a23e8cd7259..2137358a00de0 100644 --- a/docs/reference/search/rrf.asciidoc +++ b/docs/reference/search/rrf.asciidoc @@ -32,10 +32,11 @@ return score ==== Reciprocal rank fusion API You can use RRF as part of a <> to combine and rank -documents using multiple result sets from - -* 1 query and 1 or more kNN searches -* 2 or more kNN searches +documents using result sets from a combination of +<>, +<>, and/or +<>. A minimum of 2 result sets +is required for ranking from the specified sources. The `rrf` parameter is an optional object defined as part of a search request's <>. The `rrf` object contains the following @@ -95,8 +96,9 @@ truncated to `window_size`. If `k` is smaller than `window_size`, the results ar RRF does support: -* <> +* <> * <> +* <> RRF does not currently support: @@ -110,9 +112,61 @@ RRF does not currently support: * <> * <> -Using unsupported features as part of a search using RRF will result +Using unsupported features as part of a search with RRF results in an exception. +[[rrf-using-sub-searches]] +==== Reciprocal rank fusion using sub searches + +<> provides a way to +combine and rank multiple searches using RRF. + +An example request using RRF with sub searches: + +[source,console] +---- +GET example-index/_search +{ + "sub_searches": [ + { + "query": { + "term": { + "text": "blue shoes sale" + } + } + }, + { + "query": { + "text_expansion":{ + "ml.tokens":{ + "model_id":"my_elser_model", + "model_text":"What blue shoes are on sale?" + } + } + } + } + ], + "rank": { + "rrf": { + "window_size": 50, + "rank_constant": 20 + } + } +} +---- +// TEST[skip:example fragment] + +In the above example, we execute each of the two sub searches +independently of each other. First, we run the term query for +`blue shoes sale` using the standard BM25 scoring algorithm. Then +we run the text expansion query for `What blue shoes are on sale?` +using our <> scoring algorithm. +RRF allows us to combine the two result sets generated by completely +independent scoring algorithms with equal weighting. Not only does this +remove the need to figure out what the appropriate weighting would be +using linear combination, but RRF is also shown to give improved +relevance over either query individually.
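RRF can combine a standard query with a kNN search in the same way; a minimal sketch, with placeholder index, field, and vector values:

[source,console]
----
GET example-index/_search
{
  "query": {
    "term": {
      "text": "blue shoes sale"
    }
  },
  "knn": {
    "field": "vector",
    "query_vector": [0.1, 1.2, -0.5],
    "k": 50,
    "num_candidates": 100
  },
  "rank": {
    "rrf": {
      "window_size": 50,
      "rank_constant": 20
    }
  }
}
----
// TEST[skip:example fragment]

Here the BM25-scored term query and the vector search each produce a result set, and RRF merges them with equal weighting, just as it does for sub searches.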
+ [[rrf-full-example]] ==== Reciprocal rank fusion full example diff --git a/docs/reference/search/search-your-data/search-across-clusters.asciidoc b/docs/reference/search/search-your-data/search-across-clusters.asciidoc index 8f634bee2645a..bdaa703d98d54 100644 --- a/docs/reference/search/search-your-data/search-across-clusters.asciidoc +++ b/docs/reference/search/search-your-data/search-across-clusters.asciidoc @@ -32,6 +32,9 @@ run {es} on your own hardware, see <>. To ensure your remote cluster configuration supports {ccs}, see <>. +* For full {ccs} capabilities, the local and remote clusters must be on the same +{subscriptions}[subscription level]. + * The local coordinating node must have the <> node role. diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index 96c2c4dd86525..72f316cdd9728 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -556,10 +556,10 @@ Period of time used to extend the life of the PIT. [[request-body-rank]] `rank`:: (Optional, object) Defines a method for combining and ranking result sets from -either: -+ -* 1 query and 1 or more kNN searches -* 2 or more kNN searches +a combination of <>, +<>, and/or +<>. Requires a minimum of 2 result sets for +ranking from the specified sources. + .Ranking methods [%collapsible%open] @@ -715,6 +715,29 @@ Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the <>. +[[request-body-sub-searches]] +`sub_searches`:: +(Optional, array of objects) +An array of `sub_search` objects where each `sub_search` is evaluated +independently, and their result sets are later combined as part of +<>. Each `sub_search` object is required to +contain a single `query`. `sub_searches` is only allowed with the +<> element, and is not allowed in conjunction +with a top-level <> element. ++ +`sub_searches` as part of a search: +[source,js] +---- +{ + "sub_searches": [ + { "query": {...} }, + { "query": {...} } + ] + ... +} +---- +// NOTCONSOLE + include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=terminate_after] + Defaults to `0`, which does not terminate query execution early. diff --git a/docs/reference/settings/data-stream-lifecycle-settings.asciidoc b/docs/reference/settings/data-stream-lifecycle-settings.asciidoc new file mode 100644 index 0000000000000..8ca60b75e282e --- /dev/null +++ b/docs/reference/settings/data-stream-lifecycle-settings.asciidoc @@ -0,0 +1,51 @@ +[role="xpack"] +[[data-stream-lifecycle-settings]] +=== Data stream lifecycle settings in {es} +[subs="attributes"] +++++ +Data stream lifecycle settings +++++ + +preview::[] + +These are the settings available for configuring <>. + +==== Cluster level settings + +[[data-streams-lifecycle-poll-interval]] +`data_streams.lifecycle.poll_interval`:: +(<>, <>) +How often {es} checks what the next action is for all data streams with a built-in lifecycle. Defaults to `10m`. + +[[cluster-lifecycle-default-rollover]] +`cluster.lifecycle.default.rollover`:: +(<>, string) +This property accepts a key-value pair formatted string and configures the conditions that would trigger a data stream +to <> when it has `lifecycle` configured. This property is an implementation detail and subject to +change.
Currently, it defaults to `max_age=auto,max_primary_shard_size=50gb,min_docs=1,max_primary_shard_docs=200000000`, +which means that your data stream will roll over if any of the following conditions are met: + +* Either any primary shard reaches the size of 50GB, +* or any primary shard contains 200,000,000 documents, +* or the index reaches a certain age which depends on the retention time of your data stream, +* **and** has at least one document. + +==== Index level settings +The following index-level settings are typically configured on the backing indices of a data stream. + +[[index-lifecycle-prefer-ilm]] +`index.lifecycle.prefer_ilm`:: +(<>, boolean) +This setting determines which feature is managing the backing index of a data stream if, and only if, the backing index +has an <> ({ilm-init}) policy and the data stream also has a built-in lifecycle. When +`true`, this index is managed by {ilm-init}; when `false`, the backing index is managed by the data stream lifecycle. +Defaults to `true`. + +[[index-data-stream-lifecycle-origination-date]] +`index.lifecycle.origination_date`:: +(<>, long) +If specified, this is the timestamp used to calculate the backing index generation age after this backing index has been +<>. The generation age is used to determine data retention; consequently, you can use this +setting if you create a backing index that contains older data and want to ensure that the retention period or +other parts of the lifecycle will be applied based on the data's original timestamp and not the timestamp at which it was +indexed. Specified as a Unix epoch value in milliseconds. \ No newline at end of file diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc index 15482679a3069..e007b67a943b0 100644 --- a/docs/reference/setup.asciidoc +++ b/docs/reference/setup.asciidoc @@ -54,6 +54,8 @@ include::settings/health-diagnostic-settings.asciidoc[] include::settings/ilm-settings.asciidoc[] +include::settings/data-stream-lifecycle-settings.asciidoc[] + include::modules/indices/index_management.asciidoc[] include::modules/indices/recovery.asciidoc[] diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/AbstractObjectParser.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/AbstractObjectParser.java index 30841ee36d41a..32347297fea73 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/AbstractObjectParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/AbstractObjectParser.java @@ -403,7 +403,7 @@ public void declareFieldArray( */ public abstract void declareExclusiveFieldSet(String...
exclusiveSet); - private static List parseArray(XContentParser parser, Context context, ContextParser itemParser) + public static List parseArray(XContentParser parser, Context context, ContextParser itemParser) throws IOException { final XContentParser.Token currentToken = parser.currentToken(); if (currentToken.isValue() diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java index 9071812e965e0..e60b5daab737e 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.datastreams; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; @@ -20,14 +21,18 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettingProviders; import org.elasticsearch.indices.EmptySystemIndices; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidIndexTemplateException; import org.elasticsearch.indices.ShardLimitValidator; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.ESSingleNodeTestCase; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Set; @@ -151,34 +156,33 @@ public void testLifecycleComposition() { } // One lifecycle results to this lifecycle as the final { - DataLifecycle lifecycle = switch (randomInt(2)) { - case 0 -> new DataLifecycle(); - case 1 -> new DataLifecycle(DataLifecycle.Retention.NULL); - default -> new DataLifecycle(randomMillisUpToYear9999()); - }; + DataLifecycle lifecycle = new DataLifecycle(randomRetention(), randomDownsampling()); List lifecycles = List.of(lifecycle); - assertThat(composeDataLifecycles(lifecycles), equalTo(lifecycle)); + DataLifecycle result = composeDataLifecycles(lifecycles); + assertThat(result.getEffectiveDataRetention(), equalTo(lifecycle.getEffectiveDataRetention())); + assertThat(result.getDownsamplingRounds(), equalTo(lifecycle.getDownsamplingRounds())); } // If the last lifecycle is missing a property we keep the latest from the previous ones { - DataLifecycle lifecycleWithRetention = new DataLifecycle(randomMillisUpToYear9999()); - List lifecycles = List.of(lifecycleWithRetention, new DataLifecycle()); - assertThat( - composeDataLifecycles(lifecycles).getEffectiveDataRetention(), - equalTo(lifecycleWithRetention.getEffectiveDataRetention()) - ); + DataLifecycle lifecycle = new DataLifecycle(randomNonEmptyRetention(), randomNonEmptyDownsampling()); + List lifecycles = List.of(lifecycle, new DataLifecycle()); + DataLifecycle result = composeDataLifecycles(lifecycles); + assertThat(result.getEffectiveDataRetention(), equalTo(lifecycle.getEffectiveDataRetention())); + assertThat(result.getDownsamplingRounds(), equalTo(lifecycle.getDownsamplingRounds())); } // If both lifecycle have 
all properties, then the latest one overwrites all the others { - DataLifecycle lifecycle1 = new DataLifecycle(randomMillisUpToYear9999()); - DataLifecycle lifecycle2 = new DataLifecycle(randomMillisUpToYear9999()); + DataLifecycle lifecycle1 = new DataLifecycle(randomNonEmptyRetention(), randomNonEmptyDownsampling()); + DataLifecycle lifecycle2 = new DataLifecycle(randomNonEmptyRetention(), randomNonEmptyDownsampling()); List lifecycles = List.of(lifecycle1, lifecycle2); - assertThat(composeDataLifecycles(lifecycles), equalTo(lifecycle2)); + DataLifecycle result = composeDataLifecycles(lifecycles); + assertThat(result.getEffectiveDataRetention(), equalTo(lifecycle2.getEffectiveDataRetention())); + assertThat(result.getDownsamplingRounds(), equalTo(lifecycle2.getDownsamplingRounds())); } // If the last lifecycle is explicitly null, the result is also null { - DataLifecycle lifecycle1 = new DataLifecycle(randomMillisUpToYear9999()); - DataLifecycle lifecycle2 = new DataLifecycle(randomMillisUpToYear9999()); + DataLifecycle lifecycle1 = new DataLifecycle(randomNonEmptyRetention(), randomNonEmptyDownsampling()); + DataLifecycle lifecycle2 = new DataLifecycle(randomNonEmptyRetention(), randomNonEmptyDownsampling()); List lifecycles = List.of(lifecycle1, lifecycle2, Template.NO_LIFECYCLE); assertThat(composeDataLifecycles(lifecycles), nullValue()); } @@ -224,4 +228,49 @@ public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNo return new ShardLimitValidator(limitOnlySettings, clusterService); } + @Nullable + private static DataLifecycle.Retention randomRetention() { + return switch (randomInt(2)) { + case 0 -> null; + case 1 -> DataLifecycle.Retention.NULL; + default -> randomNonEmptyRetention(); + }; + } + + private static DataLifecycle.Retention randomNonEmptyRetention() { + return new DataLifecycle.Retention(TimeValue.timeValueMillis(randomMillisUpToYear9999())); + } + + @Nullable + private static DataLifecycle.Downsampling randomDownsampling() { + return switch (randomInt(2)) { + case 0 -> null; + case 1 -> DataLifecycle.Downsampling.NULL; + default -> randomNonEmptyDownsampling(); + }; + } + + private static DataLifecycle.Downsampling randomNonEmptyDownsampling() { + var count = randomIntBetween(0, 10); + List rounds = new ArrayList<>(); + var previous = new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(randomIntBetween(1, 365)), + new DownsampleConfig(new DateHistogramInterval(randomIntBetween(1, 24) + "h")) + ); + rounds.add(previous); + for (int i = 0; i < count; i++) { + DataLifecycle.Downsampling.Round round = nextRound(previous); + rounds.add(round); + previous = round; + } + return new DataLifecycle.Downsampling(rounds); + } + + private static DataLifecycle.Downsampling.Round nextRound(DataLifecycle.Downsampling.Round previous) { + var after = TimeValue.timeValueDays(previous.after().days() + randomIntBetween(1, 10)); + var fixedInterval = new DownsampleConfig( + new DateHistogramInterval((previous.config().getFixedInterval().estimateMillis() * randomIntBetween(2, 5)) + "ms") + ); + return new DataLifecycle.Downsampling.Round(after, fixedInterval); + } } diff --git a/modules/dlm/src/internalClusterTest/java/org/elasticsearch/dlm/CrudDataLifecycleIT.java b/modules/dlm/src/internalClusterTest/java/org/elasticsearch/dlm/CrudDataLifecycleIT.java index c161bd62aed82..e1326e0117217 100644 --- a/modules/dlm/src/internalClusterTest/java/org/elasticsearch/dlm/CrudDataLifecycleIT.java +++ 
b/modules/dlm/src/internalClusterTest/java/org/elasticsearch/dlm/CrudDataLifecycleIT.java @@ -24,7 +24,7 @@ import java.util.List; import static org.elasticsearch.dlm.DLMFixtures.putComposableIndexTemplate; -import static org.elasticsearch.dlm.DLMFixtures.randomDataLifecycle; +import static org.elasticsearch.dlm.DLMFixtures.randomLifecycle; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -42,7 +42,7 @@ protected boolean ignoreExternalCluster() { } public void testGetLifecycle() throws Exception { - DataLifecycle lifecycle = randomDataLifecycle(); + DataLifecycle lifecycle = randomLifecycle(); putComposableIndexTemplate("id1", null, List.of("with-lifecycle*"), null, null, lifecycle); putComposableIndexTemplate("id2", null, List.of("without-lifecycle*"), null, null, null); { diff --git a/modules/dlm/src/main/java/org/elasticsearch/dlm/DataLifecyclePlugin.java b/modules/dlm/src/main/java/org/elasticsearch/dlm/DataLifecyclePlugin.java index 061a1dcf50a5c..8ee0e09319f84 100644 --- a/modules/dlm/src/main/java/org/elasticsearch/dlm/DataLifecyclePlugin.java +++ b/modules/dlm/src/main/java/org/elasticsearch/dlm/DataLifecyclePlugin.java @@ -59,7 +59,7 @@ import java.util.List; import java.util.function.Supplier; -import static org.elasticsearch.cluster.metadata.DataLifecycle.DLM_ORIGIN; +import static org.elasticsearch.cluster.metadata.DataLifecycle.DATA_STREAM_LIFECYCLE_ORIGIN; /** * Plugin encapsulating Data Lifecycle Management Service. @@ -109,7 +109,7 @@ public Collection createComponents( dataLifecycleInitialisationService.set( new DataLifecycleService( settings, - new OriginSettingClient(client, DLM_ORIGIN), + new OriginSettingClient(client, DATA_STREAM_LIFECYCLE_ORIGIN), clusterService, getClock(), threadPool, diff --git a/modules/dlm/src/test/java/org/elasticsearch/dlm/DLMFixtures.java b/modules/dlm/src/test/java/org/elasticsearch/dlm/DLMFixtures.java index f2ab7fbd55009..cc517d7852832 100644 --- a/modules/dlm/src/test/java/org/elasticsearch/dlm/DLMFixtures.java +++ b/modules/dlm/src/test/java/org/elasticsearch/dlm/DLMFixtures.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataLifecycle; import org.elasticsearch.cluster.metadata.DataStream; @@ -22,16 +23,19 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; +import static org.apache.lucene.tests.util.LuceneTestCase.rarely; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; import static org.elasticsearch.test.ESIntegTestCase.client; import static org.elasticsearch.test.ESTestCase.randomInt; import static org.elasticsearch.test.ESTestCase.randomIntBetween; +import static org.elasticsearch.test.ESTestCase.randomMillisUpToYear9999; import static org.junit.Assert.assertTrue; /** @@ -93,12 +97,47 @@ static void putComposableIndexTemplate( 
assertTrue(client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet().isAcknowledged()); } - static DataLifecycle randomDataLifecycle() { - return switch (randomInt(3)) { - case 0 -> new DataLifecycle(); - case 1 -> new DataLifecycle(DataLifecycle.Retention.NULL); - case 2 -> Template.NO_LIFECYCLE; - default -> new DataLifecycle(TimeValue.timeValueDays(randomIntBetween(1, 365))); + static DataLifecycle randomLifecycle() { + return rarely() ? Template.NO_LIFECYCLE : new DataLifecycle(randomRetention(), randomDownsampling()); + } + + @Nullable + private static DataLifecycle.Retention randomRetention() { + return switch (randomInt(2)) { + case 0 -> null; + case 1 -> DataLifecycle.Retention.NULL; + default -> new DataLifecycle.Retention(TimeValue.timeValueMillis(randomMillisUpToYear9999())); }; } + + @Nullable + private static DataLifecycle.Downsampling randomDownsampling() { + return switch (randomInt(2)) { + case 0 -> null; + case 1 -> DataLifecycle.Downsampling.NULL; + default -> { + var count = randomIntBetween(0, 10); + List rounds = new ArrayList<>(); + var previous = new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(randomIntBetween(1, 365)), + new DownsampleConfig(new DateHistogramInterval(randomIntBetween(1, 24) + "h")) + ); + rounds.add(previous); + for (int i = 0; i < count; i++) { + DataLifecycle.Downsampling.Round round = nextRound(previous); + rounds.add(round); + previous = round; + } + yield new DataLifecycle.Downsampling(rounds); + } + }; + } + + private static DataLifecycle.Downsampling.Round nextRound(DataLifecycle.Downsampling.Round previous) { + var after = TimeValue.timeValueDays(previous.after().days() + randomIntBetween(1, 10)); + var fixedInterval = new DownsampleConfig( + new DateHistogramInterval((previous.config().getFixedInterval().estimateMillis() * randomIntBetween(2, 5)) + "ms") + ); + return new DataLifecycle.Downsampling.Round(after, fixedInterval); + } } diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java index b09c406857a3b..d32848b529fdb 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.mustache.MultiSearchTemplateResponse.Item; import org.elasticsearch.search.DummyQueryParserPlugin; +import org.elasticsearch.search.FailBeforeCurrentVersionQueryBuilder; import org.elasticsearch.search.SearchService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.json.JsonXContent; @@ -200,9 +201,14 @@ public void testCCSCheckCompatibility() throws Exception { Exception ex = response.getFailure(); assertThat(ex.getMessage(), containsString("[class org.elasticsearch.action.search.SearchRequest] is not compatible with version")); assertThat(ex.getMessage(), containsString("'search.check_ccs_compatibility' setting is enabled.")); - assertEquals( - "This query isn't serializable with transport versions before " + TransportVersion.current(), - ex.getCause().getMessage() + + String expectedCause = Strings.format( + "[fail_before_current_version] was released first in version %s, failed compatibility " + + "check trying to send it to node with 
version %s", + FailBeforeCurrentVersionQueryBuilder.FUTURE_VERSION, + TransportVersion.MINIMUM_CCS_VERSION ); + String actualCause = ex.getCause().getMessage(); + assertEquals(expectedCause, actualCause); } } diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java index c2d48fccf27d0..76605267e0e15 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.search.SearchRequest; @@ -365,14 +364,22 @@ public void testCCSCheckCompatibility() throws Exception { ExecutionException.class, () -> client().execute(SearchTemplateAction.INSTANCE, request).get() ); + + Throwable primary = ex.getCause(); + assertNotNull(primary); + + Throwable underlying = primary.getCause(); + assertNotNull(underlying); + assertThat( - ex.getCause().getMessage(), + primary.getMessage(), containsString("[class org.elasticsearch.action.search.SearchRequest] is not compatible with version") ); - assertThat(ex.getCause().getMessage(), containsString("'search.check_ccs_compatibility' setting is enabled.")); - assertEquals( - "This query isn't serializable with transport versions before " + TransportVersion.current(), - ex.getCause().getCause().getMessage() - ); + assertThat(primary.getMessage(), containsString("'search.check_ccs_compatibility' setting is enabled.")); + + String expectedCause = "[fail_before_current_version] was released first in version XXXXXXX, failed compatibility check trying to" + + " send it to node with version XXXXXXX"; + String actualCause = underlying.getMessage().replaceAll("\\d{7,}", "XXXXXXX"); + assertEquals(expectedCause, actualCause); } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/RestRethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/RestRethrottleAction.java index ce20cc5319980..052749d6f666c 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/RestRethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/RestRethrottleAction.java @@ -12,14 +12,17 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.tasks.TaskId; import java.util.List; import java.util.function.Supplier; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.Scope.INTERNAL; import static org.elasticsearch.rest.action.admin.cluster.RestListTasksAction.listTasksResponseListener; +@ServerlessScope(INTERNAL) public class RestRethrottleAction extends BaseRestHandler { private final Supplier nodesInCluster; diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 
08b91f304a704..2b6193b52cc0a 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -170,7 +170,7 @@ public void testEnforcedCooldownPeriod() throws IOException { fakeOldSnapshot, new RepositoryData.SnapshotDetails( SnapshotState.SUCCESS, - SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.minimumCompatibilityVersion(), + SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.minimumCompatibilityVersion().indexVersion, 0L, // -1 would refresh RepositoryData and find the real version 0L, // -1 would refresh RepositoryData and find the real version, "" // null would refresh RepositoryData and find the real version diff --git a/modules/rest-root/src/main/java/org/elasticsearch/rest/root/MainResponse.java b/modules/rest-root/src/main/java/org/elasticsearch/rest/root/MainResponse.java index 84f21129e02f3..b54b7ac48948e 100644 --- a/modules/rest-root/src/main/java/org/elasticsearch/rest/root/MainResponse.java +++ b/modules/rest-root/src/main/java/org/elasticsearch/rest/root/MainResponse.java @@ -113,7 +113,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws .field("lucene_version", version.luceneVersion().toString()) .field("minimum_wire_compatibility_version", version.minimumCompatibilityVersion().toString()) .field("minimum_index_compatibility_version", version.minimumIndexCompatibilityVersion().toString()) - .field("transport_version", transportVersion.toString()) + .field("transport_version", transportVersion != null ? transportVersion.toString() : "unknown") .endObject(); builder.field("tagline", "You Know, for Search"); builder.endObject(); diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index ca7936304c075..52e4bdf0e6fc1 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -41,6 +41,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.json.JsonXContent; +import org.hamcrest.Matchers; import org.junit.Before; import org.junit.ClassRule; import org.junit.rules.RuleChain; @@ -74,6 +75,7 @@ import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_COMPRESS; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -1303,7 +1305,11 @@ private void checkSnapshot(final String snapshotName, final int count, final Ver assertEquals(singletonList(snapshotName), XContentMapValues.extractValue("snapshots.snapshot", snapResponse)); assertEquals(singletonList("SUCCESS"), XContentMapValues.extractValue("snapshots.state", snapResponse)); - assertEquals(singletonList(tookOnVersion.toString()), XContentMapValues.extractValue("snapshots.version", snapResponse)); + // the format can change depending on the ES node version running & this test code running + assertThat( + 
XContentMapValues.extractValue("snapshots.version", snapResponse), + either(Matchers.equalTo(List.of(tookOnVersion.toString()))).or(equalTo(List.of(tookOnVersion.indexVersion.toString()))) + ); // Remove the routing setting and template so we can test restoring them. Request clearRoutingFromSettings = new Request("PUT", "/_cluster/settings"); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java index 40d3c50e9a445..74ba81e9555e2 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java @@ -17,6 +17,7 @@ import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -92,8 +93,8 @@ public void testGetFeatureUpgradeStatus() throws Exception { .findFirst() .orElse(Collections.emptyMap()); - assertThat(feature.size(), equalTo(4)); - assertThat(feature.get("minimum_index_version"), equalTo(UPGRADE_FROM_VERSION.toString())); + assertThat(feature, aMapWithSize(4)); + assertThat(feature.get("minimum_index_version"), equalTo(Integer.toString(UPGRADE_FROM_VERSION.id))); if (UPGRADE_FROM_VERSION.before(TransportGetFeatureUpgradeStatusAction.NO_UPGRADE_REQUIRED_VERSION)) { assertThat(feature.get("migration_status"), equalTo("MIGRATION_NEEDED")); } else { diff --git a/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/FeatureUpgradeApiIT.java b/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/FeatureUpgradeApiIT.java index 32e5abfb9a3e0..48f987d0359f0 100644 --- a/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/FeatureUpgradeApiIT.java +++ b/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/FeatureUpgradeApiIT.java @@ -8,12 +8,12 @@ package org.elasticsearch.system.indices; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.test.rest.ESRestTestCase; import org.junit.After; @@ -61,7 +61,7 @@ public void testGetFeatureUpgradedStatuses() throws Exception { .orElse(Collections.emptyMap()); assertThat(testFeature.size(), equalTo(4)); - assertThat(testFeature.get("minimum_index_version"), equalTo(Version.CURRENT.toString())); + assertThat(testFeature.get("minimum_index_version"), equalTo(IndexVersion.current().toString())); assertThat(testFeature.get("migration_status"), equalTo("NO_MIGRATION_NEEDED")); assertThat(testFeature.get("indices"), instanceOf(List.class)); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_lifecycle.json index 96aa5d21e62c8..bf153ff72e35a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_lifecycle.json @@ -1,7 +1,7 @@ { "indices.delete_data_lifecycle":{ "documentation":{ - 
"url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-delete-lifecycle.html", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-delete-lifecycle.html", "description":"Deletes the data lifecycle of the selected data streams." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.explain_data_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.explain_data_lifecycle.json index 51ee3b554f1ba..3232407000b19 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.explain_data_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.explain_data_lifecycle.json @@ -1,8 +1,8 @@ { "indices.explain_data_lifecycle": { "documentation": { - "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/dlm-explain-lifecycle.html", - "description": "Retrieves information about the index's current DLM lifecycle, such as any potential encountered error, time since creation etc." + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-explain-lifecycle.html", + "description": "Retrieves information about the index's current data stream lifecycle, such as any potential encountered error, time since creation etc." }, "stability": "experimental", "visibility": "public", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle.json index 7cbfbfb9e5008..2cb934c84bcb7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle.json @@ -1,7 +1,7 @@ { "indices.get_data_lifecycle":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-get-lifecycle.html", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle.html", "description":"Returns the data lifecycle of the selected data streams." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_data_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_data_lifecycle.json index 63cf5addd3bb7..b2f19bdc3fc10 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_data_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_data_lifecycle.json @@ -1,7 +1,7 @@ { "indices.put_data_lifecycle":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-put-lifecycle.html", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-put-lifecycle.html", "description":"Updates the data lifecycle of the selected data streams." 
}, "stability":"experimental", diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml index f34aef9b83321..c67ae7c0bfd58 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml @@ -94,8 +94,9 @@ setup: --- "kNN search plus query": - skip: - version: ' - 8.3.99' - reason: 'kNN added to search endpoint in 8.4' + version: all #' - 8.3.99' + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/97144" + # reason: 'kNN added to search endpoint in 8.4' - do: search: index: test @@ -121,8 +122,9 @@ setup: --- "kNN multi-field search with query": - skip: - version: ' - 8.6.99' - reason: 'multi-field kNN search added to search endpoint in 8.7' + version: all #' - 8.6.99' + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/97144" + # reason: 'multi-field kNN search added to search endpoint in 8.7' - do: search: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml index 873b6d87cac66..4d003a5c3b7b4 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml @@ -66,6 +66,9 @@ setup: --- "kNN search plus query": + - skip: + version: all + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/97144" - do: search: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/120_counter_fields.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/120_counter_fields.yml index d78f6c5c7e3ad..f993b18ddd8ab 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/120_counter_fields.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/120_counter_fields.yml @@ -1,8 +1,8 @@ --- "avg aggregation on counter field": - skip: - version: " - 8.6.99" - reason: "counter field support added in 8.7" + version: " - 8.9.99" + reason: "counter field support added in 8.7, but exception message changed in 8.10.0" - do: indices.create: @@ -43,7 +43,7 @@ - match: { aggregations.the_counter_avg.value: null } - do: - catch: /Field \[counter_field\] of type \[long\] is not supported for aggregation \[avg\]/ + catch: /Field \[counter_field\] of type \[long\]\[counter\] is not supported for aggregation \[avg\]/ search: index: myindex2 body: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/130_position_fields.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/130_position_fields.yml index 1662c4a591eb8..89e8ebb3e24ea 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/130_position_fields.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/130_position_fields.yml @@ -73,10 +73,10 @@ multi-valued fields unsupported: --- "avg aggregation on position field unsupported": - skip: - version: " - 8.7.99" - reason: position metric introduced in 8.8.0 + version: " - 8.9.99" + reason: position metric introduced in 8.8.0, but exception message changed in 8.10.0 - do: - catch: /Field \[location\] of type 
\[geo_point\] is not supported for aggregation \[avg\]/ + catch: /Field \[location\] of type \[geo_point\]\[position\] is not supported for aggregation \[avg\]/ search: index: locations body: diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java index 08d69e65acb71..d0311740fc637 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster.shards; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; @@ -20,6 +19,7 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.ShardLimitValidator; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotState; @@ -294,7 +294,7 @@ public void testRestoreSnapshotOverLimit() { assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); - assertThat(snapshotInfo.version(), equalTo(Version.CURRENT)); + assertThat(snapshotInfo.version(), equalTo(IndexVersion.current())); // Test restore after index deletion logger.info("--> delete indices"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java index beafe14079b42..8731c319043a8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java @@ -88,9 +88,12 @@ public void onAllNodesStopped() throws Exception { final UnassignedInfo unassignedInfo = allocationExplainResponse.getExplanation().getUnassignedInfo(); assertThat(description, unassignedInfo, not(nullValue())); assertThat(description, unassignedInfo.getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED)); - final Throwable cause = ExceptionsHelper.unwrap(unassignedInfo.getFailure(), TranslogCorruptedException.class); - assertThat(description, cause, not(nullValue())); - assertThat(description, cause.getMessage(), containsString(translogPath.toString())); + var failure = unassignedInfo.getFailure(); + assertNotNull(failure); + final Throwable cause = ExceptionsHelper.unwrap(failure, TranslogCorruptedException.class); + if (cause != null) { + assertThat(description, cause.getMessage(), containsString(translogPath.toString())); + } }); assertThat( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 80660a8751549..393c9db8c79e0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -24,7 +24,6 @@ import 
org.apache.lucene.search.Weight; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; @@ -679,7 +678,7 @@ public void testSnapshotRecovery() throws Exception { SnapshotRecoverySource recoverySource = new SnapshotRecoverySource( ((SnapshotRecoverySource) recoveryState.getRecoverySource()).restoreUUID(), new Snapshot(REPO_NAME, createSnapshotResponse.getSnapshotInfo().snapshotId()), - Version.CURRENT, + IndexVersion.current(), repositoryData.resolveIndexId(INDEX_NAME) ); assertRecoveryState(recoveryState, 0, recoverySource, true, Stage.DONE, null, nodeA); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java index ce98c1721c0c6..8662e2748feba 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Strings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.IndexMetaDataGenerations; import org.elasticsearch.repositories.Repository; @@ -340,7 +341,7 @@ public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception { ); final RepositoryData finalRepositoryData = getRepositoryData(repoName); for (SnapshotId snapshotId : finalRepositoryData.getSnapshotIds()) { - assertThat(finalRepositoryData.getVersion(snapshotId), is(Version.CURRENT)); + assertThat(finalRepositoryData.getVersion(snapshotId), is(IndexVersion.current())); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index be1bdbfcdd9aa..0a08c02116cde 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; @@ -25,6 +24,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.blobstore.FileRestoreContext; @@ -905,8 +905,8 @@ public void testFailOnAncientVersion() throws Exception { final String repoName = "test-repo"; final Path repoPath = randomRepoPath(); createRepository(repoName, FsRepository.TYPE, repoPath); - final Version oldVersion = 
Version.CURRENT.previousMajor().previousMajor(); - final String oldSnapshot = initWithSnapshotVersion(repoName, repoPath, oldVersion); + final IndexVersion oldVersion = IndexVersion.fromId(IndexVersion.MINIMUM_COMPATIBLE.id() - 1); + final String oldSnapshot = initWithSnapshotVersion(repoName, repoPath, oldVersion.toVersion()); final SnapshotRestoreException snapshotRestoreException = expectThrows( SnapshotRestoreException.class, () -> clusterAdmin().prepareRestoreSnapshot(repoName, oldSnapshot).execute().actionGet() @@ -917,7 +917,7 @@ public void testFailOnAncientVersion() throws Exception { "the snapshot was created with Elasticsearch version [" + oldVersion + "] which is below the current versions minimum index compatibility version [" - + Version.CURRENT.minimumIndexCompatibilityVersion() + + IndexVersion.MINIMUM_COMPATIBLE + "]" ) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 6ac4ddba72417..97d0a8332cff5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -11,7 +11,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; @@ -49,6 +48,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.shard.IndexShard; @@ -160,7 +160,7 @@ public void testBasicWorkFlow() throws Exception { assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); - assertThat(snapshotInfo.version(), equalTo(Version.CURRENT)); + assertThat(snapshotInfo.version(), equalTo(IndexVersion.current())); if (snapshotClosed) { assertAcked(indicesAdmin().prepareOpen(indicesToSnapshot).setWaitForActiveShards(ActiveShardCount.ALL).get()); @@ -2097,7 +2097,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); - assertThat(snapshotInfo.version(), equalTo(Version.CURRENT)); + assertThat(snapshotInfo.version(), equalTo(IndexVersion.current())); logger.info("--> deleting indices"); cluster().wipeIndices(normalIndex, hiddenIndex, dottedHiddenIndex); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index 87c17de72e531..4f3c61c0df45e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.snapshots; -import 
org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; @@ -26,6 +25,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.threadpool.ThreadPool; @@ -87,7 +87,7 @@ public void testStatusApiConsistency() throws Exception { assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); - assertThat(snapshotInfo.version(), equalTo(Version.CURRENT)); + assertThat(snapshotInfo.version(), equalTo(IndexVersion.current())); final List snapshotStatus = clusterAdmin().prepareSnapshotStatus("test-repo") .setSnapshots("test-snap") diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 42125556f771c..cfd1f571e9676 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -371,6 +371,7 @@ opens org.elasticsearch.common.logging to org.apache.logging.log4j.core; exports org.elasticsearch.action.dlm; + exports org.elasticsearch.action.downsample; provides java.util.spi.CalendarDataProvider with org.elasticsearch.common.time.IsoCalendarDataProvider; provides org.elasticsearch.xcontent.ErrorOnUnknown with org.elasticsearch.common.xcontent.SuggestingErrorOnUnknown; diff --git a/server/src/main/java/org/elasticsearch/TransportVersion.java b/server/src/main/java/org/elasticsearch/TransportVersion.java index 4b6c768c4efee..ab0cc91aa02c0 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersion.java +++ b/server/src/main/java/org/elasticsearch/TransportVersion.java @@ -25,38 +25,43 @@ import java.util.TreeMap; /** - * Represents the version of the wire protocol used to communicate between ES nodes. + * Represents the version of the wire protocol used to communicate between a pair of ES nodes. *

- * Prior to 8.8.0, the release {@link Version} was used everywhere. This class separates the wire protocol version - * from the release version. + * Prior to 8.8.0, the release {@link Version} was used everywhere. This class separates the wire protocol version from the release version. *

- * Each transport version constant has an id number, which for versions prior to 8.9.0 is the same as the release version - * for backwards compatibility. In 8.9.0 this is changed to an incrementing number, disconnected from the release version. + * Each transport version constant has an id number, which for versions prior to 8.9.0 is the same as the release version for backwards + * compatibility. In 8.9.0 this is changed to an incrementing number, disconnected from the release version. *

- * Each version constant has a unique id string. This is not actually used in the binary protocol, but is there to ensure - * each protocol version is only added to the source file once. This string needs to be unique (normally a UUID, - * but can be any other unique nonempty string). - * If two concurrent PRs add the same transport version, the different unique ids cause a git conflict, ensuring the second PR to be merged - * must be updated with the next free version first. Without the unique id string, git will happily merge the two versions together, - * resulting in the same transport version being used across multiple commits, - * causing problems when you try to upgrade between those two merged commits. + * Each version constant has a unique id string. This is not actually used in the binary protocol, but is there to ensure each protocol + * version is only added to the source file once. This string needs to be unique (normally a UUID, but can be any other unique nonempty + * string). If two concurrent PRs add the same transport version, the different unique ids cause a git conflict, ensuring that the second PR + * to be merged must be updated with the next free version first. Without the unique id string, git will happily merge the two versions + * together, resulting in the same transport version being used across multiple commits, causing problems when you try to upgrade between + * those two merged commits. *

* <h2>Version compatibility</h2>

- * The earliest compatible version is hardcoded in the {@link #MINIMUM_COMPATIBLE} field. Previously, this was dynamically calculated - * from the major/minor versions of {@link Version}, but {@code TransportVersion} does not have separate major/minor version numbers. - * So the minimum compatible version is hard-coded as the transport version used by the highest minor release of the previous major version. - * {@link #MINIMUM_COMPATIBLE} should be updated appropriately whenever a major release happens. + * The earliest compatible version is hardcoded in the {@link #MINIMUM_COMPATIBLE} field. Previously, this was dynamically calculated from + * the major/minor versions of {@link Version}, but {@code TransportVersion} does not have separate major/minor version numbers. So the + * minimum compatible version is hard-coded as the transport version used by the highest minor release of the previous major version. {@link + * #MINIMUM_COMPATIBLE} should be updated appropriately whenever a major release happens. *

- * The earliest CCS compatible version is hardcoded at {@link #MINIMUM_CCS_VERSION}, as the transport version used by the - * previous minor release. This should be updated appropriately whenever a minor release happens. + * The earliest CCS compatible version is hardcoded at {@link #MINIMUM_CCS_VERSION}, as the transport version used by the previous minor + * release. This should be updated appropriately whenever a minor release happens. *

* <h2>Adding a new version</h2>

- * A new transport version should be added every time a change is made to the serialization protocol of one or more classes. - * Each transport version should only be used in a single merged commit (apart from BwC versions copied from {@link Version}). + * A new transport version should be added every time a change is made to the serialization protocol of one or more classes. Each + * transport version should only be used in a single merged commit (apart from BwC versions copied from {@link Version}). *

- * To add a new transport version, add a new constant at the bottom of the list that is one greater than the current highest version, - * ensure it has a unique id, and update the {@link CurrentHolder#CURRENT} constant to point to the new version. + * To add a new transport version, add a new constant at the bottom of the list that is one greater than the current highest version, ensure + * it has a unique id, and update the {@link CurrentHolder#CURRENT} constant to point to the new version. *
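* <p>
* For example, a hypothetical next entry under this scheme (the id below is simply one greater than the highest
* existing constant, and the unique id string is a placeholder for illustration, not a real registered entry):
* <pre>
* // illustrative sketch only: use the next free id and a freshly generated unique string
* public static final TransportVersion V_8_500_029 = registerTransportVersion(8_500_029, "replace-with-a-new-uuid");
*
* // ...and in CurrentHolder, point CURRENT at the new constant:
* private static final TransportVersion CURRENT = findCurrent(V_8_500_029);
* </pre>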

* <h2>Reverting a transport version</h2>

- * If you revert a commit with a transport version change, you must ensure there is a new transport version - * representing the reverted change. Do not let the transport version go backwards, it must always be incremented. + * If you revert a commit with a transport version change, you must ensure there is a new transport version representing + * the reverted change. Do not let the transport version go backwards; it must always be incremented. + *

* <h2>Scope of usefulness of {@link TransportVersion}</h2>

+ * {@link TransportVersion} is a property of the transport connection between a pair of nodes, and should not be used as an indication of + * the version of any single node. The {@link TransportVersion} of a connection is negotiated between the nodes via some logic that is not + * totally trivial, and may change in future. Any other places that might make decisions based on this version effectively have to reproduce + * this negotiation logic, which would be fragile. If you need to make decisions based on the version of a single node, do so using a + * different version value. If you need to know whether the cluster as a whole speaks a new enough {@link TransportVersion} to understand a + * newly-added feature, use {@link org.elasticsearch.cluster.ClusterState#getMinTransportVersion}. */ public record TransportVersion(int id) implements Comparable { @@ -152,9 +157,13 @@ private static TransportVersion registerTransportVersion(int id, String uniqueId public static final TransportVersion V_8_500_022 = registerTransportVersion(8_500_022, "4993c724-7a81-4955-84e7-403484610091"); public static final TransportVersion V_8_500_023 = registerTransportVersion(8_500_023, "01b06435-5d73-42ff-a121-3b36b771375e"); public static final TransportVersion V_8_500_024 = registerTransportVersion(8_500_024, "db337007-f823-4dbd-968e-375383814c17"); + public static final TransportVersion V_8_500_025 = registerTransportVersion(8_500_025, "b2ab7b75-5ac2-4a3b-bbb6-8789ca66722d"); + public static final TransportVersion V_8_500_026 = registerTransportVersion(8_500_026, "965d294b-14aa-4abb-bcfc-34631187941d"); + public static final TransportVersion V_8_500_027 = registerTransportVersion(8_500_027, "B151D967-8E7C-401C-8275-0ABC06335F2D"); + public static final TransportVersion V_8_500_028 = registerTransportVersion(8_500_028, "a6592d08-15cb-4e1a-b9b4-b2ba24058444"); private static class CurrentHolder { - private static final TransportVersion CURRENT = findCurrent(V_8_500_024); + private static final TransportVersion CURRENT = findCurrent(V_8_500_028); // finds the pluggable current version, or uses the given fallback private static TransportVersion findCurrent(TransportVersion fallback) { diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 3c126e8604c98..a03914d3e5093 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -112,6 +112,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_17_9 = new Version(7_17_09_99, IndexVersion.V_7_17_9); public static final Version V_7_17_10 = new Version(7_17_10_99, IndexVersion.V_7_17_10); public static final Version V_7_17_11 = new Version(7_17_11_99, IndexVersion.V_7_17_11); + public static final Version V_7_17_12 = new Version(7_17_12_99, IndexVersion.V_7_17_12); public static final Version V_8_0_0 = new Version(8_00_00_99, IndexVersion.V_8_0_0); public static final Version V_8_0_1 = new Version(8_00_01_99, IndexVersion.V_8_0_1); public static final Version V_8_1_0 = new Version(8_01_00_99, IndexVersion.V_8_1_0); @@ -142,6 +143,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_8_8_0 = new Version(8_08_00_99, IndexVersion.V_8_8_0); public static final Version V_8_8_1 = new Version(8_08_01_99, IndexVersion.V_8_8_1); public static final Version V_8_8_2 = new Version(8_08_02_99, IndexVersion.V_8_8_2); + public static final Version V_8_8_3 = new 
Version(8_08_03_99, IndexVersion.V_8_8_3); public static final Version V_8_9_0 = new Version(8_09_00_99, IndexVersion.V_8_9_0); public static final Version V_8_10_0 = new Version(8_10_00_99, IndexVersion.V_8_10_0); public static final Version CURRENT = V_8_10_0; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java index 74ac542214a98..4c2b97af9bd26 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java @@ -9,12 +9,12 @@ package org.elasticsearch.action.admin.cluster.migration; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -124,7 +124,7 @@ public static UpgradeStatus combine(UpgradeStatus... statuses) { */ public static class FeatureUpgradeStatus implements Writeable, ToXContentObject { private final String featureName; - private final Version minimumIndexVersion; + private final IndexVersion minimumIndexVersion; private final UpgradeStatus upgradeStatus; private final List indexInfos; @@ -136,7 +136,7 @@ public static class FeatureUpgradeStatus implements Writeable, ToXContentObject */ public FeatureUpgradeStatus( String featureName, - Version minimumIndexVersion, + IndexVersion minimumIndexVersion, UpgradeStatus upgradeStatus, List indexInfos ) { @@ -152,7 +152,7 @@ public FeatureUpgradeStatus( */ public FeatureUpgradeStatus(StreamInput in) throws IOException { this.featureName = in.readString(); - this.minimumIndexVersion = Version.readVersion(in); + this.minimumIndexVersion = IndexVersion.readVersion(in); this.upgradeStatus = in.readEnum(UpgradeStatus.class); this.indexInfos = in.readImmutableList(IndexInfo::new); } @@ -161,7 +161,7 @@ public String getFeatureName() { return this.featureName; } - public Version getMinimumIndexVersion() { + public IndexVersion getMinimumIndexVersion() { return this.minimumIndexVersion; } @@ -176,7 +176,7 @@ public List getIndexVersions() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(this.featureName); - Version.writeVersion(this.minimumIndexVersion, out); + IndexVersion.writeVersion(this.minimumIndexVersion, out); out.writeEnum(this.upgradeStatus); out.writeList(this.indexInfos); } @@ -240,16 +240,16 @@ public static class IndexInfo implements Writeable, ToXContentObject { ); private final String indexName; - private final Version version; + private final IndexVersion version; @Nullable private final Exception exception; // Present if this index failed /** * @param indexName Name of the index - * @param version Version of Elasticsearch that created the index + * @param version Index version * @param exception The exception that this index's migration failed with, if applicable */ - public IndexInfo(String indexName, Version version, Exception exception) { + public IndexInfo(String indexName, IndexVersion version, Exception exception) 
{ this.indexName = indexName; this.version = version; this.exception = exception; @@ -261,7 +261,7 @@ public IndexInfo(String indexName, Version version, Exception exception) { */ public IndexInfo(StreamInput in) throws IOException { this.indexName = in.readString(); - this.version = Version.readVersion(in); + this.version = IndexVersion.readVersion(in); boolean hasException = in.readBoolean(); if (hasException) { this.exception = in.readException(); @@ -274,7 +274,7 @@ public String getIndexName() { return this.indexName; } - public Version getVersion() { + public IndexVersion getVersion() { return this.version; } @@ -285,7 +285,7 @@ public Exception getException() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(this.indexName); - Version.writeVersion(this.version, out); + IndexVersion.writeVersion(this.version, out); if (exception != null) { out.writeBoolean(true); out.writeException(this.exception); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java index 432e9670f365e..46a46f8d3f44c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksService; @@ -52,6 +53,7 @@ public class TransportGetFeatureUpgradeStatusAction extends TransportMasterNodeA * Once all feature migrations for 8.x -> 9.x have been tested, we can bump this to Version.V_8_0_0 */ public static final Version NO_UPGRADE_REQUIRED_VERSION = Version.V_7_0_0; + public static final IndexVersion NO_UPGRADE_REQUIRED_INDEX_VERSION = IndexVersion.V_7_0_0; private final SystemIndices systemIndices; PersistentTasksService persistentTasksService; @@ -124,14 +126,14 @@ static GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus getFeatureUpgradeSta List indexInfos = getIndexInfos(state, feature); - Version minimumVersion = indexInfos.stream() + IndexVersion minimumVersion = indexInfos.stream() .map(GetFeatureUpgradeStatusResponse.IndexInfo::getVersion) - .min(Version::compareTo) - .orElse(Version.CURRENT); + .min(IndexVersion::compareTo) + .orElse(IndexVersion.current()); GetFeatureUpgradeStatusResponse.UpgradeStatus initialStatus; if (featureName.equals(currentFeature)) { initialStatus = IN_PROGRESS; - } else if (minimumVersion.before(NO_UPGRADE_REQUIRED_VERSION)) { + } else if (minimumVersion.before(NO_UPGRADE_REQUIRED_INDEX_VERSION)) { initialStatus = MIGRATION_NEEDED; } else { initialStatus = NO_MIGRATION_NEEDED; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 0e84fb659e826..3963faff454e9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsUpdater; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; import org.elasticsearch.tasks.Task; @@ -185,17 +186,6 @@ public void onAckTimeout() { } private void reroute(final boolean updateSettingsAcked) { - // We're about to send a second update task, so we need to check if we're still the elected master - // For example the minimum_master_node could have been breached and we're no longer elected master, - // so we should *not* execute the reroute. - if (clusterService.state().nodes().isLocalNodeElectedMaster() == false) { - logger.debug("Skipping reroute after cluster update settings, because node is no longer master"); - listener.onResponse( - new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate()) - ); - return; - } - // The reason the reroute needs to be sent as separate update task, is that all the *cluster* settings are encapsulated in // the components (e.g. FilterAllocationDecider), so the changes made by the first call aren't visible to the components // until the ClusterStateListener instances have been invoked, but are visible after the first update task has been @@ -238,12 +228,12 @@ public void onFailure(Exception e) { }); } - public static class ClusterUpdateSettingsTask extends AckedClusterStateUpdateTask { + private static class ClusterUpdateSettingsTask extends AckedClusterStateUpdateTask { protected volatile boolean reroute = false; protected final SettingsUpdater updater; protected final ClusterUpdateSettingsRequest request; - public ClusterUpdateSettingsTask( + ClusterUpdateSettingsTask( final ClusterSettings clusterSettings, Priority priority, ClusterUpdateSettingsRequest request, @@ -254,13 +244,6 @@ public ClusterUpdateSettingsTask( this.request = request; } - /** - * Used by the reserved state handler {@link ReservedClusterSettingsAction} - */ - public ClusterUpdateSettingsTask(final ClusterSettings clusterSettings, ClusterUpdateSettingsRequest request) { - this(clusterSettings, Priority.IMMEDIATE, request, null); - } - @Override public ClusterState execute(final ClusterState currentState) { final ClusterState clusterState = updater.updateSettings( diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 9206da3e14fa2..f23ee6242b5c8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; @@ -26,6 +27,7 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; 
+import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.node.NodeClosedException; @@ -136,6 +138,11 @@ public void onTimeout(TimeValue timeout) { } } + @SuppressForbidden(reason = "exposing ClusterState#transportVersions requires reading them") + private static Map getTransportVersions(ClusterState clusterState) { + return clusterState.transportVersions(); + } + private ClusterStateResponse buildResponse(final ClusterStateRequest request, final ClusterState currentState) { logger.trace("Serving cluster state request using version {}", currentState.version()); ClusterState.Builder builder = ClusterState.builder(currentState.getClusterName()); @@ -144,7 +151,7 @@ private ClusterStateResponse buildResponse(final ClusterStateRequest request, fi if (request.nodes()) { builder.nodes(currentState.nodes()); - builder.transportVersions(currentState.transportVersions()); + builder.transportVersions(getTransportVersions(currentState)); } if (request.routingTable()) { if (request.indices().length > 0) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/VersionStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/VersionStats.java index 5d71891432679..e0c2e511c6879 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/VersionStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/VersionStats.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.admin.cluster.stats; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; @@ -17,6 +16,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -42,9 +42,9 @@ public final class VersionStats implements ToXContentFragment, Writeable { private final Set versionStats; public static VersionStats of(Metadata metadata, List nodeResponses) { - final Map indexCounts = new HashMap<>(); - final Map primaryShardCounts = new HashMap<>(); - final Map primaryByteCounts = new HashMap<>(); + final Map indexCounts = new HashMap<>(); + final Map primaryShardCounts = new HashMap<>(); + final Map primaryByteCounts = new HashMap<>(); final Map> indexPrimaryShardStats = new HashMap<>(); // Build a map from index name to primary shard stats @@ -69,38 +69,20 @@ public static VersionStats of(Metadata metadata, List for (Map.Entry cursor : metadata.indices().entrySet()) { IndexMetadata indexMetadata = cursor.getValue(); // Increment version-specific index counts - indexCounts.compute(indexMetadata.getCreationVersion(), (v, i) -> { - if (i == null) { - return 1; - } else { - return i + 1; - } - }); + indexCounts.merge(indexMetadata.getCreationVersion(), 1, Integer::sum); // Increment version-specific primary shard counts - primaryShardCounts.compute(indexMetadata.getCreationVersion(), (v, i) -> { - if (i == null) { - return indexMetadata.getNumberOfShards(); - } else { - return i + indexMetadata.getNumberOfShards(); - } - }); + primaryShardCounts.merge(indexMetadata.getCreationVersion(), indexMetadata.getNumberOfShards(), 
Integer::sum); // Increment version-specific primary shard sizes - primaryByteCounts.compute(indexMetadata.getCreationVersion(), (v, i) -> { - String indexName = indexMetadata.getIndex().getName(); - long indexPrimarySize = indexPrimaryShardStats.getOrDefault(indexName, Collections.emptyList()) - .stream() - .mapToLong(stats -> stats.getStats().getStore().sizeInBytes()) - .sum(); - if (i == null) { - return indexPrimarySize; - } else { - return i + indexPrimarySize; - } - }); + String indexName = indexMetadata.getIndex().getName(); + long indexPrimarySize = indexPrimaryShardStats.getOrDefault(indexName, Collections.emptyList()) + .stream() + .mapToLong(stats -> stats.getStats().getStore().sizeInBytes()) + .sum(); + primaryByteCounts.merge(indexMetadata.getCreationVersion(), indexPrimarySize, Long::sum); } List calculatedStats = new ArrayList<>(indexCounts.size()); - for (Map.Entry indexVersionCount : indexCounts.entrySet()) { - Version v = indexVersionCount.getKey(); + for (Map.Entry indexVersionCount : indexCounts.entrySet()) { + IndexVersion v = indexVersionCount.getKey(); SingleVersionStats singleStats = new SingleVersionStats( v, indexVersionCount.getValue(), @@ -164,12 +146,12 @@ public String toString() { static class SingleVersionStats implements ToXContentObject, Writeable, Comparable { - public final Version version; + public final IndexVersion version; public final int indexCount; public final int primaryShardCount; public final long totalPrimaryByteCount; - SingleVersionStats(Version version, int indexCount, int primaryShardCount, long totalPrimaryByteCount) { + SingleVersionStats(IndexVersion version, int indexCount, int primaryShardCount, long totalPrimaryByteCount) { this.version = version; this.indexCount = indexCount; this.primaryShardCount = primaryShardCount; @@ -177,7 +159,7 @@ static class SingleVersionStats implements ToXContentObject, Writeable, Comparab } SingleVersionStats(StreamInput in) throws IOException { - this.version = Version.readVersion(in); + this.version = IndexVersion.readVersion(in); this.indexCount = in.readVInt(); this.primaryShardCount = in.readVInt(); this.totalPrimaryByteCount = in.readVLong(); @@ -196,7 +178,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - Version.writeVersion(this.version, out); + IndexVersion.writeVersion(this.version, out); out.writeVInt(this.indexCount); out.writeVInt(this.primaryShardCount); out.writeVLong(this.totalPrimaryByteCount); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleConfig.java b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleConfig.java similarity index 76% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleConfig.java rename to server/src/main/java/org/elasticsearch/action/downsample/DownsampleConfig.java index 049f6c5436187..524133faf86fe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleConfig.java +++ b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleConfig.java @@ -1,10 +1,12 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
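Just above, in VersionStats, the verbose Map#compute lambdas were replaced with Map#merge. A minimal, self-contained sketch of that accumulation pattern (the string keys and the standalone class are invented for illustration, not code from this change):

import java.util.HashMap;
import java.util.Map;

class MergeSketch {
    public static void main(String[] args) {
        Map<String, Integer> indexCounts = new HashMap<>();
        // merge() inserts the value when the key is absent, otherwise combines it
        // with the existing value via Integer::sum, which is exactly what the old
        // compute() lambda did with its explicit null check.
        for (String version : new String[] { "8.8.0", "8.9.0", "8.8.0" }) {
            indexCounts.merge(version, 1, Integer::sum);
        }
        System.out.println(indexCounts); // e.g. {8.8.0=2, 8.9.0=1} (iteration order unspecified)
    }
}

The same one-liner covers all three accumulators (index counts, shard counts, byte counts) because merge() takes the combining function as an argument.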
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
  */
-package org.elasticsearch.xpack.core.downsample;
+
+package org.elasticsearch.action.downsample;

 import org.elasticsearch.common.Rounding;
 import org.elasticsearch.common.Strings;
@@ -29,7 +31,7 @@
 import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;

 /**
- * This class holds the configuration details of a {@link DownsampleAction} that downsamples time series
+ * This class holds the configuration details of a DownsampleAction that downsamples time series
  * (TSDB) indices. We have made great effort to simplify the rollup configuration and currently
  * only requires a fixed time interval. So, it has the following format:
  *
@@ -97,6 +99,37 @@ public DownsampleConfig(final StreamInput in) throws IOException {
         fixedInterval = new DateHistogramInterval(in);
     }

+    /**
+     * This method validates that the target downsampling configuration can be applied to an index that has
+     * already been downsampled using the source configuration. The requirements are:
+     * - The target interval needs to be greater than the source interval
+     * - The target interval needs to be a multiple of the source interval
+     * It throws an IllegalArgumentException to signal that the target interval is not acceptable.
+     */
+    public static void validateSourceAndTargetIntervals(DownsampleConfig source, DownsampleConfig target) {
+        long sourceMillis = source.fixedInterval.estimateMillis();
+        long targetMillis = target.fixedInterval.estimateMillis();
+        if (sourceMillis >= targetMillis) {
+            // Downsampling interval must be greater than source interval
+            throw new IllegalArgumentException(
+                "Downsampling interval ["
+                    + target.fixedInterval
+                    + "] must be greater than the source index interval ["
+                    + source.fixedInterval
+                    + "]."
+            );
+        } else if (targetMillis % sourceMillis != 0) {
+            // Downsampling interval must be a multiple of the source interval
+            throw new IllegalArgumentException(
+                "Downsampling interval ["
+                    + target.fixedInterval
+                    + "] must be a multiple of the source index interval ["
+                    + source.fixedInterval
+                    + "]."
+ ); + } + } + @Override public void writeTo(final StreamOutput out) throws IOException { fixedInterval.writeTo(out); @@ -154,11 +187,15 @@ public String getWriteableName() { public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { builder.startObject(); { - builder.field(FIXED_INTERVAL, fixedInterval.toString()); + toXContentFragment(builder); } return builder.endObject(); } + public XContentBuilder toXContentFragment(final XContentBuilder builder) throws IOException { + return builder.field(FIXED_INTERVAL, fixedInterval.toString()); + } + public static DownsampleConfig fromXContent(final XContentParser parser) throws IOException { return PARSER.parse(parser, null); } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index f660ee40f21e2..17c2a68f21332 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -660,7 +660,7 @@ public interface ReplicaResponse { } public static class RetryOnPrimaryException extends ElasticsearchException { - RetryOnPrimaryException(ShardId shardId, String msg) { + public RetryOnPrimaryException(ShardId shardId, String msg) { this(shardId, msg, null); } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index c5a72f97ea940..f7ff77b635a16 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -188,7 +188,7 @@ public ClusterState(long version, String stateUUID, ClusterState state) { state.metadata(), state.routingTable(), state.nodes(), - state.transportVersions(), + state.transportVersions, state.blocks(), state.customs(), false, @@ -736,7 +736,7 @@ public Builder(ClusterState state) { this.version = state.version(); this.uuid = state.stateUUID(); this.nodes = state.nodes(); - this.transportVersions = new HashMap<>(state.transportVersions()); + this.transportVersions = new HashMap<>(state.transportVersions); this.routingTable = state.routingTable(); this.metadata = state.metadata(); this.blocks = state.blocks(); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java index ffe09f19eb55b..e9529f9cdca16 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.common.Priority; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; @@ -238,6 +239,11 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex } } + @SuppressForbidden(reason = "maintaining ClusterState#transportVersions requires reading them") + private static Map getTransportVersions(ClusterState clusterState) { + return clusterState.transportVersions(); + } + protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( ClusterState 
currentState, List> taskContexts, @@ -259,7 +265,7 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( assert currentState.term() < term : term + " vs " + currentState; DiscoveryNodes currentNodes = currentState.nodes(); DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentNodes); - Map transportVersions = new HashMap<>(currentState.transportVersions()); + Map transportVersions = new HashMap<>(getTransportVersions(currentState)); nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId()); for (final var taskContext : taskContexts) { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java index 18144b8e0e5a7..995066106e8ca 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.MasterService; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import java.util.HashMap; @@ -49,11 +50,16 @@ public NodeLeftExecutor(AllocationService allocationService) { this.allocationService = allocationService; } + @SuppressForbidden(reason = "maintaining ClusterState#transportVersions requires reading them") + private static Map getTransportVersions(ClusterState clusterState) { + return clusterState.transportVersions(); + } + @Override public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { ClusterState initialState = batchExecutionContext.initialState(); DiscoveryNodes.Builder remainingNodesBuilder = DiscoveryNodes.builder(initialState.nodes()); - Map transportVersions = new HashMap<>(initialState.transportVersions()); + Map transportVersions = new HashMap<>(getTransportVersions(initialState)); boolean removed = false; for (final var taskContext : batchExecutionContext.taskContexts()) { final var task = taskContext.getTask(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataLifecycle.java index 33f9d19b5131c..78399e1a19dc3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataLifecycle.java @@ -10,6 +10,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.Strings; @@ -20,18 +21,27 @@ import org.elasticsearch.common.util.FeatureFlag; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.xcontent.AbstractObjectParser; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import 
org.elasticsearch.xcontent.XContentParser;

 import java.io.IOException;
+import java.util.List;
 import java.util.Objects;

+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+
 /**
- * Holds the Data Lifecycle Management metadata that are configuring how a data stream is managed.
+ * Holds the Data Lifecycle Management metadata that configure how a data stream is managed. Currently, it supports the following
+ * configurations:
+ * - data retention
+ * - downsampling
  */
 public class DataLifecycle implements SimpleDiffable, ToXContentObject {
@@ -45,15 +55,16 @@ public class DataLifecycle implements SimpleDiffable, ToXContentO
     private static final FeatureFlag DLM_FEATURE_FLAG = new FeatureFlag("dlm");

-    public static final String DLM_ORIGIN = "data_lifecycle";
+    public static final String DATA_STREAM_LIFECYCLE_ORIGIN = "data_stream_lifecycle";

     public static final ParseField DATA_RETENTION_FIELD = new ParseField("data_retention");
+    public static final ParseField DOWNSAMPLING_FIELD = new ParseField("downsampling");
     private static final ParseField ROLLOVER_FIELD = new ParseField("rollover");

     public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(
         "lifecycle",
         false,
-        (args, unused) -> new DataLifecycle((Retention) args[0])
+        (args, unused) -> new DataLifecycle((Retention) args[0], (Downsampling) args[1])
     );

     static {
@@ -65,6 +76,13 @@ public class DataLifecycle implements SimpleDiffable, ToXContentO
             return new Retention(TimeValue.parseTimeValue(value, DATA_RETENTION_FIELD.getPreferredName()));
         }
     }, DATA_RETENTION_FIELD, ObjectParser.ValueType.STRING_OR_NULL);
+        PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> {
+            if (p.currentToken() == XContentParser.Token.VALUE_NULL) {
+                return Downsampling.NULL;
+            } else {
+                return new Downsampling(AbstractObjectParser.parseArray(p, c, Downsampling.Round::fromXContent));
+            }
+        }, DOWNSAMPLING_FIELD, ObjectParser.ValueType.OBJECT_ARRAY_OR_NULL);
     }

     public static boolean isEnabled() {
@@ -73,17 +91,20 @@ public static boolean isEnabled() {

     @Nullable
     private final Retention dataRetention;
+    @Nullable
+    private final Downsampling downsampling;

     public DataLifecycle() {
         this((TimeValue) null);
     }

     public DataLifecycle(@Nullable TimeValue dataRetention) {
-        this(dataRetention == null ? null : new Retention(dataRetention));
+        this(dataRetention == null ? null : new Retention(dataRetention), null);
     }

-    public DataLifecycle(@Nullable Retention dataRetention) {
+    public DataLifecycle(@Nullable Retention dataRetention, @Nullable Downsampling downsampling) {
         this.dataRetention = dataRetention;
+        this.downsampling = downsampling;
     }

     public DataLifecycle(long timeInMills) {
@@ -113,18 +134,36 @@ Retention getDataRetention() {
         return dataRetention;
     }

+    /**
+     * The configured downsampling rounds with the `after` and the `fixed_interval` per round. If downsampling is
+     * not configured then this returns null.
+     */
+    @Nullable
+    public List getDownsamplingRounds() {
+        return downsampling == null ? null : downsampling.rounds();
+    }
+
+    /**
+     * Returns the configured wrapper object as it was defined in the template. This should be used only during
+     * template composition.
+     */
+    @Nullable
+    Downsampling getDownsampling() {
+        return downsampling;
+    }
+
     @Override
     public boolean equals(Object o) {
         if (this == o) return true;
         if (o == null || getClass() != o.getClass()) return false;
         final DataLifecycle that = (DataLifecycle) o;
-        return Objects.equals(dataRetention, that.dataRetention);
+        return Objects.equals(dataRetention, that.dataRetention) && Objects.equals(downsampling, that.downsampling);
     }

     @Override
     public int hashCode() {
-        return Objects.hash(dataRetention);
+        return Objects.hash(dataRetention, downsampling);
     }

     @Override
@@ -132,6 +171,9 @@ public void writeTo(StreamOutput out) throws IOException {
         if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_007)) {
             out.writeOptionalWriteable(dataRetention);
         }
+        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_026)) {
+            out.writeOptionalWriteable(downsampling);
+        }
     }

     public DataLifecycle(StreamInput in) throws IOException {
@@ -140,6 +182,11 @@ public DataLifecycle(StreamInput in) throws IOException {
         } else {
             dataRetention = null;
         }
+        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_026)) {
+            downsampling = in.readOptionalWriteable(Downsampling::read);
+        } else {
+            downsampling = null;
+        }
     }

     public static Diff readDiffFrom(StreamInput in) throws IOException {
@@ -169,6 +216,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla
             builder.field(DATA_RETENTION_FIELD.getPreferredName(), dataRetention.value().getStringRep());
         }
     }
+        if (downsampling != null) {
+            builder.field(DOWNSAMPLING_FIELD.getPreferredName());
+            downsampling.toXContent(builder, params);
+        }
     if (rolloverConfiguration != null) {
         builder.field(ROLLOVER_FIELD.getPreferredName());
         rolloverConfiguration.evaluateAndConvertToXContent(builder, params, getEffectiveDataRetention());
@@ -187,18 +238,25 @@ public static DataLifecycle fromXContent(XContentParser parser) throws IOExcepti
     static class Builder {
         @Nullable
         private Retention dataRetention = null;
+        @Nullable
+        private Downsampling downsampling = null;

         Builder dataRetention(@Nullable Retention value) {
             dataRetention = value;
             return this;
         }

+        Builder downsampling(@Nullable Downsampling value) {
+            downsampling = value;
+            return this;
+        }
+
         DataLifecycle build() {
-            return new DataLifecycle(dataRetention);
+            return new DataLifecycle(dataRetention, downsampling);
         }

         static Builder newBuilder(DataLifecycle dataLifecycle) {
-            return new Builder().dataRetention(dataLifecycle.getDataRetention());
+            return new Builder().dataRetention(dataLifecycle.getDataRetention()).downsampling(dataLifecycle.getDownsampling());
         }
     }
@@ -220,4 +278,122 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalTimeValue(value);
     }
 }
+
+    /**
+     * Downsampling holds the configuration about when Elasticsearch should downsample a backing index.
+     * @param rounds is a list of downsampling configurations that instruct when a backing index should be downsampled (`after`) and at
+     *               which interval (`fixed_interval`). Null represents an explicit "no downsampling" during template composition.
+     */
+    public record Downsampling(@Nullable List rounds) implements Writeable, ToXContentFragment {
+
+        /**
+         * A round represents the configuration for when and how Elasticsearch will downsample a backing index.
+         * @param after is a TimeValue configuring how old (based on generation age) a backing index should be before downsampling
+         * @param config contains the interval at which the backing index is going to be downsampled.
+         */
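Before the Round record itself (it follows right after this example), here is a hypothetical configuration, not taken from this change, that satisfies the validation performed further below: each round's 'after' is later than the preceding one's, and each fixed_interval is a larger multiple of its predecessor, as DownsampleConfig.validateSourceAndTargetIntervals requires. The invented helper assumes the imports already present in this file (List, TimeValue, DateHistogramInterval, DownsampleConfig):

// Sketch only: three rounds, 5m -> 1h -> 1d. 1h is 12 x 5m and 1d is 24 x 1h,
// so every interval is a strictly larger multiple of the previous one, and the
// 'after' values (1d, 7d, 30d) are strictly increasing, so construction succeeds.
static Downsampling exampleRounds() {
    return new Downsampling(
        List.of(
            new Round(TimeValue.timeValueDays(1), new DownsampleConfig(new DateHistogramInterval("5m"))),
            new Round(TimeValue.timeValueDays(7), new DownsampleConfig(new DateHistogramInterval("1h"))),
            new Round(TimeValue.timeValueDays(30), new DownsampleConfig(new DateHistogramInterval("1d")))
        )
    );
}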
+        public record Round(TimeValue after, DownsampleConfig config) implements Writeable, ToXContentObject {

+            public static final ParseField AFTER_FIELD = new ParseField("after");
+            public static final ParseField FIXED_INTERVAL_FIELD = new ParseField("fixed_interval");
+
+            private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(
+                "downsampling_round",
+                false,
+                (args, unused) -> new Round((TimeValue) args[0], new DownsampleConfig((DateHistogramInterval) args[1]))
+            );
+
+            static {
+                PARSER.declareString(
+                    ConstructingObjectParser.optionalConstructorArg(),
+                    value -> TimeValue.parseTimeValue(value, AFTER_FIELD.getPreferredName()),
+                    AFTER_FIELD
+                );
+                PARSER.declareField(
+                    constructorArg(),
+                    p -> new DateHistogramInterval(p.text()),
+                    new ParseField(FIXED_INTERVAL_FIELD.getPreferredName()),
+                    ObjectParser.ValueType.STRING
+                );
+            }
+
+            public static Round read(StreamInput in) throws IOException {
+                return new Round(in.readTimeValue(), new DownsampleConfig(in));
+            }
+
+            @Override
+            public void writeTo(StreamOutput out) throws IOException {
+                out.writeTimeValue(after);
+                out.writeWriteable(config);
+            }
+
+            @Override
+            public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+                builder.startObject();
+                builder.field(AFTER_FIELD.getPreferredName(), after.getStringRep());
+                config.toXContentFragment(builder);
+                builder.endObject();
+                return builder;
+            }
+
+            public static Round fromXContent(XContentParser parser, Void context) throws IOException {
+                return PARSER.parse(parser, context);
+            }
+
+            @Override
+            public String toString() {
+                return Strings.toString(this, true, true);
+            }
+        }
+
+        // For testing
+        public static final Downsampling NULL = new Downsampling(null);
+
+        public Downsampling {
+            if (rounds != null) {
+                if (rounds.isEmpty()) {
+                    throw new IllegalArgumentException("Downsampling configuration should have at least one round configured.");
+                }
+                Round previous = null;
+                for (Round round : rounds) {
+                    if (previous == null) {
+                        previous = round;
+                    } else {
+                        if (round.after.compareTo(previous.after) < 0) {
+                            throw new IllegalArgumentException(
+                                "A downsampling round must have a later 'after' value than the preceding, "
+                                    + round.after.getStringRep()
+                                    + " is not after "
+                                    + previous.after.getStringRep()
+                                    + "."
+                            );
+                        }
+                        DownsampleConfig.validateSourceAndTargetIntervals(previous.config(), round.config());
+                    }
+                }
+            }
+        }
+
+        public static Downsampling read(StreamInput in) throws IOException {
+            return new Downsampling(in.readOptionalList(Round::read));
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeOptionalCollection(rounds, (o, v) -> v.writeTo(o));
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            if (rounds == null) {
+                builder.nullValue();
+            } else {
+                builder.startArray();
+                for (Round round : rounds) {
+                    round.toXContent(builder, params);
+                }
+                builder.endArray();
+            }
+            return builder;
+        }
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
index e0a748ce92099..68c227dc5bae0 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
@@ -1035,12 +1035,12 @@ public long primaryTerm(int shardId) {
 }

 /**
- * Return the {@link Version} on which this index has been created.
This + * Return the {@link IndexVersion} on which this index has been created. This * information is typically useful for backward compatibility. * To check index compatibility (e.g. N-1 checks), use {@link #getCompatibilityVersion()} instead. */ - public Version getCreationVersion() { - return indexCreatedVersion.toVersion(); + public IndexVersion getCreationVersion() { + return indexCreatedVersion; } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 7071b415d2e73..e5a41988f30b1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -29,6 +29,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.indices.SystemIndices; @@ -60,6 +61,7 @@ public class IndexNameExpressionResolver { public static final String EXCLUDED_DATA_STREAMS_KEY = "es.excluded_ds"; public static final Version SYSTEM_INDEX_ENFORCEMENT_VERSION = Version.V_8_0_0; + public static final IndexVersion SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION = IndexVersion.V_8_0_0; private final ThreadContext threadContext; private final SystemIndices systemIndices; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index aae6fe739068e..79409ca032472 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -2567,7 +2567,7 @@ private static void validateAlias(String aliasName, List indexMet if (isNonEmpty(groupedBySystemStatus.get(false)) && isNonEmpty(groupedBySystemStatus.get(true))) { final List newVersionSystemIndices = groupedBySystemStatus.get(true) .stream() - .filter(i -> i.getCreationVersion().onOrAfter(IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_VERSION)) + .filter(i -> i.getCreationVersion().onOrAfter(IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION)) .map(i -> i.getIndex().getName()) .sorted() // reliable error message for testing .toList(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index bd0aa2995cddc..2e049250830fe 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -1574,7 +1574,7 @@ static void prepareResizeIndexSettings( builder.put(sourceMetadata.getSettings().filter(sourceSettingsPredicate)); } - indexSettingsBuilder.put(IndexMetadata.SETTING_VERSION_CREATED, sourceMetadata.getCreationVersion()) + indexSettingsBuilder.put(IndexMetadata.SETTING_VERSION_CREATED, sourceMetadata.getCreationVersion().id()) .put(builder.build()) .put(IndexMetadata.SETTING_ROUTING_PARTITION_SIZE, sourceMetadata.getRoutingPartitionSize()) .put(IndexMetadata.INDEX_RESIZE_SOURCE_NAME.getKey(), resizeSourceIndex.getName()) diff --git 
a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index 0e0e7101a21a6..c3884618ffe1f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -1516,6 +1516,9 @@ public static DataLifecycle composeDataLifecycles(List lifecycles if (current.getDataRetention() != null) { builder.dataRetention(current.getDataRetention()); } + if (current.getDownsampling() != null) { + builder.downsampling(current.getDownsampling()); + } } } return builder == null ? null : builder.build(); diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 8b06244557753..3f3be87f7cf68 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -605,8 +605,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endArray(); builder.field("version", versionInfo.nodeVersion()); - builder.field("minIndexVersion", versionInfo.minIndexVersion()); - builder.field("maxIndexVersion", versionInfo.maxIndexVersion()); + builder.field("min_index_version", versionInfo.minIndexVersion()); + builder.field("max_index_version", versionInfo.maxIndexVersion()); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java b/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java index e462e1da5450a..e34703c25add5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java @@ -147,7 +147,7 @@ public void onFailure(Exception e) { e ); } - ActionListener.onFailure(currentListeners, new ElasticsearchException("delayed reroute [" + reason + "] failed", e)); + ActionListener.onFailure(currentListeners, e); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index cc0bb6fd4323b..bd15d924c9c19 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -613,7 +613,6 @@ public IndexShardRoutingTable build() { assert distinctNodes(shards) : "more than one shard with same id assigned to same node (shards: " + shards + ")"; assert noDuplicatePrimary(shards) : "expected but did not find unique primary in shard routing table: " + shards; assert noAssignedReplicaWithoutActivePrimary(shards) : "unexpected assigned replica with no active primary: " + shards; - assert noRelocatingUnsearchableShards(shards) : "unexpected RELOCATING unsearchable shard: " + shards; return new IndexShardRoutingTable(shardId, shards); } @@ -664,14 +663,6 @@ static boolean noAssignedReplicaWithoutActivePrimary(List shards) return seenAssignedReplica == false; } - static boolean noRelocatingUnsearchableShards(List shards) { - // this is unsupported until ES-4677 is implemented - for (var shard : shards) { - assert shard.role().isSearchable() || shard.relocating() == false : 
"unexpected RELOCATING unsearchable shard: " + shard; - } - return true; - } - public static IndexShardRoutingTable.Builder readFrom(StreamInput in) throws IOException { Index index = new Index(in); return readFromThin(in, index); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java index d3a9cf548b068..bb4eef2bd422d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java @@ -8,11 +8,11 @@ package org.elasticsearch.cluster.routing; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.xcontent.ToXContent; @@ -206,9 +206,9 @@ public static class SnapshotRecoverySource extends RecoverySource { private final String restoreUUID; private final Snapshot snapshot; private final IndexId index; - private final Version version; + private final IndexVersion version; - public SnapshotRecoverySource(String restoreUUID, Snapshot snapshot, Version version, IndexId indexId) { + public SnapshotRecoverySource(String restoreUUID, Snapshot snapshot, IndexVersion version, IndexId indexId) { this.restoreUUID = restoreUUID; this.snapshot = Objects.requireNonNull(snapshot); this.version = Objects.requireNonNull(version); @@ -218,7 +218,7 @@ public SnapshotRecoverySource(String restoreUUID, Snapshot snapshot, Version ver SnapshotRecoverySource(StreamInput in) throws IOException { restoreUUID = in.readString(); snapshot = new Snapshot(in); - version = Version.readVersion(in); + version = IndexVersion.readVersion(in); index = new IndexId(in); } @@ -240,7 +240,7 @@ public IndexId index() { return index; } - public Version version() { + public IndexVersion version() { return version; } @@ -248,7 +248,7 @@ public Version version() { protected void writeAdditionalFields(StreamOutput out) throws IOException { out.writeString(restoreUUID); snapshot.writeTo(out); - Version.writeVersion(version, out); + IndexVersion.writeVersion(version, out); index.writeTo(out); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 0856472bccf7f..c4f827f807502 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -459,30 +459,6 @@ public Tuple relocateShard( return Tuple.tuple(source, target); } - public void relocateOrReinitializeShard( - ShardRouting startedShard, - String nodeId, - long expectedShardSize, - RoutingChangesObserver changes - ) { - if (startedShard.isSearchable() == false) { - remove(startedShard); - var unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, "relocating unsearchable shard"); - var assignedShards = assignedShards(startedShard.shardId()); - var promotableShard = assignedShards.stream().filter(ShardRouting::isPromotableToPrimary).findAny(); - assert promotableShard.isEmpty() : "multiple promotable shards are not supported yet"; - // replicas needs to be removed as well as they could not 
be active when primary is unassigned - // see org.elasticsearch.cluster.routing.IndexShardRoutingTable.Builder.noAssignedReplicaWithoutActivePrimary - for (ShardRouting replica : List.copyOf(assignedShards)) { - remove(replica); - unassignedShards.ignoreShard(replica.moveToUnassigned(unassignedInfo), AllocationStatus.NO_ATTEMPT, changes); - } - initializeShard(startedShard.moveToUnassigned(unassignedInfo), nodeId, null, expectedShardSize, changes); - } else { - relocateShard(startedShard, nodeId, expectedShardSize, changes); - } - } - /** * Applies the relevant logic to start an initializing shard. * @@ -533,7 +509,7 @@ public ShardRouting startShard( routing, new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, "primary changed") ); - relocateOrReinitializeShard( + relocateShard( startedReplica, sourceShard.relocatingNodeId(), sourceShard.getExpectedShardSize(), diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index b286f74bde308..5e66c241afbee 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.ArrayUtil; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -27,6 +26,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.gateway.PriorityComparator; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; @@ -145,7 +145,7 @@ private boolean allocateUnassignedInvariant() { final var shardCounts = allocation.metadata().stream().filter(indexMetadata -> // skip any pre-7.2 closed indices which have no routing table entries at all - indexMetadata.getCreationVersion().onOrAfter(Version.V_7_2_0) + indexMetadata.getCreationVersion().onOrAfter(IndexVersion.V_7_2_0) || indexMetadata.getState() == IndexMetadata.State.OPEN || MetadataIndexStateService.isIndexVerifiedBeforeClosed(indexMetadata)) .flatMap( @@ -378,7 +378,7 @@ private void moveShards() { final var moveTarget = findRelocationTarget(shardRouting, assignment.nodeIds()); if (moveTarget != null) { logger.debug("Moving shard {} from {} to {}", shardRouting.shardId(), shardRouting.currentNodeId(), moveTarget.getId()); - routingNodes.relocateOrReinitializeShard( + routingNodes.relocateShard( shardRouting, moveTarget.getId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), @@ -443,7 +443,7 @@ private void balance() { rebalanceTarget.getId() ); - routingNodes.relocateOrReinitializeShard( + routingNodes.relocateShard( shardRouting, rebalanceTarget.getId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java index 23a579988d583..8b9d5a402634f 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java @@ -168,7 +168,7 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) // its being throttled, maybe have a flag to take it into account and fail? for now, just do it since the "user" wants it... } allocation.routingNodes() - .relocateOrReinitializeShard( + .relocateShard( shardRouting, toRoutingNode.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java index 78c92f47f2b19..bc97533c735e2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java @@ -118,14 +118,14 @@ private static Decision isVersionCompatible( final RoutingNode target, final RoutingAllocation allocation ) { - if (target.node().getVersion().onOrAfter(recoverySource.version())) { + if (target.node().getVersion().onOrAfter(recoverySource.version().toVersion())) { /* we can allocate if we can restore from a snapshot that is older or on the same version */ return allocation.decision( Decision.YES, NAME, "node version [%s] is the same or newer than snapshot version [%s]", target.node().getVersion(), - recoverySource.version() + recoverySource.version().toVersion() ); } else { return allocation.decision( @@ -133,7 +133,7 @@ private static Decision isVersionCompatible( NAME, "node version [%s] is older than the snapshot version [%s]", target.node().getVersion(), - recoverySource.version() + recoverySource.version().toVersion() ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java index c650ecbc81ae0..d22c22a22be10 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.common.Priority; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -112,6 +113,11 @@ public ClusterState execute(BatchExecutionContext cont } } + @SuppressForbidden(reason = "maintaining ClusterState#transportVersions requires reading them") + private static Map getTransportVersions(ClusterState clusterState) { + return clusterState.transportVersions(); + } + @Override public void clusterChanged(ClusterChangedEvent event) { if (event.localNodeMaster() == false) return; // only if we're master @@ -123,9 +129,7 @@ public void clusterChanged(ClusterChangedEvent event) { && event.state().getMinTransportVersion().equals(INFERRED_TRANSPORT_VERSION)) { // find all the relevant nodes - Set nodes = event.state() - .transportVersions() - .entrySet() + Set nodes = getTransportVersions(event.state()).entrySet() .stream() 
.filter(e -> e.getValue().equals(INFERRED_TRANSPORT_VERSION)) .map(Map.Entry::getKey) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java b/server/src/main/java/org/elasticsearch/common/settings/SettingsUpdater.java similarity index 95% rename from server/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java rename to server/src/main/java/org/elasticsearch/common/settings/SettingsUpdater.java index 89176229ebf8c..2fa87e55fe3bf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SettingsUpdater.java @@ -6,15 +6,13 @@ * Side Public License, v 1. */ -package org.elasticsearch.action.admin.cluster.settings; +package org.elasticsearch.common.settings; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Tuple; import java.util.Map; @@ -26,24 +24,24 @@ * Updates transient and persistent cluster state settings if there are any changes * due to the update. */ -final class SettingsUpdater { +public final class SettingsUpdater { final Settings.Builder transientUpdates = Settings.builder(); final Settings.Builder persistentUpdates = Settings.builder(); private final ClusterSettings clusterSettings; - SettingsUpdater(ClusterSettings clusterSettings) { + public SettingsUpdater(ClusterSettings clusterSettings) { this.clusterSettings = clusterSettings; } - synchronized Settings getTransientUpdates() { + public synchronized Settings getTransientUpdates() { return transientUpdates.build(); } - synchronized Settings getPersistentUpdate() { + public synchronized Settings getPersistentUpdate() { return persistentUpdates.build(); } - synchronized ClusterState updateSettings( + public synchronized ClusterState updateSettings( final ClusterState currentState, final Settings transientToApply, final Settings persistentToApply, diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index 6730379edc0e6..528efe8fa8b0e 100644 --- a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -54,11 +54,14 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.StampedLock; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_BIND_HOST; @@ -87,7 +90,7 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo private volatile BoundTransportAddress boundAddress; private final AtomicLong totalChannelsAccepted = new AtomicLong(); - private final Set httpChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); + private final Map httpChannels = new 
ConcurrentHashMap<>(); private final PlainActionFuture allClientsClosedListener = PlainActionFuture.newFuture(); private final RefCounted refCounted = AbstractRefCounted.of(() -> allClientsClosedListener.onResponse(null)); private final Set httpServerChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); @@ -96,7 +99,8 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo private final HttpTracer httpLogger; private final Tracer tracer; - private volatile boolean gracefullyCloseConnections; + private volatile boolean shuttingDown; + private final ReadWriteLock shuttingDownRWLock = new StampedLock().asReadWriteLock(); private volatile long slowLogThresholdMs; @@ -226,13 +230,16 @@ private TransportAddress bindAddress(final InetAddress hostAddress) { * Gracefully shut down. If {@link HttpTransportSettings#SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD} is zero, the default, then * forcefully close all open connections immediately. * Serially run through the following steps: - * 1) Stop listening for new HTTP connections, which means no new HttpChannel are added to the {@link #httpChannels} list - * 2) Add the {@code Connection: close} response header to all new requests on existing {@link #httpChannels} and close the HttpChannel - * after the new request completes - * 3) If grace period is set, wait for all {@link #httpChannels} to close via 2 for up to the configured grace period, + *
+ * <ol>
+ * <li> Stop listening for new HTTP connections, which means no new HttpChannel are added to the {@link #httpChannels} list.
+ * {@link #serverAcceptedChannel(HttpChannel)} will close any new channels to ensure this is true.</li>
+ * <li> Close the HttpChannel after a new request completes on all existing channels.</li>
+ * <li> Close all idle channels.</li>
+ * <li> If grace period is set, wait for all httpChannels to close via 2 for up to the configured grace period,
 * {@link #shutdownGracePeriodMillis}.
- * If all connections are closed before the expiration of the grace period, stop waiting early.
- * 4) Close all open httpChannels even if requests are in flight.
+ * If all connections are closed before the expiration of the grace period, stop waiting early.</li>
+ * <li> Close all remaining open httpChannels even if requests are in flight.</li>
+ * </ol>
*/ @Override protected void doStop() { @@ -247,20 +254,33 @@ protected void doStop() { } } } - gracefullyCloseConnections(); - refCounted.decRef(); + + var wlock = shuttingDownRWLock.writeLock(); + try { + wlock.lock(); + shuttingDown = true; + refCounted.decRef(); + httpChannels.values().forEach(RequestTrackingHttpChannel::setCloseWhenIdle); + } finally { + wlock.unlock(); + } + boolean closed = false; + if (shutdownGracePeriodMillis > 0) { try { + logger.debug(format("waiting [%d]ms for clients to close connections", shutdownGracePeriodMillis)); FutureUtils.get(allClientsClosedListener, shutdownGracePeriodMillis, TimeUnit.MILLISECONDS); closed = true; } catch (ElasticsearchTimeoutException t) { logger.warn(format("timed out while waiting [%d]ms for clients to close connections", shutdownGracePeriodMillis)); } + } else { + logger.debug("closing all client connections immediately"); } if (closed == false) { try { - CloseableChannel.closeChannels(new ArrayList<>(httpChannels), true); + CloseableChannel.closeChannels(new ArrayList<>(httpChannels.values()), true); } catch (Exception e) { logger.warn("unexpected exception while closing http channels", e); } @@ -275,11 +295,8 @@ protected void doStop() { stopInternal(); } - /** - * Close the client channel after a new request. - */ - void gracefullyCloseConnections() { - gracefullyCloseConnections = true; + boolean isAcceptingConnections() { + return shuttingDown == false; } @Override @@ -367,8 +384,19 @@ protected static void onServerException(HttpServerChannel channel, Exception e) } protected void serverAcceptedChannel(HttpChannel httpChannel) { - boolean addedOnThisCall = httpChannels.add(httpChannel); - assert addedOnThisCall : "Channel should only be added to http channel set once"; + var rlock = shuttingDownRWLock.readLock(); + try { + rlock.lock(); + if (shuttingDown) { + logger.warn("server accepted channel after shutting down"); + httpChannel.close(); + return; + } + RequestTrackingHttpChannel trackingChannel = httpChannels.putIfAbsent(httpChannel, new RequestTrackingHttpChannel(httpChannel)); + assert trackingChannel == null : "Channel should only be added to http channel set once"; + } finally { + rlock.unlock(); + } refCounted.incRef(); httpChannel.addCloseListener(ActionListener.running(() -> { httpChannels.remove(httpChannel); @@ -387,9 +415,17 @@ protected void serverAcceptedChannel(HttpChannel httpChannel) { */ public void incomingRequest(final HttpRequest httpRequest, final HttpChannel httpChannel) { httpClientStatsTracker.updateClientStats(httpRequest, httpChannel); + final RequestTrackingHttpChannel trackingChannel = httpChannels.get(httpChannel); final long startTime = threadPool.rawRelativeTimeInMillis(); try { - handleIncomingRequest(httpRequest, httpChannel, httpRequest.getInboundException()); + // The channel may not be present if the close listener (set in serverAcceptedChannel) runs before this method because the + // connection closed early + if (trackingChannel == null) { + logger.warn("http channel [{}] missing tracking channel", httpChannel); + return; + } + trackingChannel.incomingRequest(); + handleIncomingRequest(httpRequest, trackingChannel, httpRequest.getInboundException()); } finally { final long took = threadPool.rawRelativeTimeInMillis() - startTime; networkService.getHandlingTimeTracker().addHandlingTime(took); @@ -492,8 +528,7 @@ private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChan threadContext, corsHandler, maybeHttpLogger, - tracer, - gracefullyCloseConnections + tracer ); 
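To make the shutdown sequence documented on doStop concrete: every accepted channel is wrapped (by the RequestTrackingHttpChannel defined just below) so that one baseline reference plus one reference per in-flight request keeps it open; setCloseWhenIdle() drops the baseline reference during doStop, so the channel closes as soon as the last response has been sent. A simplified, self-contained sketch of that idea, with invented names rather than the actual wrapper:

import java.util.concurrent.atomic.AtomicInteger;

// Simplified stand-in for AbstractRefCounted: runs the close action exactly
// once, when the count first drops to zero.
final class IdleCloseSketch {
    private final AtomicInteger refs = new AtomicInteger(1); // baseline reference
    private final Runnable closeChannel;

    IdleCloseSketch(Runnable closeChannel) {
        this.closeChannel = closeChannel;
    }

    void onRequestStarted() {
        refs.incrementAndGet(); // one reference per in-flight request
    }

    void onResponseSent() {
        release();
    }

    void closeWhenIdle() {
        release(); // drop the baseline reference taken at construction
    }

    private void release() {
        if (refs.decrementAndGet() == 0) {
            closeChannel.run(); // idle and shutting down: close now
        }
    }
}

If no request is in flight when closeWhenIdle() runs, the channel closes immediately; otherwise the close is deferred until the last onResponseSent().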
} catch (final IllegalArgumentException e) { badRequestCause = ExceptionsHelper.useOrSuppress(badRequestCause, e); @@ -507,8 +542,7 @@ private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChan threadContext, corsHandler, httpLogger, - tracer, - gracefullyCloseConnections + tracer ); } channel = innerChannel; @@ -550,4 +584,76 @@ private static ActionListener earlyResponseListener(HttpRequest request, H public ThreadPool getThreadPool() { return threadPool; } + + /** + * A {@link HttpChannel} that tracks number of requests via a {@link RefCounted}. + */ + private static class RequestTrackingHttpChannel implements HttpChannel { + /** + * Only counts down to zero via {@link #setCloseWhenIdle()}. + */ + final RefCounted refCounted = AbstractRefCounted.of(this::closeInner); + final HttpChannel inner; + + RequestTrackingHttpChannel(HttpChannel inner) { + this.inner = inner; + } + + public void incomingRequest() throws IllegalStateException { + refCounted.incRef(); + } + + /** + * Close the channel when there are no more requests in flight. + */ + public void setCloseWhenIdle() { + refCounted.decRef(); + } + + @Override + public void close() { + closeInner(); + } + + /** + * Synchronized to avoid double close due to a natural close and a close via {@link #setCloseWhenIdle()} + */ + private void closeInner() { + synchronized (inner) { + if (inner.isOpen()) { + inner.close(); + } else { + logger.info("channel [{}] already closed", inner); + } + } + } + + @Override + public void addCloseListener(ActionListener listener) { + inner.addCloseListener(listener); + } + + @Override + public boolean isOpen() { + return inner.isOpen(); + } + + @Override + public void sendResponse(HttpResponse response, ActionListener listener) { + inner.sendResponse( + response, + listener != null ? 
ActionListener.runAfter(listener, refCounted::decRef) : ActionListener.running(refCounted::decRef) + ); + } + + @Override + public InetSocketAddress getLocalAddress() { + return inner.getLocalAddress(); + } + + @Override + public InetSocketAddress getRemoteAddress() { + return inner.getRemoteAddress(); + } + } } diff --git a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java index 2b4e1fdc1d58c..6fc6e7eb3ffbc 100644 --- a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java +++ b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java @@ -56,7 +56,6 @@ public class DefaultRestChannel extends AbstractRestChannel implements RestChann private final HttpChannel httpChannel; private final CorsHandler corsHandler; private final Tracer tracer; - private final boolean closeConnection; @Nullable private final HttpTracer httpLogger; @@ -70,8 +69,7 @@ public class DefaultRestChannel extends AbstractRestChannel implements RestChann ThreadContext threadContext, CorsHandler corsHandler, @Nullable HttpTracer httpLogger, - Tracer tracer, - boolean closeConnection + Tracer tracer ) { super(request, settings.detailedErrorsEnabled()); this.httpChannel = httpChannel; @@ -82,7 +80,6 @@ public class DefaultRestChannel extends AbstractRestChannel implements RestChann this.corsHandler = corsHandler; this.httpLogger = httpLogger; this.tracer = tracer; - this.closeConnection = closeConnection; } @Override @@ -98,7 +95,7 @@ public void sendResponse(RestResponse restResponse) { final SpanId spanId = SpanId.forRestRequest(request); final ArrayList toClose = new ArrayList<>(4); - if (HttpUtils.shouldCloseConnection(httpRequest) || closeConnection) { + if (HttpUtils.shouldCloseConnection(httpRequest)) { toClose.add(() -> CloseableChannel.closeChannel(httpChannel)); } toClose.add(() -> tracer.stopTrace(request)); @@ -162,9 +159,6 @@ public void sendResponse(RestResponse restResponse) { // Add all custom headers addCustomHeaders(httpResponse, restResponse.getHeaders()); addCustomHeaders(httpResponse, restResponse.filterHeaders(threadContext.getResponseHeaders())); - if (closeConnection) { - setHeaderField(httpResponse, CONNECTION, CLOSE); - } // If our response doesn't specify a content-type header, set one setHeaderField(httpResponse, CONTENT_TYPE, restResponse.contentType(), false); diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 72feba325acb0..3820b99945181 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -268,7 +268,7 @@ public final class IndexSettings { TimeValue.MINUS_ONE, Property.NodeScope ); // TODO: remove setting - public static TimeValue STATELESS_DEFAULT_REFRESH_INTERVAL = TimeValue.timeValueSeconds(5); // TODO: settle on right value + public static TimeValue STATELESS_DEFAULT_REFRESH_INTERVAL = TimeValue.timeValueSeconds(10); // TODO: settle on right value public static final Setting INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.refresh_interval", (settings) -> { if (EXISTING_SHARDS_ALLOCATOR_SETTING.get(settings).equals("stateless") && INDEX_FAST_REFRESH_SETTING.get(settings) == false) { return STATELESS_DEFAULT_REFRESH_INTERVAL; diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersion.java b/server/src/main/java/org/elasticsearch/index/IndexVersion.java index 
5bea5fcaf2c0a..dfddc35398f72 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersion.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersion.java @@ -142,6 +142,7 @@ private static IndexVersion registerIndexVersion(int id, Version luceneVersion, public static final IndexVersion V_7_17_9 = registerIndexVersion(7_17_09_99, Version.LUCENE_8_11_1, "8044989f-77ef-4d6d-9dd8-1bdd805cef74"); public static final IndexVersion V_7_17_10 = registerIndexVersion(7_17_10_99, Version.LUCENE_8_11_1, "66b743fb-8be6-443f-8920-d8c5ed561857"); public static final IndexVersion V_7_17_11 = registerIndexVersion(7_17_11_99, Version.LUCENE_8_11_1, "f1935acc-1af9-44b0-97e9-67112d333753"); + public static final IndexVersion V_7_17_12 = registerIndexVersion(7_17_12_99, Version.LUCENE_8_11_1, "1a0719f2-96f4-4df5-b20d-62244e27d7d4"); public static final IndexVersion V_8_0_0 = registerIndexVersion(8_00_00_99, Version.LUCENE_9_0_0, "ff18a13c-1fa7-4cf7-a3b1-4fdcd9461d5b"); public static final IndexVersion V_8_0_1 = registerIndexVersion(8_00_01_99, Version.LUCENE_9_0_0, "4bd5650f-3eff-418f-a7a6-ad46b2a9c941"); public static final IndexVersion V_8_1_0 = registerIndexVersion(8_01_00_99, Version.LUCENE_9_0_0, "b4742461-ee43-4fd0-a260-29f8388b82ec"); @@ -172,13 +173,13 @@ private static IndexVersion registerIndexVersion(int id, Version luceneVersion, public static final IndexVersion V_8_8_0 = registerIndexVersion(8_08_00_99, Version.LUCENE_9_6_0, "d6ffc8d7-f6bd-469b-8495-01688c310000"); public static final IndexVersion V_8_8_1 = registerIndexVersion(8_08_01_99, Version.LUCENE_9_6_0, "a613499e-ec1a-4b0b-81d3-a766aff3c27c"); public static final IndexVersion V_8_8_2 = registerIndexVersion(8_08_02_99, Version.LUCENE_9_6_0, "9db9d888-6be8-4a58-825c-f423fd8c6b00"); + public static final IndexVersion V_8_8_3 = registerIndexVersion(8_08_03_99, Version.LUCENE_9_6_0, "e279a94a-25e8-4919-9a17-39af37b75a67"); public static final IndexVersion V_8_9_0 = registerIndexVersion(8_09_00_99, Version.LUCENE_9_7_0, "32f6dbab-cc24-4f5b-87b5-015a848480d9"); public static final IndexVersion V_8_10_0 = registerIndexVersion(8_10_00_99, Version.LUCENE_9_7_0, "2e107286-12ad-4c51-9a6f-f8943663b6e7"); /* * READ THE JAVADOC ABOVE BEFORE ADDING NEW INDEX VERSIONS * Detached index versions added below here. */ - private static class CurrentHolder { private static final IndexVersion CURRENT = findCurrent(V_8_10_0); @@ -344,7 +345,7 @@ public int compareTo(IndexVersion other) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.value(toString()); + return builder.value(id); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java index a2c46e4e9d6d2..1a9fea929a20c 100644 --- a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java @@ -322,7 +322,7 @@ protected QueryBuilder doCoordinatorRewrite(final CoordinatorRewriteContext coor * @return A {@link QueryBuilder} representing the rewritten query.
*/ protected QueryBuilder doSearchRewrite(final SearchExecutionContext searchExecutionContext) throws IOException { - return this; + return doIndexMetadataRewrite(searchExecutionContext); } /** diff --git a/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java index d7e54df00ed06..06e3e41e9c4ad 100644 --- a/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java @@ -66,14 +66,12 @@ public String fieldName() { } @Override - protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { - SearchExecutionContext context = queryRewriteContext.convertToSearchExecutionContext(); - if (context != null) { - if (getMappedFields(context, fieldName).isEmpty()) { - return new MatchNoneQueryBuilder(); - } + protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { + if (getMappedFields(context, fieldName).isEmpty()) { + return new MatchNoneQueryBuilder(); + } else { + return this; + } - return super.doRewrite(queryRewriteContext); } @Override @@ -153,7 +151,7 @@ public static Query newFilter(SearchExecutionContext context, String fieldPatter return new ConstantScoreQuery(boolFilterBuilder.build()); } - private static Collection<String> getMappedFields(SearchExecutionContext context, String fieldPattern) { + private static Collection<String> getMappedFields(QueryRewriteContext context, String fieldPattern) { Set<String> matchingFieldNames = context.getMatchingFieldNames(fieldPattern); if (matchingFieldNames.isEmpty()) { // might be an object field, so try matching it as an object prefix pattern diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java index 04421048303eb..f1dc5d1259556 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java @@ -156,11 +156,6 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep builder.endObject(); } - @Override - protected QueryBuilder doSearchRewrite(SearchExecutionContext searchExecutionContext) throws IOException { - return doIndexMetadataRewrite(searchExecutionContext); - } - @Override protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { // If we're using the default keyword analyzer then we can rewrite this to a TermQueryBuilder diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index 59ec874bae644..fc6dc04faf658 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -26,9 +27,11 @@ import java.util.ArrayList; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import
java.util.Objects; +import java.util.Set; import java.util.function.BiConsumer; import java.util.function.BooleanSupplier; import java.util.function.LongSupplier; @@ -289,4 +292,34 @@ public boolean indexMatches(String pattern) { assert indexNameMatcher != null; return indexNameMatcher.test(pattern); } + + /** + * Returns the names of all mapped fields that match a given pattern + * + * All names returned by this method are guaranteed to resolve to a + * MappedFieldType if passed to {@link #getFieldType(String)} + * + * @param pattern the field name pattern + */ + public Set<String> getMatchingFieldNames(String pattern) { + if (runtimeMappings.isEmpty()) { + return mappingLookup.getMatchingFieldNames(pattern); + } + Set<String> matches = new HashSet<>(mappingLookup.getMatchingFieldNames(pattern)); + if ("*".equals(pattern)) { + matches.addAll(runtimeMappings.keySet()); + } else if (Regex.isSimpleMatchPattern(pattern) == false) { + // no wildcard + if (runtimeMappings.containsKey(pattern)) { + matches.add(pattern); + } + } else { + for (String name : runtimeMappings.keySet()) { + if (Regex.simpleMatch(pattern, name)) { + matches.add(name); + } + } + } + return matches; + } } diff --git a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java index 65fe9356a3f03..5f285326446a3 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.index.Index; @@ -311,36 +310,6 @@ public boolean hasMappings() { return mappingLookup.hasMappings(); } - /** - * Returns the names of all mapped fields that match a given pattern - * - * All names returned by this method are guaranteed to resolve to a - * MappedFieldType if passed to {@link #getFieldType(String)} - * - * @param pattern the field name pattern - */ - public Set<String> getMatchingFieldNames(String pattern) { - if (runtimeMappings.isEmpty()) { - return mappingLookup.getMatchingFieldNames(pattern); - } - Set<String> matches = new HashSet<>(mappingLookup.getMatchingFieldNames(pattern)); - if ("*".equals(pattern)) { - matches.addAll(runtimeMappings.keySet()); - } else if (Regex.isSimpleMatchPattern(pattern) == false) { - // no wildcard - if (runtimeMappings.containsKey(pattern)) { - matches.add(pattern); - } - } else { - for (String name : runtimeMappings.keySet()) { - if (Regex.simpleMatch(pattern, name)) { - matches.add(name); - } - } - } - return matches; - } - /** * Returns true if the field identified by the provided name is mapped, false otherwise */ diff --git a/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java index f5935aa7eff20..be66c2b879943 100644 --- a/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java @@ -167,11 +167,6 @@ protected void addExtraXContent(XContentBuilder builder, Params params) throws I } } - @Override - protected QueryBuilder doSearchRewrite(SearchExecutionContext
searchExecutionContext) throws IOException { - return doIndexMetadataRewrite(searchExecutionContext); - } - @Override protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { MappedFieldType fieldType = context.getFieldType(this.fieldName); diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java index c75cbb0308a86..a3f6d8a2921ea 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java @@ -10,8 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; @@ -26,12 +24,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -44,6 +39,7 @@ import java.util.Objects; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.index.seqno.RetentionLeaseSyncAction.getExceptionLogLevel; /** * Replication action responsible for background syncing retention leases to replicas. 
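The rewrite changes above fold the per-builder search-time overrides into one hook: AbstractQueryBuilder#doSearchRewrite now delegates to doIndexMetadataRewrite by default, so builders such as ExistsQueryBuilder, TermQueryBuilder and MatchPhraseQueryBuilder override a single method that runs both on the shard and in the coordinator-side "can match" phase, where matching no mapped field lets a shard be skipped outright. A sketch of a mapping-only rewrite in that style; it leans on the classes named in this patch (QueryRewriteContext#getMatchingFieldNames, MatchNoneQueryBuilder), but the helper class itself is hypothetical:

```java
import java.io.IOException;

import org.elasticsearch.index.query.MatchNoneQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext;

// Hypothetical helper mirroring ExistsQueryBuilder#doIndexMetadataRewrite above:
// a rewrite that needs only index metadata can short-circuit to match_none, which
// lets the shard be skipped before any search-related I/O happens.
final class MappingOnlyRewrites {
    static QueryBuilder rewriteIfUnmapped(QueryBuilder original, String fieldName, QueryRewriteContext context)
        throws IOException {
        if (context.getMatchingFieldNames(fieldName).isEmpty()) {
            return new MatchNoneQueryBuilder(); // no mapped field: the query can match nothing
        }
        return original; // leave the query untouched; later phases may rewrite further
    }
}
```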
This action is deliberately a replication action so @@ -129,20 +125,7 @@ public void handleResponse(ReplicationResponse response) { public void handleException(TransportException e) { task.setPhase("finished"); taskManager.unregister(task); - if (ExceptionsHelper.unwrap(e, NodeClosedException.class) != null) { - // node shutting down - return; - } - if (ExceptionsHelper.unwrap( - e, - IndexNotFoundException.class, - AlreadyClosedException.class, - IndexShardClosedException.class - ) != null) { - // the index was deleted or the shard is closed - return; - } - getLogger().warn(() -> format("%s retention lease background sync failed", shardId), e); + LOGGER.log(getExceptionLogLevel(e), () -> format("%s retention lease background sync failed", shardId), e); } } ); diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java index 44b5ba0674214..ebfc8ff28f3f7 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; +import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.ReplicationTask; import org.elasticsearch.action.support.replication.TransportWriteAction; @@ -37,6 +38,7 @@ import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -146,10 +148,12 @@ public void handleException(TransportException e) { static Level getExceptionLogLevel(Exception e) { return ExceptionsHelper.unwrap( e, + NodeClosedException.class, IndexNotFoundException.class, AlreadyClosedException.class, IndexShardClosedException.class, - ShardNotInPrimaryModeException.class + ShardNotInPrimaryModeException.class, + ReplicationOperation.RetryOnPrimaryException.class ) == null ? Level.WARN : Level.DEBUG; } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 4c4d56cb60060..2201476d0f482 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -3627,7 +3627,8 @@ public int getActiveOperationsCount() { * listener handles all exception cases internally. 
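The retention-lease hunks above collapse the hand-rolled unwrap checks into the shared getExceptionLogLevel helper, which now also demotes NodeClosedException and RetryOnPrimaryException to DEBUG, since both are routine during shutdown and primary hand-off. A minimal restatement of the pattern with JDK-only types; the exception classes listed here are stand-ins for the Elasticsearch ones:

```java
import java.nio.channels.ClosedChannelException;
import java.util.List;

// Sketch of the getExceptionLogLevel() idea: failures that are expected while a
// node or shard is shutting down get DEBUG, everything else WARN. The cause-chain
// walk mimics what ExceptionsHelper.unwrap does.
final class SyncFailureLogLevel {
    private static final List<Class<? extends Throwable>> BENIGN = List.of(
        InterruptedException.class,
        ClosedChannelException.class
    );

    static String levelFor(Throwable failure) {
        int depth = 0;
        for (Throwable t = failure; t != null && depth < 10; t = t.getCause(), depth++) {
            for (Class<? extends Throwable> benign : BENIGN) {
                if (benign.isInstance(t)) {
                    return "DEBUG"; // expected during shutdown or shard lifecycle churn
                }
            }
        }
        return "WARN";
    }

    public static void main(String[] args) {
        System.out.println(levelFor(new RuntimeException(new InterruptedException()))); // DEBUG
        System.out.println(levelFor(new IllegalStateException("boom")));                // WARN
    }
}
```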
*/ public final void syncAfterWrite(Translog.Location location, Consumer syncListener) { - assert indexShardOperationPermits.getActiveOperationsCount() != 0; + // TODO AwaitsFix https://github.com/elastic/elasticsearch/issues/97183 + // assert indexShardOperationPermits.getActiveOperationsCount() != 0; verifyNotClosed(); getEngine().asyncEnsureTranslogSynced(location, syncListener); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java index 3ab70a37a3c01..ca9de756ca211 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java @@ -59,7 +59,7 @@ final class ShardSplittingQuery extends Query { this.indexMetadata = indexMetadata; this.indexRouting = IndexRouting.fromIndexMetadata(indexMetadata); this.shardId = shardId; - this.nestedParentBitSetProducer = hasNested ? newParentDocBitSetProducer(indexMetadata.getCreationVersion().indexVersion) : null; + this.nestedParentBitSetProducer = hasNested ? newParentDocBitSetProducer(indexMetadata.getCreationVersion()) : null; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java b/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java index 67f3b463834f7..a8693f8fcc756 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java +++ b/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java @@ -101,7 +101,12 @@ public void reset() throws IOException { @Override public int read() throws IOException { - return readByte() & 0xFF; + int b = delegate.read(); + if (b == -1) { + return b; + } + digest.update((byte) b); + return b; } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 09e4b798830f2..de47fb04bf087 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -1805,4 +1805,7 @@ public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) { return timestampFieldMapperService.getTimestampFieldType(index); } + public IndexScopedSettings getIndexScopedSettings() { + return indexScopedSettings; + } } diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index d92b1935fd79e..a81be7fb037f8 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -931,10 +931,6 @@ private class FailedShardHandler implements Consumer { @Override public void accept(final IndexShard.ShardFailure shardFailure) { final ShardRouting shardRouting = shardFailure.routing(); - if (shardRouting.initializing()) { - // no need to fail the shard here during recovery, the recovery code will take care of failing it - return; - } threadPool.generic().execute(() -> { synchronized (IndicesClusterStateService.this) { failAndRemoveShard( diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 
b3f2c60f90740..8eb3894c7c87b 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -16,8 +16,10 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ChannelActionListener; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -90,6 +92,7 @@ public static class Actions { public static final String HANDOFF_PRIMARY_CONTEXT = "internal:index/shard/recovery/handoff_primary_context"; } + private final Client client; private final ThreadPool threadPool; private final TransportService transportService; @@ -101,12 +104,14 @@ public static class Actions { private final RecoveriesCollection onGoingRecoveries; public PeerRecoveryTargetService( + Client client, ThreadPool threadPool, TransportService transportService, RecoverySettings recoverySettings, ClusterService clusterService, SnapshotFilesProvider snapshotFilesProvider ) { + this.client = client; this.threadPool = threadPool; this.transportService = transportService; this.recoverySettings = recoverySettings; @@ -289,7 +294,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi assert preExistingRequest == null; assert indexShard.indexSettings().getIndexMetadata().isSearchableSnapshot() == false; ActionListener.run(cleanupOnly.map(v -> { - logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId()); + logger.trace("{} preparing unpromotable shard for recovery", recoveryTarget.shardId()); indexShard.prepareForIndexRecovery(); // Skip unnecessary intermediate stages recoveryState.setStage(RecoveryState.Stage.VERIFY_INDEX); @@ -303,6 +308,35 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi return; } + if (indexShard.routingEntry().isSearchable() == false && recoveryState.getPrimary()) { + assert preExistingRequest == null; + assert indexShard.indexSettings().getIndexMetadata().isSearchableSnapshot() == false; + try (onCompletion) { + client.execute( + StatelessPrimaryRelocationAction.INSTANCE, + new StatelessPrimaryRelocationAction.Request( + recoveryId, + indexShard.shardId(), + transportService.getLocalNode(), + indexShard.routingEntry().allocationId().getId() + ), + new ActionListener<>() { + @Override + public void onResponse(ActionResponse.Empty ignored) { + onGoingRecoveries.markRecoveryAsDone(recoveryId); + } + + @Override + public void onFailure(Exception e) { + // TODO retries? 
See RecoveryResponseHandler#handleException + onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(recoveryState, null, e), true); + } + } + ); + return; + } + } + record StartRecoveryRequestToSend(StartRecoveryRequest startRecoveryRequest, String actionName, TransportRequest requestToSend) {} final ActionListener<StartRecoveryRequestToSend> toSendListener = cleanupOnly.map(r -> { logger.trace( diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index b67930db111e8..c3db9cc118356 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -1088,6 +1088,7 @@ protected Node( b.bind(PeerRecoveryTargetService.class) .toInstance( new PeerRecoveryTargetService( + client, threadPool, transportService, recoverySettings, diff --git a/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java b/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java index a61f0fc6721af..ff6d21f3039a8 100644 --- a/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java @@ -8,10 +8,10 @@ package org.elasticsearch.plugins; -import org.elasticsearch.Version; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.repositories.Repository; import org.elasticsearch.snapshots.Snapshot; @@ -67,7 +67,7 @@ default Map<String, Repository.Factory> getInternalRepositories( * * returns null if no check is provided */ - default BiConsumer<Snapshot, Version> addPreRestoreVersionCheck() { + default BiConsumer<Snapshot, IndexVersion> addPreRestoreVersionCheck() { return null; } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java index 370c1a3a2b977..621bd98e3f299 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java @@ -8,11 +8,11 @@ package org.elasticsearch.repositories; -import org.elasticsearch.Version; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.fs.FsRepository; @@ -85,9 +85,9 @@ public RepositoriesModule( } } - List<BiConsumer<Snapshot, Version>> preRestoreChecks = new ArrayList<>(); + List<BiConsumer<Snapshot, IndexVersion>> preRestoreChecks = new ArrayList<>(); for (RepositoryPlugin repoPlugin : repoPlugins) { - BiConsumer<Snapshot, Version> preRestoreCheck = repoPlugin.addPreRestoreVersionCheck(); + BiConsumer<Snapshot, IndexVersion> preRestoreCheck = repoPlugin.addPreRestoreVersionCheck(); if (preRestoreCheck != null) { preRestoreChecks.add(preRestoreCheck); } @@ -100,7 +100,7 @@ public RepositoriesModule( "the snapshot was created with Elasticsearch version [" + version + "] which is below the current versions minimum index compatibility version [" - + Version.CURRENT.minimumIndexCompatibilityVersion() + + IndexVersion.MINIMUM_COMPATIBLE + "]" ); } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java
b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 531f430f01e05..dd0eed50e0432 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; @@ -43,6 +42,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.threadpool.ThreadPool; @@ -100,7 +100,7 @@ public class RepositoriesService extends AbstractLifecycleComponent implements C private volatile Map<String, Repository> repositories = Collections.emptyMap(); private final RepositoriesStatsArchive repositoriesStatsArchive; - private final List<BiConsumer<Snapshot, Version>> preRestoreChecks; + private final List<BiConsumer<Snapshot, IndexVersion>> preRestoreChecks; public RepositoriesService( Settings settings, @@ -109,7 +109,7 @@ public RepositoriesService( Map<String, Repository.Factory> typesRegistry, Map<String, Repository.Factory> internalTypesRegistry, ThreadPool threadPool, - List<BiConsumer<Snapshot, Version>> preRestoreChecks + List<BiConsumer<Snapshot, IndexVersion>> preRestoreChecks ) { this.typesRegistry = typesRegistry; this.internalTypesRegistry = internalTypesRegistry; @@ -903,7 +903,7 @@ private static RepositoryConflictException newRepositoryConflictException(String ); } - public List<BiConsumer<Snapshot, Version>> getPreRestoreVersionChecks() { + public List<BiConsumer<Snapshot, IndexVersion>> getPreRestoreVersionChecks() { return preRestoreChecks; } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java index 089b5a6e639ba..1f12ac8b43460 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotState; @@ -309,7 +310,7 @@ public SnapshotState getSnapshotState(final SnapshotId snapshotId) { * Returns the {@link Version} for the given snapshot or {@code null} if unknown.
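The RepositoryData hunks that follow keep the version field readable across the Version-to-IndexVersion split: snapshots from before 8.9.0 are still written in the legacy "x.y.z" string form while newer ones store the raw integer id, and parseSnapshots() dispatches on the token type (VALUE_STRING vs VALUE_NUMBER). A stand-alone sketch of that dual-format round trip, using the id scheme visible in IndexVersion above; every type here is a simplified stand-in, not an Elasticsearch class:

```java
// Sketch of the dual-format snapshot version field: legacy string for versions
// before the 8.9.0 split, raw integer id afterwards. The id layout mirrors the
// constants above (major * 1_000_000 + minor * 10_000 + patch * 100 + 99).
final class VersionField {
    static final int SPLIT_ID = 8_09_00_99; // 8.9.0, per the ids registered in IndexVersion

    static Object write(int indexVersionId) {
        if (indexVersionId < SPLIT_ID) {
            // legacy readers expect "major.minor.patch"
            int major = indexVersionId / 1_000_000;
            int minor = (indexVersionId % 1_000_000) / 10_000;
            int patch = (indexVersionId % 10_000) / 100;
            return major + "." + minor + "." + patch;
        }
        return indexVersionId; // separated index version: raw id
    }

    static int parse(Object field) {
        if (field instanceof String s) { // VALUE_STRING: 8.9.0 or before
            String[] parts = s.split("\\.");
            return Integer.parseInt(parts[0]) * 1_000_000
                + Integer.parseInt(parts[1]) * 10_000
                + Integer.parseInt(parts[2]) * 100
                + 99;
        } else if (field instanceof Integer id) { // VALUE_NUMBER: raw id
            return id;
        }
        throw new IllegalStateException("Unexpected field type " + field);
    }

    public static void main(String[] args) {
        System.out.println(write(8_08_02_99));           // "8.8.2" (legacy string form)
        System.out.println(write(8_10_00_99));           // 8100099 (raw id)
        System.out.println(parse("8.8.2") == 8_08_02_99); // true: round trip holds
    }
}
```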
*/ @Nullable - public Version getVersion(SnapshotId snapshotId) { + public IndexVersion getVersion(SnapshotId snapshotId) { return snapshotsDetails.getOrDefault(snapshotId.getUUID(), SnapshotDetails.EMPTY).getVersion(); } @@ -731,9 +732,13 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final } builder.endObject(); } - final Version version = snapshotDetails.getVersion(); + final IndexVersion version = snapshotDetails.getVersion(); if (version != null) { - builder.field(VERSION, version.toString()); + if (version.before(IndexVersion.V_8_9_0)) { + builder.field(VERSION, Version.fromId(version.id()).toString()); + } else { + builder.field(VERSION, version.id()); + } } if (snapshotDetails.getStartTimeMillis() != -1) { @@ -903,13 +908,13 @@ private static void parseSnapshots( String uuid = null; SnapshotState state = null; Map metaGenerations = null; - Version version = null; + IndexVersion version = null; long startTimeMillis = -1; long endTimeMillis = -1; String slmPolicy = null; while (parser.nextToken() != XContentParser.Token.END_OBJECT) { String currentFieldName = parser.currentName(); - parser.nextToken(); + var token = parser.nextToken(); switch (currentFieldName) { case NAME -> name = parser.text(); case UUID -> uuid = parser.text(); @@ -918,7 +923,13 @@ private static void parseSnapshots( HashMap::new, p -> stringDeduplicator.computeIfAbsent(p.text(), Function.identity()) ); - case VERSION -> version = Version.fromString(parser.text()); + case VERSION -> { + switch (token) { + case VALUE_STRING -> version = IndexVersion.fromId(Version.fromString(parser.text()).id); // 8.9.0 or before + case VALUE_NUMBER -> version = IndexVersion.fromId(parser.intValue()); // separated index version + default -> throw new IllegalStateException("Unexpected token type " + token); + } + } case START_TIME_MILLIS -> { assert startTimeMillis == -1; startTimeMillis = parser.longValue(); @@ -1051,7 +1062,7 @@ public static class SnapshotDetails { private final SnapshotState snapshotState; @Nullable // may be omitted if pre-7.6 nodes were involved somewhere - private final Version version; + private final IndexVersion version; // May be -1 if unknown, which happens if the snapshot was taken before 7.14 and hasn't been updated yet private final long startTimeMillis; @@ -1066,7 +1077,7 @@ public static class SnapshotDetails { public SnapshotDetails( @Nullable SnapshotState snapshotState, - @Nullable Version version, + @Nullable IndexVersion version, long startTimeMillis, long endTimeMillis, @Nullable String slmPolicy @@ -1084,7 +1095,7 @@ public SnapshotState getSnapshotState() { } @Nullable - public Version getVersion() { + public IndexVersion getVersion() { return version; } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index d94f1495b4be4..69df0589c3b70 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -77,6 +77,7 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; import org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException; @@ -1373,7 
+1374,7 @@ public void finalizeSnapshot(final FinalizeSnapshotContext finalizeSnapshotConte final String slmPolicy = slmPolicy(snapshotInfo); final SnapshotDetails snapshotDetails = new SnapshotDetails( snapshotInfo.state(), - Version.CURRENT, + IndexVersion.current(), snapshotInfo.startTime(), snapshotInfo.endTime(), slmPolicy diff --git a/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java b/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java index 03bd53c0a4c3b..e4868a3937e96 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java @@ -8,10 +8,11 @@ package org.elasticsearch.reservedstate.action; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.elasticsearch.action.admin.cluster.settings.TransportClusterUpdateSettingsAction; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.SettingsUpdater; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.xcontent.XContentParser; @@ -31,6 +32,8 @@ */ public class ReservedClusterSettingsAction implements ReservedClusterStateHandler<Map<String, Object>> { + private static final Logger logger = LogManager.getLogger(ReservedClusterSettingsAction.class); + public static final String NAME = "cluster_settings"; private final ClusterSettings clusterSettings; @@ -73,12 +76,13 @@ public TransformState transform(Object input, TransformState prevState) { validate(request); } - ClusterState state = prevState.state(); - - TransportClusterUpdateSettingsAction.ClusterUpdateSettingsTask updateSettingsTask = - new TransportClusterUpdateSettingsAction.ClusterUpdateSettingsTask(clusterSettings, request); + final var state = new SettingsUpdater(clusterSettings).updateSettings( + prevState.state(), + request.transientSettings(), + request.persistentSettings(), + logger + ); - state = updateSettingsTask.execute(state); Set<String> currentKeys = request.persistentSettings() .keySet() .stream() diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java index 86a8295e872a8..5e8dac7fef115 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.tasks.TaskId; import java.io.IOException; @@ -21,8 +22,10 @@ import java.util.function.Supplier; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.Scope.INTERNAL; import static org.elasticsearch.rest.action.admin.cluster.RestListTasksAction.listTasksResponseListener; +@ServerlessScope(INTERNAL) public class RestCancelTasksAction extends BaseRestHandler { private final Supplier<DiscoveryNodes> nodesInCluster; diff --git
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetTaskAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetTaskAction.java index a2f948b448e9a..0b7f9f3907ee3 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetTaskAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.tasks.TaskId; @@ -20,7 +21,9 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.Scope.PUBLIC; +@ServerlessScope(PUBLIC) public class RestGetTaskAction extends BaseRestHandler { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java index cbf8baa9a2ea9..3fa9a104ca71a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; import org.elasticsearch.rest.action.RestChunkedToXContentListener; import org.elasticsearch.tasks.TaskId; @@ -27,7 +28,9 @@ import java.util.function.Supplier; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.Scope.INTERNAL; +@ServerlessScope(INTERNAL) public class RestListTasksAction extends BaseRestHandler { private final Supplier nodesInCluster; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAddIndexBlockAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAddIndexBlockAction.java index 4a875454ad54f..96b7bf0100fd7 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAddIndexBlockAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAddIndexBlockAction.java @@ -15,13 +15,16 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.Scope.PUBLIC; +@ServerlessScope(PUBLIC) public class RestAddIndexBlockAction extends BaseRestHandler { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java index 2b0ce6bf7cf78..fb7ac3dbf61be 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import 
org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -21,7 +22,9 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.Scope.INTERNAL; +@ServerlessScope(INTERNAL) public class RestFlushAction extends BaseRestHandler { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java index 00900ebd6ee80..12c3a28d79781 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java @@ -55,6 +55,7 @@ public final RestChannelConsumer prepareRequest(final RestRequest request, final return channel -> client.admin().indices().resizeIndex(resizeRequest, new RestToXContentListener<>(channel)); } + // no @ServerlessScope on purpose, not available public static class RestShrinkIndexAction extends RestResizeHandler { @Override @@ -74,6 +75,7 @@ protected ResizeType getResizeType() { } + // no @ServerlessScope on purpose, not available public static class RestSplitIndexAction extends RestResizeHandler { @Override @@ -93,6 +95,7 @@ protected ResizeType getResizeType() { } + // no @ServerlessScope on purpose, not available public static class RestCloneIndexAction extends RestResizeHandler { @Override diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 8f713b5b012dd..643aa6cff272e 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -713,33 +713,37 @@ public void executeQueryPhase(QuerySearchRequest request, SearchShardTask task, final ReaderContext readerContext = findReaderContext(request.contextId(), request.shardSearchRequest()); final ShardSearchRequest shardSearchRequest = readerContext.getShardSearchRequest(request.shardSearchRequest()); final Releasable markAsUsed = readerContext.markAsUsed(getKeepAlive(shardSearchRequest)); - runAsync(getExecutor(readerContext.indexShard()), () -> { - readerContext.setAggregatedDfs(request.dfs()); - try ( - SearchContext searchContext = createContext(readerContext, shardSearchRequest, task, ResultsType.QUERY, true); - SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(searchContext) - ) { - searchContext.searcher().setAggregatedDfs(request.dfs()); - QueryPhase.execute(searchContext); - if (searchContext.queryResult().hasSearchContext() == false && readerContext.singleSession()) { - // no hits, we can release the context since there will be no fetch phase - freeReaderContext(readerContext.id()); + rewriteAndFetchShardRequest(readerContext.indexShard(), shardSearchRequest, listener.delegateFailure((l, rewritten) -> { + // fork the execution in the search thread pool + runAsync(getExecutor(readerContext.indexShard()), () -> { + readerContext.setAggregatedDfs(request.dfs()); + try ( + SearchContext searchContext = createContext(readerContext, shardSearchRequest, task, ResultsType.QUERY, true); + SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(searchContext) + ) { + searchContext.searcher().setAggregatedDfs(request.dfs()); + 
QueryPhase.execute(searchContext); + if (searchContext.queryResult().hasSearchContext() == false && readerContext.singleSession()) { + // no hits, we can release the context since there will be no fetch phase + freeReaderContext(readerContext.id()); + } + executor.success(); + // Pass the rescoreDocIds to the queryResult to send them to the coordinating node + // and receive them back in the fetch phase. + // We also pass the rescoreDocIds to the LegacyReaderContext in case the search state needs to stay in the data node. + final RescoreDocIds rescoreDocIds = searchContext.rescoreDocIds(); + searchContext.queryResult().setRescoreDocIds(rescoreDocIds); + readerContext.setRescoreDocIds(rescoreDocIds); + searchContext.queryResult().incRef(); + return searchContext.queryResult(); + } catch (Exception e) { + assert TransportActions.isShardNotAvailableException(e) == false : new AssertionError(e); + logger.trace("Query phase failed", e); + // we handle the failure in the failure listener below + throw e; + } - executor.success(); - // Pass the rescoreDocIds to the queryResult to send them the coordinating node and receive them back in the fetch phase. - // We also pass the rescoreDocIds to the LegacyReaderContext in case the search state needs to stay in the data node. - final RescoreDocIds rescoreDocIds = searchContext.rescoreDocIds(); - searchContext.queryResult().setRescoreDocIds(rescoreDocIds); - readerContext.setRescoreDocIds(rescoreDocIds); - searchContext.queryResult().incRef(); - return searchContext.queryResult(); - } catch (Exception e) { - assert TransportActions.isShardNotAvailableException(e) == false : new AssertionError(e); - logger.trace("Query phase failed", e); - // we handle the failure in the failure listener below - throw e; - } - }, wrapFailureListener(listener, readerContext, markAsUsed)); + }, wrapFailureListener(l, readerContext, markAsUsed)); + })); } private Executor getExecutor(IndexShard indexShard) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java index 7629f3893d33a..5f16a953f802f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java @@ -451,8 +451,15 @@ public String getDescription() { MappedFieldType fieldType = fieldType(); if (fieldType != null) { - return "Field [" + fieldType.name() + "] of type [" + fieldType.typeName() + "]"; + String typeName = fieldType.typeName(); + String valuesSourceTypeName = valuesSourceType.typeName(); + if (valuesSourceType instanceof TimeSeriesValuesSourceType) { + return "Field [" + fieldType.name() + "] of type [" + typeName + "][" + valuesSourceTypeName + "]"; + } else { + // Avoid repeated names. Currently only time series values source types have a different behaviour/validation.
+ return "Field [" + fieldType.name() + "] of type [" + typeName + "]"; + } } - return "unmapped field"; + return "unmapped field with value source type [" + valuesSourceType.typeName() + "]"; } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 46bacb88e2095..016d208d591e0 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -981,7 +981,7 @@ static void validateSnapshotRestorable( RestoreSnapshotRequest request, RepositoryMetadata repository, SnapshotInfo snapshotInfo, - List> preRestoreVersionChecks + List> preRestoreVersionChecks ) { if (snapshotInfo.state().restorable() == false) { throw new SnapshotRestoreException( @@ -989,7 +989,7 @@ static void validateSnapshotRestorable( "unsupported snapshot state [" + snapshotInfo.state() + "]" ); } - if (Version.CURRENT.before(snapshotInfo.version())) { + if (IndexVersion.current().before(snapshotInfo.version())) { throw new SnapshotRestoreException( new Snapshot(repository.name(), snapshotInfo.snapshotId()), "the snapshot was created with Elasticsearch version [" @@ -1594,7 +1594,7 @@ private static IndexMetadata convertLegacyIndex( ClusterState clusterState, IndicesService indicesService ) { - if (snapshotIndexMetadata.getCreationVersion().before(Version.fromString("5.0.0"))) { + if (snapshotIndexMetadata.getCreationVersion().before(IndexVersion.fromId(5000099))) { throw new IllegalArgumentException("can't restore an index created before version 5.0.0"); } IndexMetadata.Builder convertedIndexMetadataBuilder = IndexMetadata.builder(snapshotIndexMetadata); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index c16ddd831f78c..32cd9b8b74462 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.snapshots; -import org.elasticsearch.Version; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.cluster.SnapshotsInProgress; @@ -20,6 +19,7 @@ import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.RepositoryShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -188,7 +188,7 @@ public SnapshotInfo build() { } SnapshotState snapshotState = state == null ? null : SnapshotState.valueOf(state); - Version version = this.version == -1 ? Version.CURRENT : Version.fromId(this.version); + IndexVersion version = this.version == -1 ? IndexVersion.current() : IndexVersion.fromId(this.version); int totalShards = shardStatsBuilder == null ? 0 : shardStatsBuilder.getTotalShards(); int successfulShards = shardStatsBuilder == null ? 
0 : shardStatsBuilder.getSuccessfulShards(); @@ -313,7 +313,7 @@ int getSuccessfulShards() { private final Map<String, Object> userMetadata; @Nullable - private final Version version; + private final IndexVersion version; private final List<SnapshotShardFailure> shardFailures; @@ -350,7 +350,7 @@ public SnapshotInfo( List<String> indices, List<String> dataStreams, List<SnapshotFeatureInfo> featureStates, - Version version, + IndexVersion version, SnapshotState state ) { this( @@ -389,7 +389,7 @@ public static SnapshotInfo inProgress(SnapshotsInProgress.Entry entry) { entry.dataStreams(), entry.featureStates(), null, - Version.CURRENT, + IndexVersion.current(), entry.startTime(), 0L, totalShards, @@ -422,7 +422,7 @@ public SnapshotInfo( dataStreams, featureStates, reason, - Version.CURRENT, + IndexVersion.current(), startTime, endTime, totalShards, @@ -441,7 +441,7 @@ public SnapshotInfo( List<String> dataStreams, List<SnapshotFeatureInfo> featureStates, String reason, - Version version, + IndexVersion version, long startTime, long endTime, int totalShards, @@ -510,7 +510,7 @@ public static SnapshotInfo readFrom(final StreamInput in) throws IOException { final int totalShards = in.readVInt(); final int successfulShards = in.readVInt(); final List<SnapshotShardFailure> shardFailures = in.readImmutableList(SnapshotShardFailure::new); - final Version version = in.readBoolean() ? Version.readVersion(in) : null; + final IndexVersion version = in.readBoolean() ? IndexVersion.readVersion(in) : null; final Boolean includeGlobalState = in.readOptionalBoolean(); final Map<String, Object> userMetadata = in.readMap(); final List<String> dataStreams = in.readImmutableStringList(); @@ -666,7 +666,7 @@ public List<SnapshotShardFailure> shardFailures() { * @return version of elasticsearch that the snapshot was created with */ @Nullable - public Version version() { + public IndexVersion version() { return version; } @@ -769,7 +769,7 @@ public XContentBuilder toXContentExternal(final XContentBuilder builder, final T } if (version != null) { - builder.field(VERSION_ID, version.id); + builder.field(VERSION_ID, version.id()); builder.field(VERSION, version.toString()); } @@ -848,7 +848,7 @@ public XContentBuilder toXContent(final XContentBuilder builder, final ToXConten builder.field(NAME, snapshotId.getName()); builder.field(UUID, snapshotId.getUUID()); assert version != null : "version must always be known when writing a snapshot metadata blob"; - builder.field(VERSION_ID, version.id); + builder.field(VERSION_ID, version.id()); builder.startArray(INDICES); for (String index : indices) { builder.value(index); @@ -903,7 +903,7 @@ public XContentBuilder toXContent(final XContentBuilder builder, final ToXConten public static SnapshotInfo fromXContentInternal(final String repoName, final XContentParser parser) throws IOException { String name = null; String uuid = null; - Version version = Version.CURRENT; + IndexVersion version = IndexVersion.current(); SnapshotState state = SnapshotState.IN_PROGRESS; String reason = null; List<String> indices = Collections.emptyList(); @@ -954,7 +954,7 @@ public static SnapshotInfo fromXContentInternal(final String repoName, final XCo successfulShards = parser.intValue(); break; case VERSION_ID: - version = Version.fromId(parser.intValue()); + version = IndexVersion.fromId(parser.intValue()); break; case INCLUDE_GLOBAL_STATE: includeGlobalState = parser.booleanValue(); @@ -1035,7 +1035,7 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeList(shardFailures); if (version != null) { out.writeBoolean(true); - Version.writeVersion(version, out); + IndexVersion.writeVersion(version, out); } else { out.writeBoolean(false); } diff --git
a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 69db481d3d82e..5507d94ae7dfd 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -72,6 +72,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.SystemDataStreamDescriptor; import org.elasticsearch.indices.SystemIndices; @@ -2168,7 +2169,7 @@ public static Version minCompatibleVersion( for (SnapshotId snapshotId : snapshotIds.stream() .filter(excluded == null ? sn -> true : Predicate.not(excluded::contains)) .toList()) { - final Version known = repositoryData.getVersion(snapshotId); + final IndexVersion known = repositoryData.getVersion(snapshotId); // If we don't have the version cached in the repository data yet we load it from the snapshot info blobs if (known == null) { assert repositoryData.shardGenerations().totalShards() == 0 @@ -2179,7 +2180,7 @@ public static Version minCompatibleVersion( + "]"; return OLD_SNAPSHOT_FORMAT; } else { - minCompatVersion = minCompatVersion.before(known) ? minCompatVersion : known; + minCompatVersion = minCompatVersion.before(known.toVersion()) ? minCompatVersion : known.toVersion(); } } return minCompatVersion; diff --git a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java index b7b5829a9c433..a67ca691eb8f2 100644 --- a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java +++ b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java @@ -55,7 +55,7 @@ import java.util.function.Consumer; import java.util.stream.Collectors; -import static org.elasticsearch.action.admin.cluster.migration.TransportGetFeatureUpgradeStatusAction.NO_UPGRADE_REQUIRED_VERSION; +import static org.elasticsearch.action.admin.cluster.migration.TransportGetFeatureUpgradeStatusAction.NO_UPGRADE_REQUIRED_INDEX_VERSION; import static org.elasticsearch.cluster.metadata.IndexMetadata.State.CLOSE; import static org.elasticsearch.core.Strings.format; @@ -359,7 +359,7 @@ private static boolean needsToBeMigrated(IndexMetadata indexMetadata) { if (indexMetadata == null) { return false; } - return indexMetadata.isSystem() && indexMetadata.getCreationVersion().before(NO_UPGRADE_REQUIRED_VERSION); + return indexMetadata.isSystem() && indexMetadata.getCreationVersion().before(NO_UPGRADE_REQUIRED_INDEX_VERSION); } private void migrateSingleIndex(ClusterState clusterState, Consumer listener) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponseTests.java index b7242c4ffcee5..1488b6ac519f7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponseTests.java @@ -8,8 +8,8 @@ package org.elasticsearch.action.admin.cluster.migration; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.index.IndexVersion; import 
org.elasticsearch.test.AbstractWireSerializingTestCase; import java.util.Collections; @@ -89,7 +89,7 @@ public void testUpgradeStatusCominations() { private static GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus createFeatureStatus() { return new GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus( randomAlphaOfLengthBetween(3, 20), - randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), + randomFrom(IndexVersion.current(), IndexVersion.MINIMUM_COMPATIBLE), randomFrom(org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.values()), randomList(4, GetFeatureUpgradeStatusResponseTests::getIndexInfo) ); @@ -98,7 +98,7 @@ private static GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus createFeatur private static GetFeatureUpgradeStatusResponse.IndexInfo getIndexInfo() { return new GetFeatureUpgradeStatusResponse.IndexInfo( randomAlphaOfLengthBetween(3, 20), - randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), + randomFrom(IndexVersion.current(), IndexVersion.MINIMUM_COMPATIBLE), null ); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java index cb6f711f6b1ef..a52f0cfd080e9 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.SystemIndexDescriptorUtils; import org.elasticsearch.indices.SystemIndices; @@ -24,13 +25,14 @@ import static org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.MIGRATION_NEEDED; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; public class TransportGetFeatureUpgradeStatusActionTests extends ESTestCase { public static String TEST_SYSTEM_INDEX_PATTERN = ".test*"; + private static final IndexVersion TEST_OLD_VERSION = IndexVersion.fromId(6000099); private static final ClusterState CLUSTER_STATE = getClusterState(); private static final SystemIndices.Feature FEATURE = getFeature(); - private static final Version TEST_OLD_VERSION = Version.fromString("6.0.0"); public void testGetFeatureStatus() { GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus status = TransportGetFeatureUpgradeStatusAction.getFeatureUpgradeStatus( @@ -41,7 +43,7 @@ public void testGetFeatureStatus() { assertThat(status.getUpgradeStatus(), equalTo(MIGRATION_NEEDED)); assertThat(status.getFeatureName(), equalTo("test-feature")); assertThat(status.getMinimumIndexVersion(), equalTo(TEST_OLD_VERSION)); - assertThat(status.getIndexVersions().size(), equalTo(2)); // additional testing below + assertThat(status.getIndexVersions(), hasSize(2)); // additional testing below } public void testGetIndexInfos() { @@ -50,11 +52,11 @@ public void testGetIndexInfos() { FEATURE ); - assertThat(versions.size(), equalTo(2)); + assertThat(versions, hasSize(2)); { GetFeatureUpgradeStatusResponse.IndexInfo version = versions.get(0); - 
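These feature-upgrade checks now express compatibility bounds as IndexVersion values instead of node Versions. A minimal sketch of how those bounds relate, using only the IndexVersion helpers already appearing in this diff (fromId, current, MINIMUM_COMPATIBLE); the class name is illustrative:

import org.elasticsearch.index.IndexVersion;

class IndexVersionBoundsSketch {
    public static void main(String[] args) {
        // ids follow the numeric scheme used in these tests, e.g. 6000099 for a 6.0.0-era index
        IndexVersion old = IndexVersion.fromId(6000099);
        // a 6.x-era index sorts before the oldest version the current node can still read
        assert old.before(IndexVersion.MINIMUM_COMPATIBLE);
        assert IndexVersion.MINIMUM_COMPATIBLE.onOrBefore(IndexVersion.current());
        System.out.println(old.id() + " -> " + old);
    }
}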
assertThat(version.getVersion(), equalTo(Version.CURRENT)); + assertThat(version.getVersion(), equalTo(IndexVersion.current())); assertThat(version.getIndexName(), equalTo(".test-index-1")); } { @@ -77,7 +79,7 @@ private static SystemIndices.Feature getFeature() { private static ClusterState getClusterState() { IndexMetadata indexMetadata1 = IndexMetadata.builder(".test-index-1") - .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) + .settings(Settings.builder().put("index.version.created", IndexVersion.current().id()).build()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -86,7 +88,7 @@ private static ClusterState getClusterState() { assert Version.CURRENT.major < 9; IndexMetadata indexMetadata2 = IndexMetadata.builder(".test-index-2") - .settings(Settings.builder().put("index.version.created", Version.fromString("6.0.0")).build()) + .settings(Settings.builder().put("index.version.created", TEST_OLD_VERSION.id()).build()) .numberOfShards(1) .numberOfReplicas(0) .build(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java index 8ce910be0c49f..f5302554cec75 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java @@ -122,8 +122,8 @@ public void testToXContentWithDeprecatedClusterState() { "voting_only" ], "version": "%s", - "minIndexVersion": "%s", - "maxIndexVersion": "%s" + "min_index_version": %s, + "max_index_version": %s } }, "transport_versions": [ diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java index 02781a23c97f6..cbc7bd2983fea 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; @@ -28,7 +29,6 @@ import java.nio.file.Path; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -54,7 +54,7 @@ protected VersionStats mutateInstance(VersionStats instance) { return new VersionStats(instance.versionStats().stream().map(svs -> { return switch (randomIntBetween(1, 4)) { case 1 -> new VersionStats.SingleVersionStats( - Version.V_7_3_0, + IndexVersion.V_7_3_0, svs.indexCount, svs.primaryShardCount, svs.totalPrimaryByteCount @@ -93,8 +93,8 @@ public void testCreation() { .build(); stats = VersionStats.of(metadata, Collections.emptyList()); assertThat(stats.versionStats().size(), equalTo(2)); - VersionStats.SingleVersionStats s1 = new VersionStats.SingleVersionStats(Version.CURRENT, 2, 7, 0); - VersionStats.SingleVersionStats s2 = new VersionStats.SingleVersionStats(Version.V_7_0_0, 1, 2, 0); + VersionStats.SingleVersionStats s1 = new VersionStats.SingleVersionStats(IndexVersion.current(), 2, 7, 0); + 
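Cluster stats now bucket indices by the IndexVersion they were created with. A small sketch of the bucket shape these assertions rely on, assuming the public SingleVersionStats constructor exercised in this test (version, index count, primary shard count, total primary bytes):

import org.elasticsearch.action.admin.cluster.stats.VersionStats;
import org.elasticsearch.index.IndexVersion;

import java.util.List;

class VersionStatsBucketsSketch {
    // one bucket per distinct index creation version
    static List<VersionStats.SingleVersionStats> sampleBuckets() {
        return List.of(
            new VersionStats.SingleVersionStats(IndexVersion.current(), 2, 7, 0),
            new VersionStats.SingleVersionStats(IndexVersion.V_7_0_0, 1, 2, 0)
        );
    }
}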
VersionStats.SingleVersionStats s2 = new VersionStats.SingleVersionStats(IndexVersion.V_7_0_0, 1, 2, 0); assertThat(stats.versionStats(), containsInAnyOrder(s1, s2)); ShardId shardId = new ShardId("bar", "uuid", 0); @@ -132,8 +132,8 @@ public void testCreation() { stats = VersionStats.of(metadata, Collections.singletonList(nodeResponse)); assertThat(stats.versionStats().size(), equalTo(2)); - s1 = new VersionStats.SingleVersionStats(Version.CURRENT, 2, 7, 100); - s2 = new VersionStats.SingleVersionStats(Version.V_7_0_0, 1, 2, 0); + s1 = new VersionStats.SingleVersionStats(IndexVersion.current(), 2, 7, 100); + s2 = new VersionStats.SingleVersionStats(IndexVersion.V_7_0_0, 1, 2, 0); assertThat(stats.versionStats(), containsInAnyOrder(s1, s2)); } @@ -142,9 +142,9 @@ private static IndexMetadata indexMeta(String name, Version version, int primary } public static VersionStats randomInstance() { - List versions = Arrays.asList(Version.CURRENT, Version.V_7_0_0, Version.V_7_1_0, Version.V_7_2_0); + List versions = List.of(IndexVersion.current(), IndexVersion.V_7_0_0, IndexVersion.V_7_1_0, IndexVersion.V_7_2_0); List stats = new ArrayList<>(); - for (Version v : versions) { + for (IndexVersion v : versions) { VersionStats.SingleVersionStats s = new VersionStats.SingleVersionStats( v, randomIntBetween(10, 20), diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index 5345fa5539d64..ba40126610c6a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -202,8 +202,8 @@ public void testToXContent() throws IOException { "voting_only" ], "version": "%s", - "minIndexVersion":"%s", - "maxIndexVersion":"%s" + "min_index_version":%s, + "max_index_version":%s } }, "transport_versions" : [ @@ -457,8 +457,8 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti "voting_only" ], "version" : "%s", - "minIndexVersion" : "%s", - "maxIndexVersion" : "%s" + "min_index_version" : %s, + "max_index_version" : %s } }, "transport_versions" : [ @@ -708,8 +708,8 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti "voting_only" ], "version" : "%s", - "minIndexVersion" : "%s", - "maxIndexVersion" : "%s" + "min_index_version" : %s, + "max_index_version" : %s } }, "transport_versions" : [ diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java index a1baf8c1c4870..89c624711c2d2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java @@ -120,12 +120,7 @@ private static Settings randomSettings() { } private static DataLifecycle randomLifecycle() { - return switch (randomIntBetween(0, 3)) { - case 0 -> DataLifecycleTests.IMPLICIT_INFINITE_RETENTION; - case 1 -> Template.NO_LIFECYCLE; - case 2 -> DataLifecycleTests.EXPLICIT_INFINITE_RETENTION; - default -> new DataLifecycle(randomMillisUpToYear9999()); - }; + return rarely() ? 
Template.NO_LIFECYCLE : DataLifecycleTests.randomLifecycle(); } private static Map randomMeta() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataLifecycleTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataLifecycleTests.java index 6a381f1f764bc..a128ba8629749 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataLifecycleTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataLifecycleTests.java @@ -11,12 +11,15 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverConditions; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.action.admin.indices.rollover.RolloverConfigurationTests; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; @@ -25,6 +28,8 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.Set; import static org.hamcrest.Matchers.containsString; @@ -33,9 +38,6 @@ public class DataLifecycleTests extends AbstractXContentSerializingTestCase { - public static final DataLifecycle EXPLICIT_INFINITE_RETENTION = new DataLifecycle(DataLifecycle.Retention.NULL); - public static final DataLifecycle IMPLICIT_INFINITE_RETENTION = new DataLifecycle((TimeValue) null); - @Override protected Writeable.Reader instanceReader() { return DataLifecycle::new; @@ -43,28 +45,48 @@ protected Writeable.Reader instanceReader() { @Override protected DataLifecycle createTestInstance() { - return switch (randomInt(2)) { - case 0 -> IMPLICIT_INFINITE_RETENTION; - case 1 -> EXPLICIT_INFINITE_RETENTION; - default -> new DataLifecycle(randomMillisUpToYear9999()); - }; + return randomLifecycle(); } @Override protected DataLifecycle mutateInstance(DataLifecycle instance) throws IOException { - if (IMPLICIT_INFINITE_RETENTION.equals(instance)) { - return randomBoolean() ? EXPLICIT_INFINITE_RETENTION : new DataLifecycle(randomMillisUpToYear9999()); - } - if (EXPLICIT_INFINITE_RETENTION.equals(instance)) { - return randomBoolean() ? 
IMPLICIT_INFINITE_RETENTION : new DataLifecycle(randomMillisUpToYear9999()); + var retention = instance.getDataRetention(); + var downsampling = instance.getDownsampling(); + if (randomBoolean()) { + if (retention == null || retention == DataLifecycle.Retention.NULL) { + retention = randomValueOtherThan(retention, DataLifecycleTests::randomRetention); + } else { + retention = switch (randomInt(2)) { + case 0 -> null; + case 1 -> DataLifecycle.Retention.NULL; + default -> new DataLifecycle.Retention( + TimeValue.timeValueMillis(randomValueOtherThan(retention.value().millis(), ESTestCase::randomMillisUpToYear9999)) + ); + }; + } + } else { + if (downsampling == null || downsampling == DataLifecycle.Downsampling.NULL) { + downsampling = randomValueOtherThan(downsampling, DataLifecycleTests::randomDownsampling); + } else { + downsampling = switch (randomInt(2)) { + case 0 -> null; + case 1 -> DataLifecycle.Downsampling.NULL; + default -> { + if (downsampling.rounds().size() == 1) { + yield new DataLifecycle.Downsampling( + List.of(downsampling.rounds().get(0), nextRound(downsampling.rounds().get(0))) + ); + + } else { + var updatedRounds = new ArrayList<>(downsampling.rounds()); + updatedRounds.remove(randomInt(downsampling.rounds().size() - 1)); + yield new DataLifecycle.Downsampling(updatedRounds); + } + } + }; + } } - return switch (randomInt(2)) { - case 0 -> IMPLICIT_INFINITE_RETENTION; - case 1 -> EXPLICIT_INFINITE_RETENTION; - default -> new DataLifecycle( - randomValueOtherThan(instance.getEffectiveDataRetention().millis(), ESTestCase::randomMillisUpToYear9999) - ); - }; + return new DataLifecycle(retention, downsampling); } @Override @@ -120,4 +142,116 @@ public void testInvalidClusterSetting() { assertThat(exception.getMessage(), equalTo("The rollover conditions cannot be null or blank")); } } + + public void testInvalidDownsamplingConfiguration() { + { + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> new DataLifecycle.Downsampling( + List.of( + new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(10), + new DownsampleConfig(new DateHistogramInterval("2h")) + ), + new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(3), + new DownsampleConfig(new DateHistogramInterval("2h")) + ) + ) + ) + ); + assertThat( + exception.getMessage(), + equalTo("A downsampling round must have a later 'after' value than the proceeding, 3d is not after 10d.") + ); + } + { + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> new DataLifecycle.Downsampling( + List.of( + new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(10), + new DownsampleConfig(new DateHistogramInterval("2h")) + ), + new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(30), + new DownsampleConfig(new DateHistogramInterval("2h")) + ) + ) + ) + ); + assertThat(exception.getMessage(), equalTo("Downsampling interval [2h] must be greater than the source index interval [2h].")); + } + { + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> new DataLifecycle.Downsampling( + List.of( + new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(10), + new DownsampleConfig(new DateHistogramInterval("2h")) + ), + new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(30), + new DownsampleConfig(new DateHistogramInterval("3h")) + ) + ) + ) + ); + assertThat(exception.getMessage(), equalTo("Downsampling interval [3h] must be a multiple of the source index interval 
[2h].")); + } + { + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> new DataLifecycle.Downsampling(List.of()) + ); + assertThat(exception.getMessage(), equalTo("Downsampling configuration should have at least one round configured.")); + } + } + + @Nullable + public static DataLifecycle randomLifecycle() { + return new DataLifecycle(randomRetention(), randomDownsampling()); + } + + @Nullable + private static DataLifecycle.Retention randomRetention() { + return switch (randomInt(2)) { + case 0 -> null; + case 1 -> DataLifecycle.Retention.NULL; + default -> new DataLifecycle.Retention(TimeValue.timeValueMillis(randomMillisUpToYear9999())); + }; + } + + @Nullable + private static DataLifecycle.Downsampling randomDownsampling() { + return switch (randomInt(2)) { + case 0 -> null; + case 1 -> DataLifecycle.Downsampling.NULL; + default -> { + var count = randomIntBetween(0, 10); + List rounds = new ArrayList<>(); + var previous = new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(randomIntBetween(1, 365)), + new DownsampleConfig(new DateHistogramInterval(randomIntBetween(1, 24) + "h")) + ); + rounds.add(previous); + for (int i = 0; i < count; i++) { + DataLifecycle.Downsampling.Round round = nextRound(previous); + rounds.add(round); + previous = round; + } + yield new DataLifecycle.Downsampling(rounds); + } + }; + } + + private static DataLifecycle.Downsampling.Round nextRound(DataLifecycle.Downsampling.Round previous) { + var after = TimeValue.timeValueDays(previous.after().days() + randomIntBetween(1, 10)); + var fixedInterval = new DownsampleConfig( + new DateHistogramInterval((previous.config().getFixedInterval().estimateMillis() * randomIntBetween(2, 5)) + "ms") + ); + return new DataLifecycle.Downsampling.Round(after, fixedInterval); + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index d759952c73ed4..84a6e03ce8cc5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterState; @@ -31,6 +32,7 @@ import org.elasticsearch.indices.IndexTemplateMissingException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidIndexTemplateException; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -1500,11 +1502,20 @@ public void testResolveLifecycle() throws Exception { final MetadataIndexTemplateService service = getMetadataIndexTemplateService(); ClusterState state = ClusterState.EMPTY_STATE; + DataLifecycle emptyLifecycle = new DataLifecycle(); + DataLifecycle lifecycle30d = new DataLifecycle(TimeValue.timeValueDays(30)); String ct30d = "ct_30d"; state = addComponentTemplate(service, state, ct30d, lifecycle30d); - DataLifecycle 
lifecycle45d = new DataLifecycle(TimeValue.timeValueDays(45)); + DataLifecycle lifecycle45d = new DataLifecycle( + new DataLifecycle.Retention(TimeValue.timeValueDays(45)), + new DataLifecycle.Downsampling( + List.of( + new DataLifecycle.Downsampling.Round(TimeValue.timeValueDays(30), new DownsampleConfig(new DateHistogramInterval("3h"))) + ) + ) + ); String ct45d = "ct_45d"; state = addComponentTemplate(service, state, ct45d, lifecycle45d); @@ -1513,7 +1524,7 @@ public void testResolveLifecycle() throws Exception { state = addComponentTemplate(service, state, ctNullRetention, lifecycleNullRetention); String ctEmptyLifecycle = "ct_empty_lifecycle"; - state = addComponentTemplate(service, state, ctEmptyLifecycle, DataLifecycleTests.IMPLICIT_INFINITE_RETENTION); + state = addComponentTemplate(service, state, ctEmptyLifecycle, emptyLifecycle); String ctNullLifecycle = "ct_null_lifecycle"; state = addComponentTemplate(service, state, ctNullLifecycle, Template.NO_LIFECYCLE); @@ -1525,13 +1536,7 @@ public void testResolveLifecycle() throws Exception { // Component B: "lifecycle": {} // Composable Z: - // Result: "lifecycle": {} - assertLifecycleResolution( - service, - state, - List.of(ctNoLifecycle, ctEmptyLifecycle), - null, - DataLifecycleTests.IMPLICIT_INFINITE_RETENTION - ); + assertLifecycleResolution(service, state, List.of(ctNoLifecycle, ctEmptyLifecycle), null, emptyLifecycle); // Component A: "lifecycle": {} // Component B: "lifecycle": {"retention": "30d"} @@ -1540,16 +1545,22 @@ public void testResolveLifecycle() throws Exception { assertLifecycleResolution(service, state, List.of(ctEmptyLifecycle, ct30d), null, lifecycle30d); // Component A: "lifecycle": {"retention": "30d"} - // Component B: "lifecycle": {"retention": "45d"} + // Component B: "lifecycle": {"retention": "45d", "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} // Composable Z: "lifecycle": {} - // Result: "lifecycle": {"retention": "45d"} - assertLifecycleResolution(service, state, List.of(ct30d, ct45d), DataLifecycleTests.IMPLICIT_INFINITE_RETENTION, lifecycle45d); + // Result: "lifecycle": {"retention": "45d", "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} + assertLifecycleResolution(service, state, List.of(ct30d, ct45d), emptyLifecycle, lifecycle45d); // Component A: "lifecycle": {} - // Component B: "lifecycle": {"retention": "45d"} + // Component B: "lifecycle": {"retention": "45d", "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} // Composable Z: "lifecycle": {"retention": "30d"} - // Result: "lifecycle": {"retention": "30d"} - assertLifecycleResolution(service, state, List.of(ctEmptyLifecycle, ct45d), lifecycle30d, lifecycle30d); + // Result: "lifecycle": {"retention": "30d", "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} + assertLifecycleResolution( + service, + state, + List.of(ctEmptyLifecycle, ct45d), + lifecycle30d, + new DataLifecycle(lifecycle30d.getDataRetention(), lifecycle45d.getDownsampling()) + ); // Component A: "lifecycle": {"retention": "30d"} // Component B: "lifecycle": {"retention": null} @@ -1559,14 +1570,21 @@ public void testResolveLifecycle() throws Exception { assertLifecycleResolution(service, state, List.of(ct30d, ctNullRetention), null, lifecycleNullRetention); // Component A: "lifecycle": {} - // Component B: "lifecycle": {"retention": "45d"} + // Component B: "lifecycle": {"retention": "45d", "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} // Composable Z: "lifecycle": {"retention": null} - // Result: "lifecycle": 
{"retention": null} , here the result of the composition is with retention explicitly + // Result: "lifecycle": {"retention": null, "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} , here the result of the + // composition is with retention explicitly // nullified, but effectively this is equivalent to "lifecycle": {} when there is no further composition. - assertLifecycleResolution(service, state, List.of(ctEmptyLifecycle, ct45d), lifecycleNullRetention, lifecycleNullRetention); + assertLifecycleResolution( + service, + state, + List.of(ctEmptyLifecycle, ct45d), + lifecycleNullRetention, + new DataLifecycle(DataLifecycle.Retention.NULL, lifecycle45d.getDownsampling()) + ); // Component A: "lifecycle": {"retention": "30d"} - // Component B: "lifecycle": {"retention": "45d"} + // Component B: "lifecycle": {"retention": "45d", "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} // Composable Z: "lifecycle": null // Result: null aka unmanaged assertLifecycleResolution(service, state, List.of(ct30d, ct45d), Template.NO_LIFECYCLE, null); @@ -1579,8 +1597,8 @@ public void testResolveLifecycle() throws Exception { // Component A: "lifecycle": {"retention": "30d"} // Component B: "lifecycle": null - // Composable Z: "lifecycle": {"retention": "45d"} - // Result: "lifecycle": {"retention": "45d"} + // Composable Z: "lifecycle": {"retention": "45d", "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} + // Result: "lifecycle": {"retention": "45d", "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} assertLifecycleResolution(service, state, List.of(ct30d, ctNullLifecycle), lifecycle45d, lifecycle45d); } diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java index 76d43894a55f7..7ae05025ce069 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java @@ -214,8 +214,8 @@ public void testDiscoveryNodeToXContent() { "voting_only" ], "version" : "%s", - "minIndexVersion" : "%s", - "maxIndexVersion" : "%s" + "min_index_version" : %s, + "max_index_version" : %s } }""", transportAddress, diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java index 645b327e207c4..a8a2d05ac4b8e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java @@ -402,48 +402,43 @@ Set matchingNodeIds(DiscoveryNodes nodes) { abstract Set matchingNodeIds(DiscoveryNodes nodes); } - public void testMaxMinNodeVersion() { + public void testMinMaxNodeVersions() { assertEquals(Version.CURRENT, DiscoveryNodes.EMPTY_NODES.getMaxNodeVersion()); assertEquals(Version.CURRENT.minimumCompatibilityVersion(), DiscoveryNodes.EMPTY_NODES.getMinNodeVersion()); - - DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); - discoBuilder.add( - new DiscoveryNode( - "name_" + 1, - "node_" + 1, - buildNewFakeTransportAddress(), - Collections.emptyMap(), - new HashSet<>(randomSubsetOf(DiscoveryNodeRole.roles())), - VersionInformation.inferVersions(Version.fromString("5.1.0")) - ) + assertEquals(IndexVersion.current(), DiscoveryNodes.EMPTY_NODES.getMaxDataNodeCompatibleIndexVersion()); + assertEquals(IndexVersion.MINIMUM_COMPATIBLE, 
DiscoveryNodes.EMPTY_NODES.getMinSupportedIndexVersion()); + + // use a mix of versions with major, minor, and patch numbers + List dataVersions = List.of( + new VersionInformation(Version.fromString("3.2.5"), IndexVersion.fromId(2000099), IndexVersion.fromId(3020599)), + new VersionInformation(Version.fromString("3.0.7"), IndexVersion.fromId(2000099), IndexVersion.fromId(3000799)), + new VersionInformation(Version.fromString("2.1.0"), IndexVersion.fromId(1050099), IndexVersion.fromId(2010099)) ); - discoBuilder.add( - new DiscoveryNode( - "name_" + 2, - "node_" + 2, - buildNewFakeTransportAddress(), - Collections.emptyMap(), - new HashSet<>(randomSubsetOf(DiscoveryNodeRole.roles())), - VersionInformation.inferVersions(Version.fromString("6.3.0")) - ) + List observerVersions = List.of( + new VersionInformation(Version.fromString("5.0.17"), IndexVersion.fromId(0), IndexVersion.fromId(5001799)), + new VersionInformation(Version.fromString("2.0.1"), IndexVersion.fromId(1000099), IndexVersion.fromId(2000199)), + new VersionInformation(Version.fromString("1.6.0"), IndexVersion.fromId(0), IndexVersion.fromId(1060099)) ); - discoBuilder.localNodeId("node_" + between(1, 3)); - if (randomBoolean()) { - discoBuilder.masterNodeId("node_" + between(1, 3)); + + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + for (int i = 0; i < dataVersions.size(); i++) { + discoBuilder.add( + DiscoveryNodeUtils.builder("data_" + i) + .version(dataVersions.get(i)) + .roles(Set.of(randomBoolean() ? DiscoveryNodeRole.DATA_ROLE : DiscoveryNodeRole.MASTER_ROLE)) + .build() + ); + } + for (int i = 0; i < observerVersions.size(); i++) { + discoBuilder.add(DiscoveryNodeUtils.builder("observer_" + i).version(observerVersions.get(i)).roles(Set.of()).build()); } - discoBuilder.add( - new DiscoveryNode( - "name_" + 3, - "node_" + 3, - buildNewFakeTransportAddress(), - Collections.emptyMap(), - new HashSet<>(randomSubsetOf(DiscoveryNodeRole.roles())), - VersionInformation.inferVersions(Version.fromString("1.1.0")) - ) - ); DiscoveryNodes build = discoBuilder.build(); - assertEquals(Version.fromString("6.3.0"), build.getMaxNodeVersion()); - assertEquals(Version.fromString("1.1.0"), build.getMinNodeVersion()); + + assertEquals(Version.fromString("5.0.17"), build.getMaxNodeVersion()); + assertEquals(Version.fromString("1.6.0"), build.getMinNodeVersion()); + assertEquals(Version.fromString("2.1.0"), build.getSmallestNonClientNodeVersion()); // doesn't include 1.6.0 observer + assertEquals(IndexVersion.fromId(2010099), build.getMaxDataNodeCompatibleIndexVersion()); // doesn't include 2000199 observer + assertEquals(IndexVersion.fromId(2000099), build.getMinSupportedIndexVersion()); // also includes observers } private static String noAttr(DiscoveryNode discoveryNode) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java index f462fa46f2284..fe7c36ff458dc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java @@ -7,10 +7,13 @@ */ package org.elasticsearch.cluster.routing; +import org.apache.logging.log4j.Level; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; @@ -18,6 +21,8 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; @@ -28,6 +33,7 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -229,6 +235,102 @@ public void testNotifiesOnFailure() throws InterruptedException { } } - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); // i.e. it doesn't leak any listeners + safeAwait(countDownLatch); // i.e. it doesn't leak any listeners + } + + @TestLogging(reason = "testing log output", value = "org.elasticsearch.cluster.routing.BatchedRerouteService:DEBUG") + public void testExceptionFidelity() { + + final var mockLogAppender = new MockLogAppender(); + try (var ignored = mockLogAppender.capturing(BatchedRerouteService.class)) { + + clusterService.getMasterService() + .setClusterStatePublisher( + (event, publishListener, ackListener) -> publishListener.onFailure(new FailedToCommitClusterStateException("simulated")) + ); + + // Case 1: an exception thrown from within the reroute itself + + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "failure within reroute", + BatchedRerouteService.class.getCanonicalName(), + Level.ERROR, + "unexpected failure" + ) + ); + + final BatchedRerouteService failingRerouteService = new BatchedRerouteService(clusterService, (s, r, l) -> { + throw new ElasticsearchException("simulated"); + }); + final var rerouteFailureFuture = new PlainActionFuture(); + failingRerouteService.reroute("publish failure", randomFrom(EnumSet.allOf(Priority.class)), rerouteFailureFuture); + assertThat( + expectThrows(ExecutionException.class, ElasticsearchException.class, () -> rerouteFailureFuture.get(10, TimeUnit.SECONDS)) + .getMessage(), + equalTo("simulated") + ); + mockLogAppender.assertAllExpectationsMatched(); + + // None of the other cases should yield any log messages by default + + mockLogAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation("no errors", BatchedRerouteService.class.getCanonicalName(), Level.ERROR, "*") + ); + mockLogAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation("no warnings", BatchedRerouteService.class.getCanonicalName(), Level.WARN, "*") + ); + mockLogAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation("no info", BatchedRerouteService.class.getCanonicalName(), Level.INFO, "*") + ); + + // Case 2: a FailedToCommitClusterStateException (see the call to setClusterStatePublisher above) + + final BatchedRerouteService batchedRerouteService = new BatchedRerouteService(clusterService, (s, r, l) -> { + l.onResponse(null); + return ClusterState.builder(s).build(); + }); + + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "publish failure", + 
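The recurring shape in this test: capture the class's logger with a MockLogAppender, queue Seen/Unseen expectations, run the scenario, then assert that everything matched. A condensed sketch of that pattern using the same MockLogAppender API as this file; the wrapper method is illustrative:

import org.apache.logging.log4j.Level;
import org.elasticsearch.cluster.routing.BatchedRerouteService;
import org.elasticsearch.test.MockLogAppender;

class LogExpectationPatternSketch {
    static void assertDebugLogged(Runnable scenario) {
        var appender = new MockLogAppender();
        try (var ignored = appender.capturing(BatchedRerouteService.class)) {
            appender.addExpectation(
                new MockLogAppender.SeenEventExpectation(
                    "reroute failure is logged at DEBUG",            // assertion name
                    BatchedRerouteService.class.getCanonicalName(),  // logger to watch
                    Level.DEBUG,
                    "unexpected failure"                             // expected message
                )
            );
            scenario.run();
            appender.assertAllExpectationsMatched();
        }
    }
}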
BatchedRerouteService.class.getCanonicalName(), + Level.DEBUG, + "unexpected failure" + ) + ); + + final var publishFailureFuture = new PlainActionFuture(); + batchedRerouteService.reroute("publish failure", randomFrom(EnumSet.allOf(Priority.class)), publishFailureFuture); + expectThrows( + ExecutionException.class, + FailedToCommitClusterStateException.class, + () -> publishFailureFuture.get(10, TimeUnit.SECONDS) + ); + mockLogAppender.assertAllExpectationsMatched(); + + // Case 3: a NotMasterException + + PlainActionFuture.get(future -> { + clusterService.getClusterApplierService().onNewClusterState("simulated", () -> { + final var state = clusterService.state(); + return ClusterState.builder(state).nodes(state.nodes().withMasterNodeId(null)).build(); + }, future); + }, 10, TimeUnit.SECONDS); + + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "not-master failure", + BatchedRerouteService.class.getCanonicalName(), + Level.DEBUG, + "unexpected failure" + ) + ); + final var notMasterFuture = new PlainActionFuture(); + batchedRerouteService.reroute("not-master failure", randomFrom(EnumSet.allOf(Priority.class)), notMasterFuture); + expectThrows(ExecutionException.class, NotMasterException.class, () -> notMasterFuture.get(10, TimeUnit.SECONDS)); + + mockLogAppender.assertAllExpectationsMatched(); + } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java index 2aa7c911e7059..56d3a3910cf5c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java @@ -8,11 +8,11 @@ package org.elasticsearch.cluster.routing; -import org.elasticsearch.Version; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardIdTests; import org.elasticsearch.repositories.IndexId; @@ -345,7 +345,7 @@ public void testEqualsIgnoringVersion() { new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("test", new SnapshotId("s1", UUIDs.randomBase64UUID())), - Version.CURRENT, + IndexVersion.current(), new IndexId("test", UUIDs.randomBase64UUID(random())) ), otherRouting.unassignedInfo(), diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java index 7bfd65c2f16c7..df0175b1200ae 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; @@ -352,7 +353,7 @@ public void testNewIndexRestored() { new SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), - Version.CURRENT, + IndexVersion.current(), new IndexId("test", UUIDs.randomBase64UUID(random())) ), new 
HashSet<>() @@ -436,7 +437,7 @@ public void testExistingIndexRestored() { new SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), - Version.CURRENT, + IndexVersion.current(), new IndexId("test", UUIDs.randomBase64UUID(random())) ) ) diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 572c7ab5f0ed5..0723263291f19 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -46,6 +46,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.EmptySnapshotsInfoService; @@ -449,7 +450,7 @@ public void testRestoreDoesNotAllocateSnapshotOnOlderNodes() { RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY) .addAsRestore( metadata.index("test"), - new SnapshotRecoverySource(UUIDs.randomBase64UUID(), snapshot, Version.CURRENT, indexId) + new SnapshotRecoverySource(UUIDs.randomBase64UUID(), snapshot, IndexVersion.current(), indexId) ) .build() ) @@ -608,13 +609,13 @@ public void testMessages() { final SnapshotRecoverySource newVersionSnapshot = new SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), - newNode.node().getVersion(), + newNode.node().getVersion().indexVersion, indexId ); final SnapshotRecoverySource oldVersionSnapshot = new SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), - oldNode.node().getVersion(), + oldNode.node().getVersion().indexVersion, indexId ); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesTests.java index 56a276b907ae4..18adf3ca32c74 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesTests.java @@ -44,9 +44,8 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.oneOf; public class RoutingNodesTests extends ESAllocationTestCase { @@ -418,47 +417,14 @@ public void testNodeInterleavedShardIterator() { } public void testMoveShardWithDefaultRole() { - - var inSync = randomList(2, 2, UUIDs::randomBase64UUID); - var indexMetadata = IndexMetadata.builder("index") - .settings(indexSettings(Version.CURRENT, 1, 1)) - .putInSyncAllocationIds(0, Set.copyOf(inSync)) - .build(); - - var shardId = new ShardId(indexMetadata.getIndex(), 0); - - var indexRoutingTable = 
IndexRoutingTable.builder(indexMetadata.getIndex()) - .addShard(TestShardRouting.newShardRouting(shardId, "node-1", null, true, STARTED, ShardRouting.Role.DEFAULT)) - .addShard(TestShardRouting.newShardRouting(shardId, "node-2", null, false, STARTED, ShardRouting.Role.DEFAULT)) - .build(); - - var node1 = newNode("node-1"); - var node2 = newNode("node-2"); - var node3 = newNode("node-3"); - - var clusterState = ClusterState.builder(ClusterName.DEFAULT) - .metadata(Metadata.builder().put(indexMetadata, false).build()) - .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3).build()) - .routingTable(RoutingTable.builder().add(indexRoutingTable).build()) - .build(); - - var routingNodes = clusterState.getRoutingNodes().mutableCopy(); - - routingNodes.relocateOrReinitializeShard( - routingNodes.node("node-1").getByShardId(shardId), - "node-3", - 0L, - new RoutingChangesObserver() { - } - ); - - assertThat(routingNodes.node("node-1").getByShardId(shardId).state(), equalTo(RELOCATING)); - assertThat(routingNodes.node("node-2").getByShardId(shardId).state(), equalTo(STARTED)); - assertThat(routingNodes.node("node-3").getByShardId(shardId).state(), equalTo(INITIALIZING)); + runMoveShardRolesTest(ShardRouting.Role.DEFAULT, ShardRouting.Role.DEFAULT); } public void testMoveShardWithPromotableOnlyRole() { + runMoveShardRolesTest(ShardRouting.Role.INDEX_ONLY, ShardRouting.Role.SEARCH_ONLY); + } + private void runMoveShardRolesTest(ShardRouting.Role primaryRole, ShardRouting.Role replicaRole) { var inSync = randomList(2, 2, UUIDs::randomBase64UUID); var indexMetadata = IndexMetadata.builder("index") .settings(indexSettings(Version.CURRENT, 1, 1)) @@ -468,8 +434,8 @@ public void testMoveShardWithPromotableOnlyRole() { var shardId = new ShardId(indexMetadata.getIndex(), 0); var indexRoutingTable = IndexRoutingTable.builder(indexMetadata.getIndex()) - .addShard(TestShardRouting.newShardRouting(shardId, "node-1", null, true, STARTED, ShardRouting.Role.INDEX_ONLY)) - .addShard(TestShardRouting.newShardRouting(shardId, "node-2", null, false, STARTED, ShardRouting.Role.SEARCH_ONLY)) + .addShard(TestShardRouting.newShardRouting(shardId, "node-1", null, true, STARTED, primaryRole)) + .addShard(TestShardRouting.newShardRouting(shardId, "node-2", null, false, STARTED, replicaRole)) .build(); var node1 = newNode("node-1"); @@ -484,18 +450,13 @@ public void testMoveShardWithPromotableOnlyRole() { var routingNodes = clusterState.getRoutingNodes().mutableCopy(); - routingNodes.relocateOrReinitializeShard( - routingNodes.node("node-1").getByShardId(shardId), - "node-3", - 0L, - new RoutingChangesObserver() { - } - ); + routingNodes.relocateShard(routingNodes.node("node-1").getByShardId(shardId), "node-3", 0L, new RoutingChangesObserver() { + }); - assertThat(routingNodes.node("node-1").getByShardId(shardId), nullValue()); - assertThat(routingNodes.node("node-2").getByShardId(shardId), nullValue()); + assertThat(routingNodes.node("node-1").getByShardId(shardId).state(), equalTo(RELOCATING)); + assertThat(routingNodes.node("node-2").getByShardId(shardId).state(), equalTo(STARTED)); assertThat(routingNodes.node("node-3").getByShardId(shardId).state(), equalTo(INITIALIZING)); - assertThat(routingNodes.unassigned().ignored(), hasSize(1)); + assertThat(routingNodes.unassigned().ignored(), empty()); } private boolean assertShardStats(RoutingNodes routingNodes) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java 
b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java index e6d33b106c27c..95f2cce795672 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.InternalSnapshotsInfoService; @@ -386,7 +387,7 @@ private ClusterState createRecoveryStateAndInitializeAllocations( new SnapshotRecoverySource( restoreUUID, snapshot, - Version.CURRENT, + IndexVersion.current(), new IndexId(indexMetadata.getIndex().getName(), UUIDs.randomBase64UUID(random())) ), new HashSet<>() @@ -399,7 +400,7 @@ private ClusterState createRecoveryStateAndInitializeAllocations( new SnapshotRecoverySource( restoreUUID, snapshot, - Version.CURRENT, + IndexVersion.current(), new IndexId(indexMetadata.getIndex().getName(), UUIDs.randomBase64UUID(random())) ) ); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java index 8363656295474..b6ef484de0e71 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java @@ -55,6 +55,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.InternalSnapshotsInfoService; @@ -556,7 +557,7 @@ public void testUnassignedAllocationPredictsDiskUsage() { final var recoverySource = new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(random()), new Snapshot("repo", new SnapshotId("snap", UUIDs.randomBase64UUID(random()))), - Version.CURRENT, + IndexVersion.current(), new IndexId("index", UUIDs.randomBase64UUID(random())) ); routingTable.addAsRestore(restoredIndexMetadata, recoverySource); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 8d9f17ba8e85c..e9020f29f043b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -49,6 +49,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.EmptySnapshotsInfoService; @@ -1136,7 +1137,7 @@ private void doTestDiskThresholdWithSnapshotShardSizes(boolean testMaxHeadroom) 
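Every restore-oriented test in this change now builds its recovery source around the IndexVersion the snapshot was written with, in place of a node Version. A self-contained sketch of the four-argument constructor used throughout this diff:

import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;

class SnapshotRecoverySourceSketch {
    static RecoverySource.SnapshotRecoverySource sample() {
        return new RecoverySource.SnapshotRecoverySource(
            UUIDs.randomBase64UUID(),                                               // restore UUID
            new Snapshot("repo", new SnapshotId("snap", UUIDs.randomBase64UUID())), // snapshot being restored
            IndexVersion.current(),                                                 // version the snapshot was taken with
            new IndexId("index", UUIDs.randomBase64UUID())
        );
    }
}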
RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY) .addAsNewRestore( indexMetadata, - new RecoverySource.SnapshotRecoverySource("_restore_uuid", snapshot, Version.CURRENT, indexId), + new RecoverySource.SnapshotRecoverySource("_restore_uuid", snapshot, IndexVersion.current(), indexId), new HashSet<>() ) .build() diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java index c1d5e2a64b07a..013717dbd5cb5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.Snapshot; @@ -230,7 +231,7 @@ private RecoverySource.SnapshotRecoverySource createSnapshotRecoverySource(final return new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), snapshot, - Version.CURRENT, + IndexVersion.current(), new IndexId("test", UUIDs.randomBase64UUID(random())) ); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsUpdaterTests.java similarity index 99% rename from server/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java rename to server/src/test/java/org/elasticsearch/common/settings/SettingsUpdaterTests.java index c3c7519293e60..e216a90b67edd 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsUpdaterTests.java @@ -5,16 +5,13 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -package org.elasticsearch.action.admin.cluster.settings; +package org.elasticsearch.common.settings; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; diff --git a/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index c6cba4c114f6f..b76e41b067b2d 100644 --- a/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Nullable; import org.elasticsearch.env.ShardLockObtainFailedException; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.Snapshot; @@ -475,7 +476,7 @@ private RoutingAllocation getRestoreRoutingAllocation(AllocationDeciders allocat new SnapshotRecoverySource( UUIDs.randomBase64UUID(), snapshot, - Version.CURRENT, + IndexVersion.current(), new IndexId(shardId.getIndexName(), UUIDs.randomBase64UUID(random())) ) ) diff --git a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java index d8ab4d32c65b6..7c8177b445824 100644 --- a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java @@ -52,7 +52,6 @@ import org.junit.After; import org.junit.Assert; import org.junit.Before; -import org.mockito.ArgumentCaptor; import java.net.InetSocketAddress; import java.net.UnknownHostException; @@ -66,7 +65,9 @@ import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.concurrent.BlockingDeque; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.LinkedBlockingDeque; import java.util.concurrent.TimeUnit; import static java.net.InetAddress.getByName; @@ -79,17 +80,13 @@ import static org.elasticsearch.test.LambdaMatchers.transformedMatch; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; public class AbstractHttpServerTransportTests extends ESTestCase { @@ -97,6 +94,9 @@ public class AbstractHttpServerTransportTests extends ESTestCase { private ThreadPool threadPool; private Recycler recycler; + private static final int LONG_GRACE_PERIOD_MS = 20_000; + private static final int 
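The two constants drive the shutdown tests that follow: a 1 ms grace period when timing out is the expected outcome, and 20 s when the test must finish well inside the deadline. A sketch of what a gracePeriod(...) helper like the one used below could look like; the setting key here is an assumption, not taken from this diff, and should be checked against HttpTransportSettings:

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.TimeValue;

class GracePeriodSketch {
    // hypothetical mirror of the test helper; "http.shutdown_grace_period"
    // is assumed rather than quoted from this change
    static Settings gracePeriod(int millis) {
        return Settings.builder()
            .put("http.shutdown_grace_period", TimeValue.timeValueMillis(millis))
            .build();
    }
}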
SHORT_GRACE_PERIOD_MS = 1; + @Before public void setup() throws Exception { networkService = new NetworkService(Collections.emptyList()); @@ -422,7 +422,7 @@ public void testHandlingCompatibleVersionParsingErrors() { headers ); - transport.incomingRequest(fakeHttpRequest, null); + transport.incomingRequest(fakeHttpRequest, new TestHttpChannel()); } } @@ -444,7 +444,7 @@ public void testIncorrectHeaderHandling() { headers ); - transport.incomingRequest(fakeHttpRequest, null); + transport.incomingRequest(fakeHttpRequest, new TestHttpChannel()); } try (AbstractHttpServerTransport transport = failureAssertingtHttpServerTransport(clusterSettings, Set.of("Content-Type"))) { Map> headers = new HashMap<>(); @@ -460,7 +460,7 @@ public void testIncorrectHeaderHandling() { headers ); - transport.incomingRequest(fakeHttpRequest, null); + transport.incomingRequest(fakeHttpRequest, new TestHttpChannel()); } } @@ -624,6 +624,7 @@ public HttpStats stats() { .build(); try (var httpChannel = fakeRestRequest.getHttpChannel()) { + transport.serverAcceptedChannel(httpChannel); transport.incomingRequest(fakeRestRequest.getHttpRequest(), httpChannel); } @@ -722,6 +723,7 @@ public HttpStats stats() { .withPath(path) .withHeaders(Collections.singletonMap(Task.X_OPAQUE_ID_HTTP_HEADER, Collections.singletonList(opaqueId))) .build(); + transport.serverAcceptedChannel(fakeRestRequest.getHttpChannel()); transport.incomingRequest(fakeRestRequest.getHttpRequest(), fakeRestRequest.getHttpChannel()); mockAppender.assertAllExpectationsMatched(); } finally { @@ -904,52 +906,22 @@ protected void stopInternal() {} } } - @SuppressWarnings("unchecked") - public void testSetGracefulClose() { - try (AbstractHttpServerTransport transport = new TestHttpServerTransport(Settings.EMPTY)) { - final TestHttpRequest httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); - - HttpChannel httpChannel = mock(HttpChannel.class); - transport.incomingRequest(httpRequest, httpChannel); - - var response = ArgumentCaptor.forClass(TestHttpResponse.class); - var listener = ArgumentCaptor.forClass(ActionListener.class); - verify(httpChannel).sendResponse(response.capture(), listener.capture()); - - listener.getValue().onResponse(null); - assertThat(response.getValue().containsHeader(CONNECTION), is(false)); - verify(httpChannel, never()).close(); - - httpChannel = mock(HttpChannel.class); - transport.gracefullyCloseConnections(); - transport.incomingRequest(httpRequest, httpChannel); - verify(httpChannel).sendResponse(response.capture(), listener.capture()); - - listener.getValue().onResponse(null); - assertThat(response.getValue().headers().get(CONNECTION), containsInAnyOrder(DefaultRestChannel.CLOSE)); - verify(httpChannel).close(); - } - } - public void testStopDoesntWaitIfGraceIsZero() { - try (TestHttpServerTransport transport = new TestHttpServerTransport(Settings.EMPTY)) { - transport.bindServer(); - final TestHttpRequest httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); - + try (var noWait = LogExpectation.unexpectWait(); var transport = new TestHttpServerTransport(Settings.EMPTY)) { TestHttpChannel httpChannel = new TestHttpChannel(); transport.serverAcceptedChannel(httpChannel); - transport.incomingRequest(httpRequest, httpChannel); + transport.incomingRequest(testHttpRequest(), httpChannel); transport.doStop(); assertFalse(transport.testHttpServerChannel.isOpen()); assertFalse(httpChannel.isOpen()); + noWait.assertExpectationsMatched(); } } public 
void testStopWorksWithNoOpenRequests() { - try (TestHttpServerTransport transport = new TestHttpServerTransport(gracePeriod(1))) { - transport.bindServer(); - + var grace = SHORT_GRACE_PERIOD_MS; + try (var noWait = LogExpectation.unexpectedTimeout(grace); var transport = new TestHttpServerTransport(gracePeriod(grace))) { final TestHttpRequest httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/") { @Override public Map<String, List<String>> getHeaders() { @@ -965,122 +937,104 @@ public Map<String, List<String>> getHeaders() { // TestHttpChannel will throw if closed twice, so this ensures close is not called. transport.doStop(); assertFalse(transport.testHttpServerChannel.isOpen()); + + noWait.assertExpectationsMatched(); } } - public void testStopForceClosesConnection() { - final Logger mockLogger = LogManager.getLogger(AbstractHttpServerTransport.class); - Loggers.setLevel(mockLogger, Level.WARN); - final MockLogAppender appender = new MockLogAppender(); - try (TestHttpServerTransport transport = new TestHttpServerTransport(gracePeriod(10))) { - Loggers.addAppender(mockLogger, appender); - appender.start(); - - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "message", - AbstractHttpServerTransport.class.getName(), - Level.WARN, - "timed out while waiting [10]ms for clients to close connections" - ) - ); + public void testStopClosesIdleConnectionImmediately() { + var grace = SHORT_GRACE_PERIOD_MS; + try ( + var noTimeout = LogExpectation.unexpectedTimeout(grace); + TestHttpServerTransport transport = new TestHttpServerTransport(gracePeriod(grace)) + ) { - transport.bindServer(); - final TestHttpRequest httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); TestHttpChannel httpChannel = new TestHttpChannel(); transport.serverAcceptedChannel(httpChannel); - transport.incomingRequest(httpRequest, httpChannel); - // idle connection + + transport.incomingRequest(testHttpRequest(), httpChannel); + // channel now idle + assertTrue(httpChannel.isOpen()); transport.doStop(); assertFalse(httpChannel.isOpen()); assertFalse(transport.testHttpServerChannel.isOpen()); + // ensure we did not time out waiting for connections to close naturally - appender.assertAllExpectationsMatched(); - } finally { - appender.stop(); - Loggers.removeAppender(mockLogger, appender); + noTimeout.assertExpectationsMatched(); } } public void testStopForceClosesConnectionDuringRequest() throws Exception { - final Logger mockLogger = LogManager.getLogger(AbstractHttpServerTransport.class); - Loggers.setLevel(mockLogger, Level.WARN); - final MockLogAppender appender = new MockLogAppender(); - final var inDispatch = new CountDownLatch(1); - try (TestHttpServerTransport transport = new TestHttpServerTransport(gracePeriod(10), new HttpServerTransport.Dispatcher() { - @Override - public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { - inDispatch.countDown(); - } - - @Override - public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { - channel.sendResponse(emptyResponse(RestStatus.BAD_REQUEST)); - } - })) { - Loggers.addAppender(mockLogger, appender); - appender.start(); + var grace = SHORT_GRACE_PERIOD_MS; + TestHttpChannel httpChannel = new TestHttpChannel(); + var doneWithRequest = new CountDownLatch(1); + try ( + var timeout = LogExpectation.expectTimeout(grace); + TestHttpServerTransport transport = new TestHttpServerTransport(gracePeriod(grace)) + ) { - appender.addExpectation(
- new MockLogAppender.SeenEventExpectation( - "message", - AbstractHttpServerTransport.class.getName(), - Level.WARN, - "timed out while waiting [10]ms for clients to close connections" - ) - ); + httpChannel.blockSendResponse(); + var inResponse = httpChannel.notifyInSendResponse(); - transport.bindServer(); - final TestHttpRequest httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); - TestHttpChannel httpChannel = new TestHttpChannel(); transport.serverAcceptedChannel(httpChannel); - new Thread( - () -> transport.incomingRequest(httpRequest, httpChannel), - "testStopForceClosesConnectionDuringRequest -> incomingRequest" - ).start(); - inDispatch.await(); + new Thread(() -> { + transport.incomingRequest(testHttpRequest(), httpChannel); + doneWithRequest.countDown(); + }, "testStopForceClosesConnectionDuringRequest -> incomingRequest").start(); + + inResponse.await(); + assertTrue(httpChannel.isOpen()); transport.doStop(); + assertFalse(httpChannel.isOpen()); assertFalse(transport.testHttpServerChannel.isOpen()); - assertThat(httpChannel.responses, hasSize(0)); + assertTrue(httpChannel.noResponses()); + // ensure we timed out waiting for connections to close naturally - appender.assertAllExpectationsMatched(); + timeout.assertExpectationsMatched(); } finally { - appender.stop(); - Loggers.removeAppender(mockLogger, appender); + // unblock request thread + httpChannel.allowSendResponse(); + doneWithRequest.countDown(); } } - public void testStopClosesChannelAfterRequest() { - try (TestHttpServerTransport transport = new TestHttpServerTransport(gracePeriod(100))) { - transport.bindServer(); + public void testStopClosesChannelAfterRequest() throws Exception { + var grace = LONG_GRACE_PERIOD_MS; + try (var noTimeout = LogExpectation.unexpectedTimeout(grace); var transport = new TestHttpServerTransport(gracePeriod(grace))) { TestHttpChannel httpChannel = new TestHttpChannel(); transport.serverAcceptedChannel(httpChannel); - transport.incomingRequest(new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"), httpChannel); + transport.incomingRequest(testHttpRequest(), httpChannel); TestHttpChannel idleChannel = new TestHttpChannel(); transport.serverAcceptedChannel(idleChannel); - transport.incomingRequest(new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"), idleChannel); + transport.incomingRequest(testHttpRequest(), idleChannel); CountDownLatch stopped = new CountDownLatch(1); + var inSendResponse = httpChannel.notifyInSendResponse(); + httpChannel.blockSendResponse(); + + // one last request, should cause httpChannel to close after the request once we start shutting down. 
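+ // sendResponse() on this channel first trips the latch handed out by notifyInSendResponse(), then parks on the
+ // blockSendResponse() latch, so the response below stays in flight until allowSendResponse() is called once
+ // shutdown has begun.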
+ new Thread(() -> transport.incomingRequest(testHttpRequest(), httpChannel), "testStopClosesChannelAfterRequest last request") + .start(); + + inSendResponse.await(); + new Thread(() -> { transport.doStop(); stopped.countDown(); - }).start(); + }, "testStopClosesChannelAfterRequest stopping transport").start(); - try { - assertTrue(transport.gracefullyCloseCalled.await(10, TimeUnit.SECONDS)); - } catch (InterruptedException e) { - fail("server never called grace period"); - } + // wait until we are shutting down + assertBusy(() -> assertFalse(transport.isAcceptingConnections())); + httpChannel.allowSendResponse(); - // one last request, should cause httpChannel to close naturally now that we've set grace period - transport.incomingRequest(new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"), httpChannel); - assertFalse(httpChannel.isOpen()); + // wait for channel to close + assertBusy(() -> assertFalse(httpChannel.isOpen())); try { assertTrue(stopped.await(10, TimeUnit.SECONDS)); @@ -1092,42 +1046,39 @@ public void testStopClosesChannelAfterRequest() { assertFalse(idleChannel.isOpen()); assertThat(httpChannel.responses, hasSize(2)); - HttpResponse first = httpChannel.responses.get(0); - HttpResponse last = httpChannel.responses.get(1); - assertFalse(first.containsHeader(CONNECTION)); - assertTrue(last.containsHeader(CONNECTION)); - assertThat(last, instanceOf(TestHttpResponse.class)); - assertThat(((TestHttpResponse) last).headers().get(CONNECTION).get(0), equalTo(CLOSE)); + // should have closed naturally without having to wait + noTimeout.assertExpectationsMatched(); } } - public void testForceClosesOpenChannels() { - try (TestHttpServerTransport transport = new TestHttpServerTransport(gracePeriod(100))) { - transport.bindServer(); + public void testForceClosesOpenChannels() throws Exception { + var grace = 100; // this test waits for the entire grace, so try to keep it short + TestHttpChannel httpChannel = new TestHttpChannel(); + var doneWithRequest = new CountDownLatch(1); + try (var timeout = LogExpectation.expectTimeout(grace); var transport = new TestHttpServerTransport(gracePeriod(grace))) { - TestHttpChannel httpChannel = new TestHttpChannel(true); transport.serverAcceptedChannel(httpChannel); - transport.incomingRequest(new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"), httpChannel); + transport.incomingRequest(testHttpRequest(), httpChannel); CountDownLatch stopped = new CountDownLatch(1); + var inResponse = httpChannel.notifyInSendResponse(); + httpChannel.blockSendResponse(); + new Thread(() -> { - try { - assertTrue(transport.gracefullyCloseCalled.await(100, TimeUnit.MILLISECONDS)); - } catch (InterruptedException e) { - fail("server never called grace period"); - } - // one last request, will attempt to close naturally, but we are blocking it - transport.incomingRequest(new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"), httpChannel); + transport.incomingRequest(testHttpRequest(), httpChannel); + doneWithRequest.countDown(); }).start(); + inResponse.await(); + new Thread(() -> { transport.doStop(); stopped.countDown(); }).start(); try { - assertTrue(stopped.await(10, TimeUnit.SECONDS)); + assertTrue(stopped.await(2 * LONG_GRACE_PERIOD_MS, TimeUnit.MILLISECONDS)); } catch (InterruptedException e) { fail("server never stopped"); } @@ -1135,13 +1086,15 @@ public void testForceClosesOpenChannels() { assertFalse(transport.testHttpServerChannel.isOpen()); 
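+ // the grace period elapsed while a response was still blocked in flight, so the transport force-closed the
+ // server channel and the client channel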
assertFalse(httpChannel.isOpen()); - assertThat(httpChannel.responses, hasSize(2)); - HttpResponse first = httpChannel.responses.get(0); - HttpResponse last = httpChannel.responses.get(1); - assertFalse(first.containsHeader(CONNECTION)); - assertTrue(last.containsHeader(CONNECTION)); - assertThat(last, instanceOf(TestHttpResponse.class)); - assertThat(((TestHttpResponse) last).headers().get(CONNECTION).get(0), equalTo(CLOSE)); + HttpResponse first = httpChannel.getResponse(); + assertTrue(httpChannel.noResponses()); // never sent the second response + assertThat(first, instanceOf(TestHttpResponse.class)); + + timeout.assertExpectationsMatched(); + } finally { + // cleanup thread + httpChannel.allowSendResponse(); + doneWithRequest.await(); } } @@ -1193,8 +1146,7 @@ public Collection getRestHeaders() { } private class TestHttpServerTransport extends AbstractHttpServerTransport { - public TestHttpChannel testHttpServerChannel = new TestHttpChannel(false); - public CountDownLatch gracefullyCloseCalled = new CountDownLatch(1); + public TestHttpChannel testHttpServerChannel = new TestHttpChannel(); TestHttpServerTransport(Settings settings, HttpServerTransport.Dispatcher dispatcher) { super( @@ -1207,6 +1159,7 @@ private class TestHttpServerTransport extends AbstractHttpServerTransport { new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), Tracer.NOOP ); + bindServer(); } TestHttpServerTransport(Settings settings) { @@ -1223,12 +1176,6 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, }); } - @Override - void gracefullyCloseConnections() { - super.gracefullyCloseConnections(); - gracefullyCloseCalled.countDown(); - } - @Override protected HttpServerChannel bind(InetSocketAddress hostAddress) { testHttpServerChannel.setLocalAddress(hostAddress); @@ -1244,25 +1191,80 @@ protected void doStart() { protected void stopInternal() {} } + private Settings gracePeriod(int ms) { + return Settings.builder().put(SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD.getKey(), new TimeValue(ms)).build(); + } + private static class TestHttpChannel implements HttpChannel, HttpServerChannel { private boolean open = true; - private int numCloses = 0; - private final CountDownLatch closeLatch; private ActionListener closeListener; private InetSocketAddress localAddress; - public List responses = new ArrayList<>(); + private final BlockingDeque responses = new LinkedBlockingDeque<>(); + + private CountDownLatch notifySendResponse = null; + private CountDownLatch blockSendResponse = null; + + public CountDownLatch notifyInSendResponse() { + synchronized (this) { + assert notifySendResponse == null : "already notifying"; + notifySendResponse = new CountDownLatch(1); + return notifySendResponse; + } + } + + public synchronized void blockSendResponse() { + synchronized (this) { + assert blockSendResponse == null : "blockSendResponse already set"; + blockSendResponse = new CountDownLatch(1); + } + } + + public synchronized void allowSendResponse() { + synchronized (this) { + assert blockSendResponse != null : "blockSendResponse null, no need to allow"; + blockSendResponse.countDown(); + } + } - TestHttpChannel() { - this(false); + public boolean noResponses() { + return responses.peek() == null; } - TestHttpChannel(boolean blockFirstClose) { - closeLatch = blockFirstClose ? 
new CountDownLatch(1) : null; + public HttpResponse getResponse() { + try { + return responses.takeFirst(); + } catch (InterruptedException e) { + fail("interrupted"); + } + // unreachable + return null; } @Override public void sendResponse(HttpResponse response, ActionListener listener) { + CountDownLatch notify; + CountDownLatch blockSend; + synchronized (this) { + notify = notifySendResponse; + blockSend = blockSendResponse; + } + if (notify != null) { + notify.countDown(); + synchronized (this) { + notifySendResponse = null; + } + } + if (blockSend != null) { + try { + blockSend.await(); + synchronized (this) { + blockSendResponse = null; + } + } catch (InterruptedException e) { + fail("interrupted"); + } + } responses.add(response); listener.onResponse(null); } @@ -1283,26 +1285,12 @@ public InetSocketAddress getRemoteAddress() { @Override public void close() { - if (closeLatch != null) { - boolean waitForever; - synchronized (this) { - waitForever = numCloses == 0; - numCloses++; - } - if (waitForever) { - try { - if (closeLatch.await(1, TimeUnit.SECONDS) == false) { - return; - } - } catch (InterruptedException ie) { - throw new RuntimeException(ie); - } + synchronized (this) { + if (open == false) { + throw new IllegalStateException("channel already closed!"); } + open = false; } - if (open == false) { - throw new IllegalStateException("channel already closed!"); - } - open = false; if (closeListener != null) { closeListener.onResponse(null); } @@ -1326,7 +1314,75 @@ public void addCloseListener(ActionListener listener) { } } - private Settings gracePeriod(int ms) { - return Settings.builder().put(SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD.getKey(), new TimeValue(ms)).build(); + private static class LogExpectation implements AutoCloseable { + private final Logger mockLogger; + private final MockLogAppender appender; + private boolean checked = false; + private final int grace; + + private LogExpectation(int grace) { + mockLogger = LogManager.getLogger(AbstractHttpServerTransport.class); + Loggers.setLevel(mockLogger, Level.DEBUG); + appender = new MockLogAppender(); + Loggers.addAppender(mockLogger, appender); + appender.start(); + this.grace = grace; + } + + public static LogExpectation expectTimeout(int grace) { + return new LogExpectation(grace).timedOut(true).wait(true); + } + + public static LogExpectation unexpectedTimeout(int grace) { + return new LogExpectation(grace).timedOut(false).wait(true); + } + + public static LogExpectation unexpectWait() { + return new LogExpectation(0).wait(false); + } + + private LogExpectation timedOut(boolean expected) { + var message = "timed out while waiting [" + grace + "]ms for clients to close connections"; + var name = "message"; + var logger = AbstractHttpServerTransport.class.getName(); + var level = Level.WARN; + if (expected) { + appender.addExpectation(new MockLogAppender.SeenEventExpectation(name, logger, level, message)); + } else { + appender.addExpectation(new MockLogAppender.UnseenEventExpectation(name, logger, level, message)); + } + return this; + } + + private LogExpectation wait(boolean expected) { + var message = "closing all client connections immediately"; + var name = "message"; + var logger = AbstractHttpServerTransport.class.getName(); + var level = Level.DEBUG; + if (expected) { + appender.addExpectation(new MockLogAppender.UnseenEventExpectation(name, logger, level, message)); + } else { + appender.addExpectation(new MockLogAppender.SeenEventExpectation(name, logger, level, message)); + } + return this; + } + + 
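+ // Typical usage, mirroring the tests above: open the expectation in the same try-with-resources as the
+ // transport, drive the scenario, then call assertExpectationsMatched() before close() runs, e.g.
+ //
+ //   try (var timeout = LogExpectation.expectTimeout(grace); var transport = new TestHttpServerTransport(gracePeriod(grace))) {
+ //       // ... exercise the transport and stop it ...
+ //       timeout.assertExpectationsMatched();
+ //   }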
public void assertExpectationsMatched() { + appender.assertAllExpectationsMatched(); + checked = true; + } + + @Override + public void close() { + appender.stop(); + Loggers.removeAppender(mockLogger, appender); + if (checked == false) { + fail("did not check expectations matched in LogExpectation"); + } + } + } + + private TestHttpRequest testHttpRequest() { + return new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); } } diff --git a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java index d050c2432025b..dc86735737a31 100644 --- a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java +++ b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java @@ -172,8 +172,7 @@ public void testHeadersSet() { threadPool.getThreadContext(), CorsHandler.fromSettings(settings), httpTracer, - tracer, - false + tracer ); RestResponse resp = testRestResponse(); final String customHeader = "custom-header"; @@ -193,35 +192,6 @@ public void testHeadersSet() { assertEquals(resp.contentType(), headers.get(DefaultRestChannel.CONTENT_TYPE).get(0)); } - public void testCloseConnection() { - Settings settings = Settings.builder().build(); - final TestHttpRequest httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); - final RestRequest request = RestRequest.request(parserConfig(), httpRequest, httpChannel); - HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings); - // send a response - DefaultRestChannel channel = new DefaultRestChannel( - httpChannel, - httpRequest, - request, - bigArrays, - handlingSettings, - threadPool.getThreadContext(), - CorsHandler.fromSettings(settings), - httpTracer, - tracer, - true - ); - - RestResponse resp = testRestResponse(); - channel.sendResponse(resp); - // inspect what was written - ArgumentCaptor<TestHttpResponse> responseCaptor = ArgumentCaptor.forClass(TestHttpResponse.class); - verify(httpChannel).sendResponse(responseCaptor.capture(), any()); - TestHttpResponse httpResponse = responseCaptor.getValue(); - Map<String, List<String>> headers = httpResponse.headers(); - assertThat(headers.get(DefaultRestChannel.CONNECTION), containsInAnyOrder(DefaultRestChannel.CLOSE)); - } - public void testNormallyNoConnectionClose() { Settings settings = Settings.builder().build(); final TestHttpRequest httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); @@ -237,8 +207,7 @@ public void testNormallyNoConnectionClose() { threadPool.getThreadContext(), CorsHandler.fromSettings(settings), httpTracer, - tracer, - false + tracer ); RestResponse resp = testRestResponse(); @@ -269,8 +238,7 @@ public void testCookiesSet() { threadPool.getThreadContext(), CorsHandler.fromSettings(settings), httpTracer, - tracer, - false + tracer ); channel.sendResponse(testRestResponse()); @@ -299,8 +267,7 @@ public void testReleaseInListener() throws IOException { threadPool.getThreadContext(), CorsHandler.fromSettings(settings), httpTracer, - tracer, - false + tracer ); final RestResponse response = new RestResponse( RestStatus.INTERNAL_SERVER_ERROR, @@ -368,8 +335,7 @@ public void testConnectionClose() throws Exception { threadPool.getThreadContext(), CorsHandler.fromSettings(settings), httpTracer, - tracer, - false + tracer ); channel.sendResponse(testRestResponse()); Class<ActionListener<Void>> listenerClass = (Class<ActionListener<Void>>) (Class<?>) ActionListener.class; @@ -401,8 +367,7 @@ public void
testResponseHeadersFiltering() { threadPool.getThreadContext(), CorsHandler.fromSettings(Settings.EMPTY), httpTracer, - tracer, - false + tracer ); doAnswer(invocationOnMock -> { ActionListener listener = invocationOnMock.getArgument(1); @@ -449,8 +414,7 @@ public RestRequest.Method method() { threadPool.getThreadContext(), CorsHandler.fromSettings(Settings.EMPTY), httpTracer, - tracer, - false + tracer ); // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released @@ -497,8 +461,7 @@ public HttpResponse createResponse(RestStatus status, BytesReference content) { threadPool.getThreadContext(), CorsHandler.fromSettings(Settings.EMPTY), httpTracer, - tracer, - false + tracer ); // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released @@ -547,8 +510,7 @@ public void testHandleHeadRequest() { threadPool.getThreadContext(), CorsHandler.fromSettings(Settings.EMPTY), httpTracer, - tracer, - false + tracer ); ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(HttpResponse.class); { @@ -608,8 +570,7 @@ public void sendResponse(HttpResponse response, ActionListener listener) { threadPool.getThreadContext(), new CorsHandler(CorsHandler.buildConfig(Settings.EMPTY)), new HttpTracer(), - tracer, - false + tracer ); final MockLogAppender sendingResponseMockLog = new MockLogAppender(); @@ -671,8 +632,7 @@ public void sendResponse(HttpResponse response, ActionListener listener) { threadPool.getThreadContext(), new CorsHandler(CorsHandler.buildConfig(Settings.EMPTY)), new HttpTracer(), - tracer, - false + tracer ); MockLogAppender mockLogAppender = new MockLogAppender(); @@ -728,8 +688,7 @@ public HttpResponse createResponse(RestStatus status, ChunkedRestResponseBody co threadPool.getThreadContext(), CorsHandler.fromSettings(Settings.EMPTY), new HttpTracer(), - tracer, - false + tracer ); var responseBody = new BytesArray(randomUnicodeOfLengthBetween(1, 100).getBytes(StandardCharsets.UTF_8)); @@ -799,8 +758,7 @@ private TestHttpResponse executeRequest(final Settings settings, final String or threadPool.getThreadContext(), new CorsHandler(CorsHandler.buildConfig(settings)), httpTracer, - tracer, - false + tracer ); channel.sendResponse(testRestResponse()); diff --git a/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java index efc37610de8cb..6ddbbf25c9871 100644 --- a/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java @@ -24,6 +24,7 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.sameInstance; public class ExistsQueryBuilderTests extends AbstractQueryTestCase { @Override @@ -121,4 +122,20 @@ public void testFromJson() throws IOException { assertEquals(json, 42.0, parsed.boost(), 0.0001); assertEquals(json, "user", parsed.fieldName()); } + + public void testRewriteIndexQueryToMatchNone() throws IOException { + ExistsQueryBuilder query = QueryBuilders.existsQuery("does_not_exist"); + for (QueryRewriteContext context : new QueryRewriteContext[] { createSearchExecutionContext(), createQueryRewriteContext() }) { + QueryBuilder rewritten = query.rewrite(context); + assertThat(rewritten, instanceOf(MatchNoneQueryBuilder.class)); + } + } + + public void 
testRewriteIndexQueryToNotMatchNone() throws IOException { + ExistsQueryBuilder query = QueryBuilders.existsQuery(KEYWORD_FIELD_NAME); + for (QueryRewriteContext context : new QueryRewriteContext[] { createSearchExecutionContext(), createQueryRewriteContext() }) { + QueryBuilder rewritten = query.rewrite(context); + assertThat(rewritten, sameInstance(query)); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java index 68f6be4d64675..7041f6db7f29c 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -13,8 +13,10 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.IOUtils; @@ -30,6 +32,7 @@ import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.EmptySystemIndices; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.TestThreadPool; @@ -37,6 +40,7 @@ import org.elasticsearch.transport.TransportService; import java.util.Collections; +import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.index.seqno.RetentionLeaseSyncAction.getExceptionLogLevel; @@ -192,21 +196,17 @@ public void testExceptionLogLevel() { assertEquals(Level.WARN, getExceptionLogLevel(new RuntimeException("simulated"))); assertEquals(Level.WARN, getExceptionLogLevel(new RuntimeException("simulated", new RuntimeException("simulated")))); - assertEquals(Level.DEBUG, getExceptionLogLevel(new IndexNotFoundException("index"))); - assertEquals(Level.DEBUG, getExceptionLogLevel(new RuntimeException("simulated", new IndexNotFoundException("index")))); - - assertEquals(Level.DEBUG, getExceptionLogLevel(new AlreadyClosedException("index"))); - assertEquals(Level.DEBUG, getExceptionLogLevel(new RuntimeException("simulated", new AlreadyClosedException("index")))); - final var shardId = new ShardId("test", "_na_", 0); - - assertEquals(Level.DEBUG, getExceptionLogLevel(new IndexShardClosedException(shardId))); - assertEquals(Level.DEBUG, getExceptionLogLevel(new RuntimeException("simulated", new IndexShardClosedException(shardId)))); - - assertEquals(Level.DEBUG, getExceptionLogLevel(new ShardNotInPrimaryModeException(shardId, IndexShardState.CLOSED))); - assertEquals( - Level.DEBUG, - getExceptionLogLevel(new RuntimeException("simulated", new ShardNotInPrimaryModeException(shardId, IndexShardState.CLOSED))) - ); + for (final var exception : List.of( + new NodeClosedException(DiscoveryNodeUtils.create("node")), + new IndexNotFoundException(shardId.getIndexName()), + new AlreadyClosedException(shardId.getIndexName()), + new IndexShardClosedException(shardId), 
+ new ShardNotInPrimaryModeException(shardId, IndexShardState.CLOSED), + new ReplicationOperation.RetryOnPrimaryException(shardId, "test") + )) { + assertEquals(Level.DEBUG, getExceptionLogLevel(exception)); + assertEquals(Level.DEBUG, getExceptionLogLevel(new RuntimeException("wrapper", exception))); + } } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index a0a11ad3eb5da..f5b8e78c16115 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -70,6 +70,7 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.DocIdSeqNoAndSource; @@ -2612,7 +2613,7 @@ public void testRestoreShard() throws IOException { new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), snapshot, - Version.CURRENT, + IndexVersion.current(), new IndexId("test", UUIDs.randomBase64UUID(random())) ) ); diff --git a/server/src/test/java/org/elasticsearch/index/translog/BufferedChecksumStreamInputTests.java b/server/src/test/java/org/elasticsearch/index/translog/BufferedChecksumStreamInputTests.java new file mode 100644 index 0000000000000..1fd3bac12f210 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/translog/BufferedChecksumStreamInputTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.translog; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.AbstractStreamTests; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; +import java.util.zip.CRC32; + +public class BufferedChecksumStreamInputTests extends AbstractStreamTests { + + @Override + protected StreamInput getStreamInput(BytesReference bytesReference) { + return new BufferedChecksumStreamInput(StreamInput.wrap(BytesReference.toBytes(bytesReference)), "test"); + } + + public void testChecksum() throws IOException { + int bytesSize = randomIntBetween(512, 2048); + byte[] bytes = randomByteArrayOfLength(bytesSize); + CRC32 crc32 = new CRC32(); + crc32.update(bytes); + + try (BufferedChecksumStreamInput input = new BufferedChecksumStreamInput(StreamInput.wrap(bytes), "test")) { + int read = input.read(new byte[bytesSize]); + assertEquals(bytesSize, read); + assertEquals(-1, input.read()); + assertEquals(crc32.getValue(), input.getChecksum()); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 095cd38288a12..277d0472d738f 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -532,7 +532,9 @@ private IndicesClusterStateService createIndicesClusterStateService( threadPool, List.of() ); + final NodeClient client = mock(NodeClient.class); final PeerRecoveryTargetService recoveryTargetService = new PeerRecoveryTargetService( + client, threadPool, transportService, null, @@ -541,7 +543,6 @@ private IndicesClusterStateService createIndicesClusterStateService( ); final ShardStateAction shardStateAction = mock(ShardStateAction.class); final PrimaryReplicaSyncer primaryReplicaSyncer = mock(PrimaryReplicaSyncer.class); - final NodeClient client = mock(NodeClient.class); return new IndicesClusterStateService( settings, indicesService, diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java index 6c9f2193661e8..d5bf27fbfc7c1 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESTestCase; @@ -110,7 +111,7 @@ public void testAddSnapshots() { newSnapshot, new RepositoryData.SnapshotDetails( randomFrom(SnapshotState.SUCCESS, SnapshotState.PARTIAL, SnapshotState.FAILED), - randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), + randomFrom(IndexVersion.current(), Version.CURRENT.minimumCompatibilityVersion().indexVersion), randomNonNegativeLong(), randomNonNegativeLong(), randomAlphaOfLength(10) @@ -142,7 +143,7 @@ public void testInitIndices() { snapshotId.getUUID(), new RepositoryData.SnapshotDetails( randomFrom(SnapshotState.values()), - randomFrom(Version.CURRENT, 
Version.CURRENT.minimumCompatibilityVersion()), + randomFrom(IndexVersion.current(), Version.CURRENT.minimumCompatibilityVersion().indexVersion), randomNonNegativeLong(), randomNonNegativeLong(), randomAlphaOfLength(10) @@ -210,7 +211,7 @@ public void testGetSnapshotState() { snapshotId, new RepositoryData.SnapshotDetails( state, - randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), + randomFrom(IndexVersion.current(), Version.CURRENT.minimumCompatibilityVersion().indexVersion), randomNonNegativeLong(), randomNonNegativeLong(), randomAlphaOfLength(10) @@ -392,7 +393,7 @@ public void testIndexMetaDataToRemoveAfterRemovingSnapshotWithSharing() { final RepositoryData.SnapshotDetails details = new RepositoryData.SnapshotDetails( SnapshotState.SUCCESS, - Version.CURRENT, + IndexVersion.current(), randomNonNegativeLong(), randomNonNegativeLong(), randomAlphaOfLength(10) @@ -455,7 +456,7 @@ public static RepositoryData generateRandomRepoData() { snapshotId, new RepositoryData.SnapshotDetails( randomFrom(SnapshotState.values()), - randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), + randomFrom(IndexVersion.current(), Version.CURRENT.minimumCompatibilityVersion().indexVersion), randomNonNegativeLong(), randomNonNegativeLong(), randomAlphaOfLength(10) diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java index 1b44b7576b39d..be0f7fbea5394 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; @@ -303,7 +304,7 @@ public void testRepositoryDataDetails() throws Exception { final Consumer snapshotDetailsAsserter = snapshotDetails -> { assertThat(snapshotDetails.getSnapshotState(), equalTo(SnapshotState.PARTIAL)); - assertThat(snapshotDetails.getVersion(), equalTo(Version.CURRENT)); + assertThat(snapshotDetails.getVersion(), equalTo(IndexVersion.current())); assertThat(snapshotDetails.getStartTimeMillis(), allOf(greaterThanOrEqualTo(beforeStartTime), lessThanOrEqualTo(afterEndTime))); assertThat( snapshotDetails.getEndTimeMillis(), @@ -327,7 +328,7 @@ public void testRepositoryDataDetails() throws Exception { repositoryData.withExtraDetails( Collections.singletonMap( snapshotId, - new RepositoryData.SnapshotDetails(SnapshotState.PARTIAL, Version.CURRENT, -1, -1, null) + new RepositoryData.SnapshotDetails(SnapshotState.PARTIAL, IndexVersion.current(), -1, -1, null) ) ), repositoryData.getGenId() @@ -383,7 +384,7 @@ private RepositoryData addRandomSnapshotsToRepoData(RepositoryData repoData, boo .collect(Collectors.toMap(Function.identity(), ind -> randomAlphaOfLength(256))); final RepositoryData.SnapshotDetails details = new RepositoryData.SnapshotDetails( randomFrom(SnapshotState.SUCCESS, SnapshotState.PARTIAL, SnapshotState.FAILED), - Version.CURRENT, + IndexVersion.current(), randomNonNegativeLong(), randomNonNegativeLong(), randomAlphaOfLength(10) diff --git 
a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java index c50413fede70d..18dc565d77c07 100644 --- a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; @@ -125,7 +126,7 @@ public void testSnapshotAndRestore() throws IOException { ShardRouting routing = ShardRouting.newUnassigned( shardId, true, - new RecoverySource.SnapshotRecoverySource("test", new Snapshot("foo", snapshotId), Version.CURRENT, indexId), + new RecoverySource.SnapshotRecoverySource("test", new Snapshot("foo", snapshotId), IndexVersion.current(), indexId), new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, ""), ShardRouting.Role.DEFAULT ); diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 521fcd91ffd48..98ed02bdd6987 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -11,9 +11,12 @@ import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.LeafReader; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.index.IndexResponse; @@ -39,6 +42,7 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.TimeValue; @@ -48,6 +52,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchNoneQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -77,6 +82,7 @@ import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchRequest; import org.elasticsearch.search.fetch.subphase.FieldAndFormat; @@ -85,6 +91,7 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; +import 
org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.tasks.TaskCancelHelper; @@ -112,6 +119,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.Supplier; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -1861,6 +1869,47 @@ public void testMinimalSearchSourceInShardRequests() { } } + public void testDfsQueryPhaseRewrite() { + createIndex("index"); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + searchRequest.source(SearchSourceBuilder.searchSource().query(new TestRewriteCounterQueryBuilder())); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + PlainActionFuture plainActionFuture = new PlainActionFuture<>(); + final Engine.SearcherSupplier reader = indexShard.acquireSearcherSupplier(); + ReaderContext context = service.createAndPutReaderContext( + request, + indexService, + indexShard, + reader, + SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis() + ); + service.executeQueryPhase( + new QuerySearchRequest(null, context.id(), request, new AggregatedDfs(Map.of(), Map.of(), 10)), + new SearchShardTask(42L, "", "", "", null, Collections.emptyMap()), + plainActionFuture + ); + + plainActionFuture.actionGet(); + assertThat(((TestRewriteCounterQueryBuilder) request.source().query()).asyncRewriteCount, equalTo(1)); + final ShardSearchContextId contextId = context.id(); + assertTrue(service.freeReaderContext(contextId)); + } + private ReaderContext createReaderContext(IndexService indexService, IndexShard indexShard) { return new ReaderContext( new ShardSearchContextId(UUIDs.randomBase64UUID(), randomNonNegativeLong()), @@ -1871,4 +1920,74 @@ private ReaderContext createReaderContext(IndexService indexService, IndexShard false ); } + + private static class TestRewriteCounterQueryBuilder extends AbstractQueryBuilder { + + final int asyncRewriteCount; + final Supplier fetched; + + TestRewriteCounterQueryBuilder() { + asyncRewriteCount = 0; + fetched = null; + } + + private TestRewriteCounterQueryBuilder(int asyncRewriteCount, Supplier fetched) { + this.asyncRewriteCount = asyncRewriteCount; + this.fetched = fetched; + } + + @Override + public String getWriteableName() { + return "test_query"; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException {} + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException {} + + @Override + protected Query doToQuery(SearchExecutionContext context) throws IOException { + return new MatchAllDocsQuery(); + } + + @Override + protected boolean doEquals(TestRewriteCounterQueryBuilder other) { + return true; + } + + @Override + 
protected int doHashCode() { + return 42; + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { + if (asyncRewriteCount > 0) { + return this; + } + if (fetched != null) { + if (fetched.get() == null) { + return this; + } + assert fetched.get(); + return new TestRewriteCounterQueryBuilder(1, null); + } + if (queryRewriteContext.convertToDataRewriteContext() != null) { + SetOnce awaitingFetch = new SetOnce<>(); + queryRewriteContext.registerAsyncAction((c, l) -> { + awaitingFetch.set(true); + l.onResponse(null); + }); + return new TestRewriteCounterQueryBuilder(0, awaitingFetch::get); + } + return this; + } + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java index 5cf8dade53543..5671bb975118b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java @@ -127,7 +127,7 @@ public void testQueryFiltering() throws IOException { writer.addDocument(Arrays.asList(new IntPoint(FIELD_NAME, point), new SortedNumericDocValuesField(FIELD_NAME, point))); } }, agg -> { - assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(filteredSample))); + assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(filteredSample), 0.2)); assertTrue(AggregationInspectionHelper.hasValue(agg)); }); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/InternalSnapshotsInfoServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/InternalSnapshotsInfoServiceTests.java index a8badfaa711f2..fdb43de69a66c 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/InternalSnapshotsInfoServiceTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/InternalSnapshotsInfoServiceTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.repositories.FilterRepository; @@ -424,7 +425,7 @@ private ClusterState addUnassignedShards(final ClusterState currentState, String final RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(random()), new Snapshot("_repo", new SnapshotId(randomAlphaOfLength(5), UUIDs.randomBase64UUID(random()))), - Version.CURRENT, + IndexVersion.current(), new IndexId(indexName, UUIDs.randomBase64UUID(random())) ); diff --git a/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java index a1f39c4f16be2..fee89d54d7946 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.snapshots; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.elasticsearch.cluster.metadata.DataStream; @@ -20,6 
+19,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; @@ -207,7 +207,7 @@ private static SnapshotInfo createSnapshotInfo(Snapshot snapshot, Boolean includ List.of(), List.of(), randomAlphaOfLengthBetween(10, 100), - Version.CURRENT, + IndexVersion.current(), randomNonNegativeLong(), randomNonNegativeLong(), shards, diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 6b23545dc5685..366bb13e609f5 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -1863,7 +1863,14 @@ protected void assertSnapshotOrGenericThread() { indicesService, clusterService, threadPool, - new PeerRecoveryTargetService(threadPool, transportService, recoverySettings, clusterService, snapshotFilesProvider), + new PeerRecoveryTargetService( + client, + threadPool, + transportService, + recoverySettings, + clusterService, + snapshotFilesProvider + ), shardStateAction, repositoriesService, searchService, diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java index 9514363ee34cd..8306fded6c29d 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java @@ -8,9 +8,9 @@ package org.elasticsearch.cluster.routing; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.Snapshot; @@ -321,7 +321,7 @@ public static RecoverySource randomRecoverySource() { new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("repo", new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID())), - Version.CURRENT, + IndexVersion.current(), new IndexId("some_index", UUIDs.randomBase64UUID(random())) ) ); diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 409ff529c9e1b..ef75f81d36ee3 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -39,6 +39,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.MapperTestUtils; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.cache.IndexCache; @@ -1032,7 +1033,7 @@ public static boolean recoverFromStore(IndexShard newShard) { /** Recover a shard from a snapshot using a given repository **/ protected void recoverShardFromSnapshot(final IndexShard shard, final Snapshot snapshot, final Repository repository) { 
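+ // SnapshotRecoverySource is versioned by IndexVersion (the per-index format version) rather than by the
+ // node's release Version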
- final Version version = Version.CURRENT; + final IndexVersion version = IndexVersion.current(); final ShardId shardId = shard.shardId(); final IndexId indexId = new IndexId(shardId.getIndex().getName(), shardId.getIndex().getUUID()); final DiscoveryNode node = getFakeDiscoNode(shard.routingEntry().currentNodeId()); diff --git a/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java b/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java index d9cc17e88adb5..db5ccba2ba6a5 100644 --- a/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java +++ b/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java @@ -68,6 +68,10 @@ public static String randomExistingFieldName(Random random, IngestDocument inges while (randomEntry.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map map = (Map) randomEntry.getValue(); + // we have reached an empty map hence the max depth we can reach + if (map.isEmpty()) { + break; + } Map treeMap = new TreeMap<>(map); randomEntry = RandomPicks.randomFrom(random, treeMap.entrySet()); key += "." + randomEntry.getKey(); diff --git a/test/framework/src/main/java/org/elasticsearch/search/FailBeforeCurrentVersionQueryBuilder.java b/test/framework/src/main/java/org/elasticsearch/search/FailBeforeCurrentVersionQueryBuilder.java index 3e71504fbb208..6c08ff43033e6 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/FailBeforeCurrentVersionQueryBuilder.java +++ b/test/framework/src/main/java/org/elasticsearch/search/FailBeforeCurrentVersionQueryBuilder.java @@ -10,7 +10,6 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.xcontent.XContentParser; @@ -23,6 +22,7 @@ public class FailBeforeCurrentVersionQueryBuilder extends DummyQueryBuilder { public static final String NAME = "fail_before_current_version"; + public static final int FUTURE_VERSION = TransportVersion.current().id() + 11_111; public FailBeforeCurrentVersionQueryBuilder(StreamInput in) throws IOException { super(in); @@ -30,15 +30,6 @@ public FailBeforeCurrentVersionQueryBuilder(StreamInput in) throws IOException { public FailBeforeCurrentVersionQueryBuilder() {} - @Override - protected void doWriteTo(StreamOutput out) { - if (out.getTransportVersion().before(TransportVersion.current())) { - throw new IllegalArgumentException( - "This query isn't serializable with transport versions before " + TransportVersion.current() - ); - } - } - public static DummyQueryBuilder fromXContent(XContentParser parser) throws IOException { DummyQueryBuilder.fromXContent(parser); return new FailBeforeCurrentVersionQueryBuilder(); @@ -53,4 +44,11 @@ public String getWriteableName() { protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { return this; } + + @Override + public TransportVersion getMinimalSupportedVersion() { + // this is what causes the failure - it always reports a version in the future, so it is never compatible with + // current or minimum CCS TransportVersion + return new TransportVersion(FUTURE_VERSION); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java index 
d128a65acb14f..cd54a72ccf369 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java @@ -391,6 +391,7 @@ public void testQueryWithinMultiLine() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/96999") public void testQueryLinearRing() throws Exception { createMapping(defaultIndexName, defaultFieldName); ensureGreen(); diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 2380d51caf1a8..5df7537cb69e8 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.FinalizeSnapshotContext; import org.elasticsearch.repositories.RepositoriesService; @@ -367,6 +368,15 @@ protected void maybeInitWithOldSnapshotVersion(String repoName, Path repoPath) t } } + private static String versionString(Version version) { + if (version.before(Version.V_8_9_0)) { + // add back the "" for a json String + return "\"" + version + "\""; + } else { + return version.indexVersion.toString(); + } + } + /** * Workaround to simulate BwC situation: taking a snapshot without indices here so that we don't create any new version shard * generations (the existence of which would short-circuit checks for the repo containing old version snapshots) @@ -388,7 +398,7 @@ protected String initWithSnapshotVersion(String repoName, Path repoPath, Version final RepositoryData downgradedRepoData = RepositoryData.snapshotsFromXContent( JsonXContent.jsonXContent.createParser( XContentParserConfiguration.EMPTY, - Strings.toString(jsonBuilder).replace(Version.CURRENT.toString(), version.toString()) + Strings.toString(jsonBuilder).replace(IndexVersion.current().toString(), versionString(version)) ), repositoryData.getGenId(), randomBoolean() @@ -403,7 +413,7 @@ protected String initWithSnapshotVersion(String repoName, Path repoPath, Version JsonXContent.jsonXContent.createParser( XContentParserConfiguration.EMPTY, Strings.toString(snapshotInfo, ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS) - .replace(String.valueOf(Version.CURRENT.id), String.valueOf(version.id)) + .replace(String.valueOf(IndexVersion.current().id()), String.valueOf(version.id)) ) ); final BlobStoreRepository blobStoreRepository = getRepositoryOnMaster(repoName); @@ -521,7 +531,7 @@ protected void addBwCFailedSnapshot(String repoName, String snapshotName, Map listener) { final long nowMillis = threadPool.absoluteTimeInMillis(); snapshotsDetails.put( indexName, - new RepositoryData.SnapshotDetails(SnapshotState.SUCCESS, Version.CURRENT, nowMillis, nowMillis, "") + new RepositoryData.SnapshotDetails(SnapshotState.SUCCESS, IndexVersion.current(), nowMillis, nowMillis, "") ); indexSnapshots.put(new IndexId(indexName, remoteIndices.get(indexName).getIndex().getUUID()), List.of(snapshotId)); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 601ed4f9df875..40c3a35d66086 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -8,7 +8,6 @@ import org.apache.lucene.store.IOContext; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; @@ -37,6 +36,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.EngineTestCase; @@ -516,7 +516,7 @@ protected synchronized void recoverPrimary(IndexShard primaryShard) { new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), snapshot, - Version.CURRENT, + IndexVersion.current(), new IndexId("test", UUIDs.randomBase64UUID(random())) ) ); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/allocation/CcrPrimaryFollowerAllocationDeciderTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/allocation/CcrPrimaryFollowerAllocationDeciderTests.java index b0cd6844a2d2c..d27d1478518bf 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/allocation/CcrPrimaryFollowerAllocationDeciderTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/allocation/CcrPrimaryFollowerAllocationDeciderTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; @@ -199,7 +200,7 @@ static RecoverySource.SnapshotRecoverySource newSnapshotRecoverySource() { return new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), snapshot, - Version.CURRENT, + IndexVersion.current(), new IndexId("test", UUIDs.randomBase64UUID(random())) ); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java index 4561bfe426128..2156995f2e0f6 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.ccr.index.engine; import org.apache.lucene.store.IOContext; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionTestUtils; @@ -23,6 +22,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Releasable; +import org.elasticsearch.index.IndexVersion; import 
org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; @@ -134,7 +134,7 @@ public void testRestoreShard() throws IOException { new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), snapshot, - Version.CURRENT, + IndexVersion.current(), new IndexId("test", UUIDs.randomBase64UUID(random())) ) ); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleAction.java index 9613772a1607b..d8a3ea1d7568b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeRequest; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java index 22805ae8b6caa..3e8be842db386 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleTask.java index b5d46cf6bf564..fdd5662c29b9a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleTask.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.core.downsample; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xpack.core.rollup.RollupField; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java index 5029665663441..abd2b88a9826a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.core.ilm; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexAbstraction; @@ 
-21,7 +22,6 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.core.downsample.DownsampleConfig; import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.io.IOException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleStep.java index c1307f53f575e..4930098705ac1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleStep.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; @@ -18,7 +19,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.xpack.core.downsample.DownsampleAction; -import org.elasticsearch.xpack.core.downsample.DownsampleConfig; import java.util.Objects; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/MlLTRNamedXContentProvider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/MlLTRNamedXContentProvider.java new file mode 100644 index 0000000000000..9c2b9522ac15e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/MlLTRNamedXContentProvider.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.core.ml.inference; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.plugins.spi.NamedXContentProvider; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfigUpdate; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfigUpdate; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LenientlyParsedInferenceConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.StrictlyParsedInferenceConfig; + +import java.util.ArrayList; +import java.util.List; + +/** + * Only the LTR named writeables and xcontent. 
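A hypothetical registration sketch (the registry construction mirrors what InferenceConfigItemTestCase does later in this diff):
+ * <pre>{@code
+ * List<NamedXContentRegistry.Entry> entries = new ArrayList<>(new MlLTRNamedXContentProvider().getNamedXContentParsers());
+ * NamedXContentRegistry registry = new NamedXContentRegistry(entries);
+ * }</pre>
+ *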
Remove and combine with the inference provider + * when the feature flag is removed + */ +public class MlLTRNamedXContentProvider implements NamedXContentProvider { + + @Override + public List<NamedXContentRegistry.Entry> getNamedXContentParsers() { + List<NamedXContentRegistry.Entry> namedXContent = new ArrayList<>(); + // Lenient Inference Config + namedXContent.add( + new NamedXContentRegistry.Entry( + LenientlyParsedInferenceConfig.class, + LearnToRankConfig.NAME, + LearnToRankConfig::fromXContentLenient + ) + ); + // Strict Inference Config + namedXContent.add( + new NamedXContentRegistry.Entry( + StrictlyParsedInferenceConfig.class, + LearnToRankConfig.NAME, + LearnToRankConfig::fromXContentStrict + ) + ); + // Inference Config Update + namedXContent.add( + new NamedXContentRegistry.Entry( + InferenceConfigUpdate.class, + LearnToRankConfigUpdate.NAME, + LearnToRankConfigUpdate::fromXContentStrict + ) + ); + return namedXContent; + } + + public List<NamedWriteableRegistry.Entry> getNamedWriteables() { + List<NamedWriteableRegistry.Entry> namedWriteables = new ArrayList<>(); + // Inference config + namedWriteables.add( + new NamedWriteableRegistry.Entry(InferenceConfig.class, LearnToRankConfig.NAME.getPreferredName(), LearnToRankConfig::new) + ); + // Inference config update + namedWriteables.add( + new NamedWriteableRegistry.Entry( + InferenceConfigUpdate.class, + LearnToRankConfigUpdate.NAME.getPreferredName(), + LearnToRankConfigUpdate::new + ) + ); + return namedWriteables; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfig.java index 00dfcee3e1b5d..8f2d19d85b678 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfig.java @@ -40,4 +40,16 @@ default boolean requestingImportance() { String getResultsField(); boolean isAllocateOnly(); + + default boolean supportsIngestPipeline() { + return true; + } + + default boolean supportsPipelineAggregation() { + return true; + } + + default boolean supportsSearchRescorer() { + return false; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfig.java new file mode 100644 index 0000000000000..48eb001fb3786 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfig.java @@ -0,0 +1,201 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ +package org.elasticsearch.xpack.core.ml.inference.trainedmodel; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearnToRankFeatureExtractorBuilder; +import org.elasticsearch.xpack.core.ml.utils.NamedXContentObjectHelper; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +public class LearnToRankConfig extends RegressionConfig { + + public static final ParseField NAME = new ParseField("learn_to_rank"); + static final TransportVersion MIN_SUPPORTED_TRANSPORT_VERSION = TransportVersion.current(); + public static final ParseField NUM_TOP_FEATURE_IMPORTANCE_VALUES = new ParseField("num_top_feature_importance_values"); + public static final ParseField FEATURE_EXTRACTORS = new ParseField("feature_extractors"); + public static LearnToRankConfig EMPTY_PARAMS = new LearnToRankConfig(null, null); + + private static final ObjectParser LENIENT_PARSER = createParser(true); + private static final ObjectParser STRICT_PARSER = createParser(false); + + private static ObjectParser createParser(boolean lenient) { + ObjectParser parser = new ObjectParser<>( + NAME.getPreferredName(), + lenient, + LearnToRankConfig.Builder::new + ); + parser.declareInt(Builder::setNumTopFeatureImportanceValues, NUM_TOP_FEATURE_IMPORTANCE_VALUES); + parser.declareNamedObjects( + Builder::setLearnToRankFeatureExtractorBuilders, + (p, c, n) -> p.namedObject(LearnToRankFeatureExtractorBuilder.class, n, lenient), + b -> {}, + FEATURE_EXTRACTORS + ); + return parser; + } + + public static LearnToRankConfig fromXContentStrict(XContentParser parser) { + return STRICT_PARSER.apply(parser, null).build(); + } + + public static LearnToRankConfig fromXContentLenient(XContentParser parser) { + return LENIENT_PARSER.apply(parser, null).build(); + } + + private final List featureExtractorBuilders; + + public LearnToRankConfig(Integer numTopFeatureImportanceValues, List featureExtractorBuilders) { + super(DEFAULT_RESULTS_FIELD, numTopFeatureImportanceValues); + if (featureExtractorBuilders != null) { + Set featureNames = featureExtractorBuilders.stream() + .map(LearnToRankFeatureExtractorBuilder::featureName) + .collect(Collectors.toSet()); + if (featureNames.size() < featureExtractorBuilders.size()) { + throw new IllegalArgumentException( + "[" + FEATURE_EXTRACTORS.getPreferredName() + "] contains duplicate [feature_name] values" + ); + } + } + this.featureExtractorBuilders = featureExtractorBuilders == null ? 
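/* normalize a null extractor list to an immutable empty list so callers never have to null-check */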
List.of() : featureExtractorBuilders; + } + + public LearnToRankConfig(StreamInput in) throws IOException { + super(in); + this.featureExtractorBuilders = in.readNamedWriteableList(LearnToRankFeatureExtractorBuilder.class); + } + + public List getFeatureExtractorBuilders() { + return featureExtractorBuilders; + } + + @Override + public String getResultsField() { + return DEFAULT_RESULTS_FIELD; + } + + @Override + public boolean isAllocateOnly() { + return false; + } + + @Override + public boolean supportsIngestPipeline() { + return false; + } + + @Override + public boolean supportsPipelineAggregation() { + return false; + } + + @Override + public boolean supportsSearchRescorer() { + return true; + } + + @Override + public String getWriteableName() { + return NAME.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeNamedWriteableList(featureExtractorBuilders); + } + + @Override + public String getName() { + return NAME.getPreferredName(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NUM_TOP_FEATURE_IMPORTANCE_VALUES.getPreferredName(), getNumTopFeatureImportanceValues()); + if (featureExtractorBuilders.isEmpty() == false) { + NamedXContentObjectHelper.writeNamedObjects( + builder, + params, + true, + FEATURE_EXTRACTORS.getPreferredName(), + featureExtractorBuilders + ); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (super.equals(o) == false) return false; + LearnToRankConfig that = (LearnToRankConfig) o; + return Objects.equals(featureExtractorBuilders, that.featureExtractorBuilders); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), featureExtractorBuilders); + } + + @Override + public boolean isTargetTypeSupported(TargetType targetType) { + return TargetType.REGRESSION.equals(targetType); + } + + @Override + public Version getMinimalSupportedNodeVersion() { + return Version.CURRENT; + } + + @Override + public TransportVersion getMinimalSupportedTransportVersion() { + return MIN_SUPPORTED_TRANSPORT_VERSION; + } + + public static class Builder { + private Integer numTopFeatureImportanceValues; + private List learnToRankFeatureExtractorBuilders; + + Builder() {} + + Builder(LearnToRankConfig config) { + this.numTopFeatureImportanceValues = config.getNumTopFeatureImportanceValues(); + this.learnToRankFeatureExtractorBuilders = config.featureExtractorBuilders; + } + + public Builder setNumTopFeatureImportanceValues(Integer numTopFeatureImportanceValues) { + this.numTopFeatureImportanceValues = numTopFeatureImportanceValues; + return this; + } + + public Builder setLearnToRankFeatureExtractorBuilders( + List learnToRankFeatureExtractorBuilders + ) { + this.learnToRankFeatureExtractorBuilders = learnToRankFeatureExtractorBuilders; + return this; + } + + public LearnToRankConfig build() { + return new LearnToRankConfig(numTopFeatureImportanceValues, learnToRankFeatureExtractorBuilders); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdate.java new file mode 100644 index 0000000000000..8030b31f396a1 --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdate.java @@ -0,0 +1,228 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.core.ml.inference.trainedmodel; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearnToRankFeatureExtractorBuilder; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.NamedXContentObject; +import org.elasticsearch.xpack.core.ml.utils.NamedXContentObjectHelper; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig.DEFAULT_RESULTS_FIELD; +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfig.FEATURE_EXTRACTORS; +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfig.NUM_TOP_FEATURE_IMPORTANCE_VALUES; + +public class LearnToRankConfigUpdate implements InferenceConfigUpdate, NamedXContentObject { + + public static final ParseField NAME = LearnToRankConfig.NAME; + + public static LearnToRankConfigUpdate EMPTY_PARAMS = new LearnToRankConfigUpdate(null, null); + + public static LearnToRankConfigUpdate fromConfig(LearnToRankConfig config) { + return new LearnToRankConfigUpdate(config.getNumTopFeatureImportanceValues(), config.getFeatureExtractorBuilders()); + } + + private static final ObjectParser STRICT_PARSER = createParser(false); + + private static ObjectParser createParser(boolean lenient) { + ObjectParser parser = new ObjectParser<>( + NAME.getPreferredName(), + lenient, + LearnToRankConfigUpdate.Builder::new + ); + parser.declareInt(LearnToRankConfigUpdate.Builder::setNumTopFeatureImportanceValues, NUM_TOP_FEATURE_IMPORTANCE_VALUES); + parser.declareNamedObjects( + LearnToRankConfigUpdate.Builder::setFeatureExtractorBuilders, + (p, c, n) -> p.namedObject(LearnToRankFeatureExtractorBuilder.class, n, false), + b -> {}, + FEATURE_EXTRACTORS + ); + return parser; + } + + public static LearnToRankConfigUpdate fromXContentStrict(XContentParser parser) { + return STRICT_PARSER.apply(parser, null).build(); + } + + private final Integer numTopFeatureImportanceValues; + private final List featureExtractorBuilderList; + + public LearnToRankConfigUpdate( + Integer numTopFeatureImportanceValues, + List featureExtractorBuilders + ) { + if (numTopFeatureImportanceValues != null && numTopFeatureImportanceValues < 0) { + throw new IllegalArgumentException( + "[" + NUM_TOP_FEATURE_IMPORTANCE_VALUES.getPreferredName() + "] must be greater than or equal to 0" + ); + } + if (featureExtractorBuilders != null) { + Set featureNames = featureExtractorBuilders.stream() + .map(LearnToRankFeatureExtractorBuilder::featureName) + .collect(Collectors.toSet()); + if 
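/* e.g. two extractors both declaring feature_name "price" would silently shadow each other, so duplicates are rejected */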
(featureNames.size() < featureExtractorBuilders.size()) { + throw new IllegalArgumentException( + "[" + FEATURE_EXTRACTORS.getPreferredName() + "] contains duplicate [feature_name] values" + ); + } + } + this.numTopFeatureImportanceValues = numTopFeatureImportanceValues; + this.featureExtractorBuilderList = featureExtractorBuilders == null ? List.of() : featureExtractorBuilders; + } + + public LearnToRankConfigUpdate(StreamInput in) throws IOException { + this.numTopFeatureImportanceValues = in.readOptionalVInt(); + this.featureExtractorBuilderList = in.readNamedWriteableList(LearnToRankFeatureExtractorBuilder.class); + } + + public Integer getNumTopFeatureImportanceValues() { + return numTopFeatureImportanceValues; + } + + @Override + public String getResultsField() { + return DEFAULT_RESULTS_FIELD; + } + + @Override + public InferenceConfigUpdate.Builder<? extends InferenceConfigUpdate.Builder<?, ?>, ? extends InferenceConfigUpdate> newBuilder() { + return new Builder().setNumTopFeatureImportanceValues(numTopFeatureImportanceValues); + } + + @Override + public String getWriteableName() { + return NAME.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalVInt(numTopFeatureImportanceValues); + out.writeNamedWriteableList(featureExtractorBuilderList); + } + + @Override + public String getName() { + return NAME.getPreferredName(); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return LearnToRankConfig.MIN_SUPPORTED_TRANSPORT_VERSION; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (numTopFeatureImportanceValues != null) { + builder.field(NUM_TOP_FEATURE_IMPORTANCE_VALUES.getPreferredName(), numTopFeatureImportanceValues); + } + if (featureExtractorBuilderList.isEmpty() == false) { + NamedXContentObjectHelper.writeNamedObjects( + builder, + params, + true, + FEATURE_EXTRACTORS.getPreferredName(), + featureExtractorBuilderList + ); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + LearnToRankConfigUpdate that = (LearnToRankConfigUpdate) o; + return Objects.equals(this.numTopFeatureImportanceValues, that.numTopFeatureImportanceValues) + && Objects.equals(this.featureExtractorBuilderList, that.featureExtractorBuilderList); + } + + @Override + public int hashCode() { + return Objects.hash(numTopFeatureImportanceValues, featureExtractorBuilderList); + } + + @Override + public LearnToRankConfig apply(InferenceConfig originalConfig) { + if (originalConfig instanceof LearnToRankConfig == false) { + throw ExceptionsHelper.badRequestException( + "Inference config of type [{}] cannot be updated with an inference request of type [{}]", + originalConfig.getName(), + getName() + ); + } + + LearnToRankConfig ltrConfig = (LearnToRankConfig) originalConfig; + if (isNoop(ltrConfig)) { + return ltrConfig; + } + LearnToRankConfig.Builder builder = new LearnToRankConfig.Builder(ltrConfig); + if (numTopFeatureImportanceValues != null) { + builder.setNumTopFeatureImportanceValues(numTopFeatureImportanceValues); + } + if (featureExtractorBuilderList.isEmpty() == false) { + Map<String, LearnToRankFeatureExtractorBuilder> existingExtractors = ltrConfig.getFeatureExtractorBuilders() + .stream() + .collect(Collectors.toMap(LearnToRankFeatureExtractorBuilder::featureName, f -> f)); + featureExtractorBuilderList.forEach(f -> existingExtractors.put(f.featureName(), f)); +
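// merge semantics: an extractor in the update replaces any original extractor
+            // with the same feature_name, while extractors unique to either side are kept
+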
builder.setLearnToRankFeatureExtractorBuilders(new ArrayList<>(existingExtractors.values())); + } + return builder.build(); + } + + @Override + public boolean isSupported(InferenceConfig inferenceConfig) { + return inferenceConfig instanceof LearnToRankConfig; + } + + boolean isNoop(LearnToRankConfig originalConfig) { + return (numTopFeatureImportanceValues == null || originalConfig.getNumTopFeatureImportanceValues() == numTopFeatureImportanceValues) + && (featureExtractorBuilderList.isEmpty() + || Objects.equals(originalConfig.getFeatureExtractorBuilders(), featureExtractorBuilderList)); + } + + public static class Builder implements InferenceConfigUpdate.Builder { + private Integer numTopFeatureImportanceValues; + private List featureExtractorBuilderList; + + @Override + public Builder setResultsField(String resultsField) { + assert false : "results field should never be set in ltr config"; + return this; + } + + public Builder setNumTopFeatureImportanceValues(Integer numTopFeatureImportanceValues) { + this.numTopFeatureImportanceValues = numTopFeatureImportanceValues; + return this; + } + + public Builder setFeatureExtractorBuilders(List featureExtractorBuilderList) { + this.featureExtractorBuilderList = featureExtractorBuilderList; + return this; + } + + @Override + public LearnToRankConfigUpdate build() { + return new LearnToRankConfigUpdate(numTopFeatureImportanceValues, featureExtractorBuilderList); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NlpConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NlpConfig.java index c39d6103b772b..fe67fa52d8606 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NlpConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NlpConfig.java @@ -29,4 +29,19 @@ public interface NlpConfig extends LenientlyParsedInferenceConfig, StrictlyParse * @return the model tokenization parameters */ Tokenization getTokenization(); + + @Override + default boolean supportsIngestPipeline() { + return true; + } + + @Override + default boolean supportsPipelineAggregation() { + return false; + } + + @Override + default boolean supportsSearchRescorer() { + return false; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ltr/LearnToRankFeatureExtractorBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ltr/LearnToRankFeatureExtractorBuilder.java new file mode 100644 index 0000000000000..590ff7a7422af --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ltr/LearnToRankFeatureExtractorBuilder.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr; + +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xpack.core.ml.utils.NamedXContentObject; + +public interface LearnToRankFeatureExtractorBuilder extends NamedXContentObject, NamedWriteable { + + ParseField FEATURE_NAME = new ParseField("feature_name"); + + /** + * @return The input feature that this extractor satisfies + */ + String featureName(); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupShardTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupShardTask.java index ccf80afd0ae41..49a32388845b4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupShardTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupShardTask.java @@ -7,10 +7,10 @@ package org.elasticsearch.xpack.core.rollup.action; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.xpack.core.downsample.DownsampleConfig; import org.elasticsearch.xpack.core.rollup.RollupField; import java.util.Map; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java index e9b4a27ea7415..127a84b103ecf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java @@ -6,33 +6,79 @@ */ package org.elasticsearch.xpack.core.security.action.user; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.security.authc.Authentication; import java.io.IOException; +import java.util.Objects; -public class AuthenticateResponse extends ActionResponse { +public class AuthenticateResponse extends ActionResponse implements ToXContent { - private Authentication authentication; + public static final TransportVersion VERSION_OPERATOR_FIELD = TransportVersion.V_8_500_028; + + private final Authentication authentication; + private final boolean operator; public AuthenticateResponse(StreamInput in) throws IOException { super(in); authentication = new Authentication(in); + if (in.getTransportVersion().onOrAfter(VERSION_OPERATOR_FIELD)) { + operator = in.readBoolean(); + } else { + operator = false; + } } - public AuthenticateResponse(Authentication authentication) { - this.authentication = authentication; + public AuthenticateResponse(Authentication authentication, boolean operator) { + this.authentication = Objects.requireNonNull(authentication); + this.operator = operator; } public Authentication authentication() { return authentication; } + public boolean isOperator() { + return operator; + } + @Override public void writeTo(StreamOutput out) throws IOException { authentication.writeTo(out); + if 
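/* version-gated wire format: peers older than VERSION_OPERATOR_FIELD never see the flag and default it to false on read */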
(out.getTransportVersion().onOrAfter(VERSION_OPERATOR_FIELD)) { + out.writeBoolean(operator); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + authentication.toXContentFragment(builder); + if (this.operator) { + builder.field("operator", true); + } + return builder.endObject(); } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + AuthenticateResponse that = (AuthenticateResponse) o; + return this.operator == that.operator && this.authentication.equals(that.authentication); + } + + @Override + public int hashCode() { + return Objects.hash(authentication, operator); + } } diff --git a/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/ilm-policy/profiling-60-days.json b/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/ilm-policy/profiling-60-days.json index 5d2cad8f951d9..130b2fc59a5b3 100644 --- a/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/ilm-policy/profiling-60-days.json +++ b/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/ilm-policy/profiling-60-days.json @@ -18,9 +18,6 @@ "actions": { "set_priority": { "priority": 50 - }, - "forcemerge": { - "max_num_segments": 1 } } }, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java index adaddaeb4700f..58133ecd1aa9d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java @@ -46,6 +46,7 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineException; @@ -358,7 +359,7 @@ public void onFailure(Exception e) { new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("src_only", snapshotId), - Version.CURRENT, + IndexVersion.current(), indexId ) ); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java index bcf1362ba0e88..6e7383ea314f6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.core; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -51,6 +50,7 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.analysis.TokenizerFactory; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.mapper.MetadataFieldMapper; @@ -594,8 +594,8 @@ public Map 
getInternalRepositories( } @Override - public BiConsumer<Snapshot, Version> addPreRestoreVersionCheck() { - List<BiConsumer<Snapshot, Version>> checks = filterPlugins(RepositoryPlugin.class).stream() + public BiConsumer<Snapshot, IndexVersion> addPreRestoreVersionCheck() { + List<BiConsumer<Snapshot, IndexVersion>> checks = filterPlugins(RepositoryPlugin.class).stream() .map(RepositoryPlugin::addPreRestoreVersionCheck) .filter(Objects::nonNull) .collect(Collectors.toList()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/InferenceConfigItemTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/InferenceConfigItemTestCase.java index d18b17791ef2d..ffa9334551360 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/InferenceConfigItemTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/InferenceConfigItemTestCase.java @@ -74,14 +74,16 @@ static InferenceConfig mutateForVersion(NlpConfig inferenceConfig, TransportVers protected NamedXContentRegistry xContentRegistry() { List<NamedXContentRegistry.Entry> namedXContent = new ArrayList<>(); namedXContent.addAll(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); + namedXContent.addAll(new MlLTRNamedXContentProvider().getNamedXContentParsers()); namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents()); return new NamedXContentRegistry(namedXContent); } @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { - List<NamedWriteableRegistry.Entry> entries = new ArrayList<>(new MlInferenceNamedXContentProvider().getNamedWriteables()); - return new NamedWriteableRegistry(entries); + List<NamedWriteableRegistry.Entry> namedWriteables = new ArrayList<>(new MlInferenceNamedXContentProvider().getNamedWriteables()); + namedWriteables.addAll(new MlLTRNamedXContentProvider().getNamedWriteables()); + return new NamedWriteableRegistry(namedWriteables); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigTests.java new file mode 100644 index 0000000000000..75923354eaa0b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigTests.java @@ -0,0 +1,203 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ +package org.elasticsearch.xpack.core.ml.inference.trainedmodel; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.inference.InferenceConfigItemTestCase; +import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.MlLTRNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearnToRankFeatureExtractorBuilder; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +public class LearnToRankConfigTests extends InferenceConfigItemTestCase { + private boolean lenient; + + public static LearnToRankConfig randomLearnToRankConfig() { + return new LearnToRankConfig( + randomBoolean() ? null : randomIntBetween(0, 10), + randomBoolean() + ? null + : Stream.generate(() -> new TestValueExtractor(randomAlphaOfLength(10))).limit(randomInt(5)).collect(Collectors.toList()) + ); + } + + @Before + public void chooseStrictOrLenient() { + lenient = randomBoolean(); + } + + @Override + protected LearnToRankConfig createTestInstance() { + return randomLearnToRankConfig(); + } + + @Override + protected LearnToRankConfig mutateInstance(LearnToRankConfig instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + return field -> field.isEmpty() == false; + } + + @Override + protected Writeable.Reader instanceReader() { + return LearnToRankConfig::new; + } + + @Override + protected LearnToRankConfig doParseInstance(XContentParser parser) throws IOException { + return lenient ? LearnToRankConfig.fromXContentLenient(parser) : LearnToRankConfig.fromXContentStrict(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return lenient; + } + + @Override + protected LearnToRankConfig mutateInstanceForVersion(LearnToRankConfig instance, TransportVersion version) { + return instance; + } + + public void testDuplicateFeatureNames() { + List featureExtractorBuilderList = List.of( + new TestValueExtractor("foo"), + new TestValueExtractor("foo") + ); + expectThrows( + IllegalArgumentException.class, + () -> new LearnToRankConfig(randomBoolean() ? 
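/* the duplicated "foo" feature names alone must trigger the exception; the importance-value argument is irrelevant here */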
null : randomIntBetween(0, 10), featureExtractorBuilderList) + ); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List namedXContent = new ArrayList<>(); + namedXContent.addAll(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); + namedXContent.addAll(new MlLTRNamedXContentProvider().getNamedXContentParsers()); + namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents()); + namedXContent.add( + new NamedXContentRegistry.Entry( + LearnToRankFeatureExtractorBuilder.class, + TestValueExtractor.NAME, + TestValueExtractor::fromXContent + ) + ); + return new NamedXContentRegistry(namedXContent); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + List namedWriteables = new ArrayList<>(new MlInferenceNamedXContentProvider().getNamedWriteables()); + namedWriteables.addAll(new MlLTRNamedXContentProvider().getNamedWriteables()); + namedWriteables.add( + new NamedWriteableRegistry.Entry( + LearnToRankFeatureExtractorBuilder.class, + TestValueExtractor.NAME.getPreferredName(), + TestValueExtractor::new + ) + ); + return new NamedWriteableRegistry(namedWriteables); + } + + static class TestValueExtractor implements LearnToRankFeatureExtractorBuilder { + public static final ParseField NAME = new ParseField("test"); + private final String featureName; + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + NAME.getPreferredName(), + a -> new TestValueExtractor((String) a[0]) + ); + private static final ConstructingObjectParser LENIENT_PARSER = new ConstructingObjectParser<>( + NAME.getPreferredName(), + true, + a -> new TestValueExtractor((String) a[0]) + ); + static { + PARSER.declareString(constructorArg(), FEATURE_NAME); + LENIENT_PARSER.declareString(constructorArg(), FEATURE_NAME); + } + + public static TestValueExtractor fromXContent(XContentParser parser, Object context) { + boolean lenient = Boolean.TRUE.equals(context); + return lenient ? 
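/* the lenient parser tolerates unknown fields, the strict parser rejects them */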
LENIENT_PARSER.apply(parser, null) : PARSER.apply(parser, null); + } + + TestValueExtractor(StreamInput in) throws IOException { + this.featureName = in.readString(); + } + + TestValueExtractor(String featureName) { + this.featureName = featureName; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(FEATURE_NAME.getPreferredName(), featureName); + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(featureName); + } + + @Override + public String featureName() { + return featureName; + } + + @Override + public String getName() { + return NAME.getPreferredName(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TestValueExtractor that = (TestValueExtractor) o; + return Objects.equals(featureName, that.featureName); + } + + @Override + public int hashCode() { + return Objects.hash(featureName); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdateTests.java new file mode 100644 index 0000000000000..97c0358209faa --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdateTests.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.core.ml.inference.trainedmodel; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; +import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.MlLTRNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearnToRankFeatureExtractorBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfigTests.randomLearnToRankConfig; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.in; +import static org.hamcrest.Matchers.is; + +public class LearnToRankConfigUpdateTests extends AbstractBWCSerializationTestCase { + + public static LearnToRankConfigUpdate randomLearnToRankConfigUpdate() { + return new LearnToRankConfigUpdate(randomBoolean() ? 
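/* extractors stay null here; extractor round-tripping is exercised by LearnToRankConfigTests */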
null : randomIntBetween(0, 10), null); + } + + public void testApply() { + LearnToRankConfig originalConfig = randomLearnToRankConfig(); + assertThat(originalConfig, equalTo(LearnToRankConfigUpdate.EMPTY_PARAMS.apply(originalConfig))); + assertThat( + new LearnToRankConfig.Builder(originalConfig).setNumTopFeatureImportanceValues(5).build(), + equalTo(new LearnToRankConfigUpdate.Builder().setNumTopFeatureImportanceValues(5).build().apply(originalConfig)) + ); + assertThat( + new LearnToRankConfig.Builder(originalConfig).setNumTopFeatureImportanceValues(1).build(), + equalTo(new LearnToRankConfigUpdate.Builder().setNumTopFeatureImportanceValues(1).build().apply(originalConfig)) + ); + + LearnToRankFeatureExtractorBuilder extractorBuilder = new LearnToRankConfigTests.TestValueExtractor("foo"); + LearnToRankFeatureExtractorBuilder extractorBuilder2 = new LearnToRankConfigTests.TestValueExtractor("bar"); + + LearnToRankConfig config = new LearnToRankConfigUpdate.Builder().setNumTopFeatureImportanceValues(1) + .setFeatureExtractorBuilders(List.of(extractorBuilder2, extractorBuilder)) + .build() + .apply(originalConfig); + assertThat(config.getNumTopFeatureImportanceValues(), equalTo(1)); + assertThat(extractorBuilder2, is(in(config.getFeatureExtractorBuilders()))); + assertThat(extractorBuilder, is(in(config.getFeatureExtractorBuilders()))); + } + + @Override + protected LearnToRankConfigUpdate createTestInstance() { + return randomLearnToRankConfigUpdate(); + } + + @Override + protected LearnToRankConfigUpdate mutateInstance(LearnToRankConfigUpdate instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } + + @Override + protected Writeable.Reader instanceReader() { + return LearnToRankConfigUpdate::new; + } + + @Override + protected LearnToRankConfigUpdate doParseInstance(XContentParser parser) throws IOException { + return LearnToRankConfigUpdate.fromXContentStrict(parser); + } + + @Override + protected LearnToRankConfigUpdate mutateInstanceForVersion(LearnToRankConfigUpdate instance, TransportVersion version) { + return instance; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List namedXContent = new ArrayList<>(); + namedXContent.addAll(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); + namedXContent.addAll(new MlLTRNamedXContentProvider().getNamedXContentParsers()); + namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents()); + namedXContent.add( + new NamedXContentRegistry.Entry( + LearnToRankFeatureExtractorBuilder.class, + LearnToRankConfigTests.TestValueExtractor.NAME, + LearnToRankConfigTests.TestValueExtractor::fromXContent + ) + ); + return new NamedXContentRegistry(namedXContent); + } + + @Override + protected NamedWriteableRegistry writableRegistry() { + List namedWriteables = new ArrayList<>(new MlInferenceNamedXContentProvider().getNamedWriteables()); + namedWriteables.addAll(new MlLTRNamedXContentProvider().getNamedWriteables()); + namedWriteables.add( + new NamedWriteableRegistry.Entry( + LearnToRankFeatureExtractorBuilder.class, + LearnToRankConfigTests.TestValueExtractor.NAME.getPreferredName(), + LearnToRankConfigTests.TestValueExtractor::new + ) + ); + return new NamedWriteableRegistry(namedWriteables); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return writableRegistry(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/DownsampleActionConfigTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/DownsampleActionConfigTests.java index 67f081bf7bf0b..0f0ed022b3ca3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/DownsampleActionConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/DownsampleActionConfigTests.java @@ -6,11 +6,11 @@ */ package org.elasticsearch.xpack.core.rollup; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.core.downsample.DownsampleConfig; import java.io.IOException; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponseTests.java new file mode 100644 index 0000000000000..a211a4cd47620 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponseTests.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; + +import java.io.IOException; + +public class AuthenticateResponseTests extends AbstractWireSerializingTestCase { + + @Override + protected Writeable.Reader instanceReader() { + return AuthenticateResponse::new; + } + + @Override + protected AuthenticateResponse createTestInstance() { + return new AuthenticateResponse(AuthenticationTestHelper.builder().build(), randomBoolean()); + } + + @Override + protected AuthenticateResponse mutateInstance(AuthenticateResponse instance) throws IOException { + if (randomBoolean()) { + return new AuthenticateResponse( + randomValueOtherThanMany(instance::equals, () -> AuthenticationTestHelper.builder().build()), + instance.isOperator() + ); + } else { + return new AuthenticateResponse(instance.authentication(), instance.isOperator() == false); + } + } +} diff --git a/x-pack/plugin/ml/qa/basic-multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlRescorerIT.java b/x-pack/plugin/ml/qa/basic-multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlRescorerIT.java index e201b1e5ed23a..671889b207c77 100644 --- a/x-pack/plugin/ml/qa/basic-multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlRescorerIT.java +++ b/x-pack/plugin/ml/qa/basic-multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlRescorerIT.java @@ -33,7 +33,7 @@ public void setupModelAndData() throws IOException { "description": "super complex model for tests", "input": {"field_names": ["cost", "product"]}, "inference_config": { - "regression": { + "learn_to_rank": { } }, "definition": { @@ -146,7 +146,6 @@ public void testLtrSimple() throws Exception { } @SuppressWarnings("unchecked") - @AwaitsFix(bugUrl = "Fix DFS rewrite for rescorers") public 
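/* un-muted: the @AwaitsFix for DFS rescorer rewrites was removed in the hunk above */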
void testLtrSimpleDFS() throws Exception {
Response searchResponse = searchDfs("""
{
@@ -222,8 +221,7 @@ public void testLtrCanMatch() throws Exception {
Map<String, Object> response = responseAsMap(searchResponse);
assertThat(response.toString(), (List<Double>) XContentMapValues.extractValue("hits.hits._score", response), contains(17.0, 17.0));
- // TODO add DFS support for rescorer rewrites
- /* searchResponse = searchCanMatch("""
+ searchResponse = searchCanMatch("""
{
"query": { "match": { "product": { "query": "TV"}} },
@@ -238,7 +236,6 @@ public void testLtrCanMatch() throws Exception {
response = responseAsMap(searchResponse);
assertThat(response.toString(), (List<Double>) XContentMapValues.extractValue("hits.hits._score", response), contains(17.0, 17.0));
- */
}
private void indexData(String data) throws IOException {
diff --git a/x-pack/plugin/ml/qa/ml-with-security/build.gradle b/x-pack/plugin/ml/qa/ml-with-security/build.gradle
index 4a6c4c9b64428..b28e6bec462b9 100644
--- a/x-pack/plugin/ml/qa/ml-with-security/build.gradle
+++ b/x-pack/plugin/ml/qa/ml-with-security/build.gradle
@@ -181,6 +181,7 @@ tasks.named("yamlRestTest").configure {
'ml/inference_crud/Test put nlp model config with vocabulary set',
'ml/inference_crud/Test put model model aliases with nlp model',
'ml/inference_processor/Test create processor with missing mandatory fields',
+ 'ml/inference_rescore/Test rescore with missing model',
'ml/inference_stats_crud/Test get stats given missing trained model',
'ml/inference_stats_crud/Test get stats given expression without matches and allow_no_match is false',
'ml/jobs_crud/Test cannot create job with model snapshot id set',
diff --git a/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceRescorerIT.java b/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceRescorerIT.java
new file mode 100644
index 0000000000000..1748d8a7f94ac
--- /dev/null
+++ b/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceRescorerIT.java
@@ -0,0 +1,194 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */ + +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class InferenceRescorerIT extends InferenceTestCase { + + private static final String MODEL_ID = "ltr-model"; + private static final String INDEX_NAME = "store"; + + @Before + public void setupModelAndData() throws IOException { + putRegressionModel(MODEL_ID, """ + { + "description": "super complex model for tests", + "input": {"field_names": ["cost", "product"]}, + "inference_config": { + "learn_to_rank": { + } + }, + "definition": { + "preprocessors" : [{ + "one_hot_encoding": { + "field": "product", + "hot_map": { + "TV": "type_tv", + "VCR": "type_vcr", + "Laptop": "type_laptop" + } + } + }], + "trained_model": { + "ensemble": { + "feature_names": ["cost", "type_tv", "type_vcr", "type_laptop"], + "target_type": "regression", + "trained_models": [ + { + "tree": { + "feature_names": [ + "cost" + ], + "tree_structure": [ + { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 400, + "decision_type": "lte", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + "leaf_value": 5.0 + }, + { + "node_index": 2, + "leaf_value": 2.0 + } + ], + "target_type": "regression" + } + }, + { + "tree": { + "feature_names": [ + "type_tv" + ], + "tree_structure": [ + { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 1, + "decision_type": "lt", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + "leaf_value": 1.0 + }, + { + "node_index": 2, + "leaf_value": 12.0 + } + ], + "target_type": "regression" + } + } + ] + } + } + } + }"""); + createIndex(INDEX_NAME, Settings.EMPTY, """ + "properties":{ + "product":{"type": "keyword"}, + "cost":{"type": "integer"}}"""); + indexData("{ \"product\": \"TV\", \"cost\": 300}"); + indexData("{ \"product\": \"TV\", \"cost\": 400}"); + indexData("{ \"product\": \"TV\", \"cost\": 600}"); + indexData("{ \"product\": \"VCR\", \"cost\": 15}"); + indexData("{ \"product\": \"VCR\", \"cost\": 350}"); + indexData("{ \"product\": \"VCR\", \"cost\": 580}"); + indexData("{ \"product\": \"Laptop\", \"cost\": 100}"); + indexData("{ \"product\": \"Laptop\", \"cost\": 300}"); + indexData("{ \"product\": \"Laptop\", \"cost\": 500}"); + adminClient().performRequest(new Request("POST", INDEX_NAME + "/_refresh")); + } + + public void testInferenceRescore() throws Exception { + Request request = new Request("GET", "store/_search?size=3"); + request.setJsonEntity(""" + { + "rescore": { + "window_size": 10, + "inference": { "model_id": "ltr-model" } + } + }"""); + assertHitScores(client().performRequest(request), List.of(17.0, 17.0, 14.0)); + request.setJsonEntity(""" + { + "query": {"term": {"product": "Laptop"}}, + "rescore": { + "window_size": 10, + "inference": { "model_id": "ltr-model" } + } + }"""); + assertHitScores(client().performRequest(request), List.of(6.0, 6.0, 3.0)); + } + + public void testInferenceRescoreSmallWindow() throws Exception { + Request request = new Request("GET", "store/_search?size=5"); + request.setJsonEntity(""" + { + "rescore": { + "window_size": 2, + "inference": { "model_id": "ltr-model" } + } + }"""); + 
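+ // Editor's note (added): the expected scores follow from the model defined in setupModelAndData(): the cost tree
+ // contributes 5.0 when cost <= 400 (else 2.0) and the one-hot "type_tv" tree contributes 12.0 for TVs (else 1.0),
+ // so a TV costing at most 400 scores 5.0 + 12.0 = 17.0. With "window_size": 2 only the top two hits are rescored;
+ // the remaining hits keep their original match_all score of 1.0.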
assertHitScores(client().performRequest(request), List.of(17.0, 17.0, 1.0, 1.0, 1.0));
+ }
+
+ public void testInferenceRescorerWithChainedRescorers() throws IOException {
+ Request request = new Request("GET", "store/_search?size=5");
+ request.setJsonEntity("""
+ {
+ "rescore": [
+ {
+ "window_size": 4,
+ "query": { "rescore_query":{ "script_score": {"query": {"match_all": {}}, "script": {"source": "return 4"}}}}
+ },
+ {
+ "window_size": 3,
+ "inference": { "model_id": "ltr-model" }
+ },
+ {
+ "window_size": 2,
+ "query": { "rescore_query": { "script_score": {"query": {"match_all": {}}, "script": {"source": "return 20"}}}}
+ }
+ ]
+ }""");
+ assertHitScores(client().performRequest(request), List.of(37.0, 37.0, 14.0, 5.0, 1.0));
+ }
+
+ private void indexData(String data) throws IOException {
+ Request request = new Request("POST", INDEX_NAME + "/_doc");
+ request.setJsonEntity(data);
+ client().performRequest(request);
+ }
+
+ @SuppressWarnings("unchecked")
+ private static void assertHitScores(Response response, List<Double> expectedScores) throws IOException {
+ assertThat((List<Double>) XContentMapValues.extractValue("hits.hits._score", responseAsMap(response)), equalTo(expectedScores));
+ }
+}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
index 88ae63441aefe..935dd5ef2fc6d 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java
@@ -190,6 +190,7 @@
import org.elasticsearch.xpack.core.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider;
import org.elasticsearch.xpack.core.ml.dataframe.stats.AnalysisStatsNamedWriteablesProvider;
import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider;
+import org.elasticsearch.xpack.core.ml.inference.MlLTRNamedXContentProvider;
import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants;
import org.elasticsearch.xpack.core.ml.job.config.JobTaskState;
import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex;
@@ -1755,6 +1756,10 @@ public List<NamedXContentRegistry.Entry> getNamedXContent() {
)
);
namedXContent.addAll(new CorrelationNamedContentProvider().getNamedXContentParsers());
+ // LTR: combine with the inference named content provider when the feature flag is removed
+ if (InferenceRescorerFeature.isEnabled()) {
+ namedXContent.addAll(new MlLTRNamedXContentProvider().getNamedXContentParsers());
+ }
return namedXContent;
}
@@ -1839,7 +1844,10 @@ public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
namedWriteables.addAll(MlAutoscalingNamedWritableProvider.getNamedWriteables());
namedWriteables.addAll(new CorrelationNamedContentProvider().getNamedWriteables());
namedWriteables.addAll(new ChangePointNamedContentProvider().getNamedWriteables());
-
+ // LTR: combine with the inference named content provider when the feature flag is removed
+ if (InferenceRescorerFeature.isEnabled()) {
+ namedWriteables.addAll(new MlLTRNamedXContentProvider().getNamedWriteables());
+ }
return namedWriteables;
}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java
index 62e747d1d443d..e9478bc2462da 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java
+++
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java @@ -129,7 +129,7 @@ protected void taskOperation( DataFrameAnalyticsTask task, ActionListener> listener ) { - logger.debug("Get stats for running task [{}]", task.getParams().getId()); + logger.trace("Get stats for running task [{}]", task.getParams().getId()); ActionListener updateProgressListener = ActionListener.wrap(aVoid -> { StatsHolder statsHolder = task.getStatsHolder(); @@ -160,7 +160,7 @@ protected void doExecute( ActionListener listener ) { TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); - logger.debug("Get stats for data frame analytics [{}]", request.getId()); + logger.trace("Get stats for data frame analytics [{}]", request.getId()); ActionListener getResponseListener = ActionListener.wrap(getResponse -> { List expandedIds = getResponse.getResources() @@ -249,7 +249,7 @@ static List determineStoppedConfigs(List listener) { - logger.debug("[{}] Gathering stats for stopped task", config.getId()); + logger.trace("[{}] Gathering stats for stopped task", config.getId()); RetrievedStatsHolder retrievedStatsHolder = new RetrievedStatsHolder( ProgressTracker.fromZeroes(config.getAnalysis().getProgressPhases(), config.getAnalysis().supportsInference()).report() diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java index 544ce742521be..d327430a5bfa6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java @@ -64,7 +64,7 @@ public TransportGetDatafeedsStatsAction( @Override protected void doExecute(Task task, Request request, ActionListener listener) { - logger.debug(() -> "[" + request.getDatafeedId() + "] get stats for datafeed"); + logger.trace(() -> "[" + request.getDatafeedId() + "] get stats for datafeed"); ClusterState state = clusterService.state(); final PersistentTasksCustomMetadata tasksInProgress = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); final Response.Builder responseBuilder = new Response.Builder(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java index 038d5fa0b610b..fa63f5f9d78c1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java @@ -95,7 +95,7 @@ public TransportGetJobsStatsAction( @Override protected void doExecute(Task task, GetJobsStatsAction.Request request, ActionListener finalListener) { - logger.debug("Get stats for job [{}]", request.getJobId()); + logger.trace("Get stats for job [{}]", request.getJobId()); TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); ClusterState state = clusterService.state(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java index e416f981dda64..ec25edbf513ab 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java @@ -264,7 +264,7 @@ public InferencePipelineAggregationBuilder rewrite(QueryRewriteContext context) SetOnce loadedModel = new SetOnce<>(); BiConsumer> modelLoadAction = (client, listener) -> modelLoadingService.get() - .getModelForSearch(modelId, listener.delegateFailure((delegate, localModel) -> { + .getModelForAggregation(modelId, listener.delegateFailure((delegate, localModel) -> { loadedModel.set(localModel); boolean isLicensed = localModel.getLicenseLevel() == License.OperationMode.BASIC diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModel.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModel.java index 9140faf50ead8..4e64b3e2f8f2b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModel.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModel.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.license.License; import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.inference.results.InferenceResults; import org.elasticsearch.xpack.core.ml.inference.results.WarningInferenceResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; @@ -59,6 +60,7 @@ public class LocalModel implements Closeable { private final CircuitBreaker trainedModelCircuitBreaker; private final AtomicLong referenceCount; private final long cachedRamBytesUsed; + private final TrainedModelType trainedModelType; LocalModel( String modelId, @@ -68,6 +70,7 @@ public class LocalModel implements Closeable { Map defaultFieldMap, InferenceConfig modelInferenceConfig, License.OperationMode licenseLevel, + TrainedModelType trainedModelType, TrainedModelStatsService trainedModelStatsService, CircuitBreaker trainedModelCircuitBreaker ) { @@ -85,6 +88,7 @@ public class LocalModel implements Closeable { this.licenseLevel = licenseLevel; this.trainedModelCircuitBreaker = trainedModelCircuitBreaker; this.referenceCount = new AtomicLong(1); + this.trainedModelType = trainedModelType; } long ramBytesUsed() { @@ -94,6 +98,14 @@ long ramBytesUsed() { return cachedRamBytesUsed; } + public InferenceConfig getInferenceConfig() { + return inferenceConfig; + } + + TrainedModelType getTrainedModelType() { + return trainedModelType; + } + public String getModelId() { return modelId; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java index 4486c2fd23944..da2f97e283f2a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Strings; 
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.ingest.IngestMetadata;
@@ -36,6 +37,7 @@
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction;
import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig;
+import org.elasticsearch.xpack.core.ml.inference.TrainedModelType;
import org.elasticsearch.xpack.core.ml.inference.results.InferenceResults;
import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfig;
import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig;
@@ -51,12 +53,14 @@
import org.elasticsearch.xpack.ml.notifications.InferenceAuditor;
import java.util.ArrayDeque;
+import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Optional;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ExecutionException;
@@ -110,11 +114,71 @@ public class ModelLoadingService implements ClusterStateListener {
Setting.Property.NodeScope
);
- // The feature requesting the model
+ /**
+ * The cached model consumer. Various consumers dictate the model's usage and context.
+ */
public enum Consumer {
- PIPELINE,
- SEARCH,
- INTERNAL
+ PIPELINE() {
+ @Override
+ public boolean inferenceConfigSupported(InferenceConfig config) {
+ return config == null || config.supportsIngestPipeline();
+ }
+
+ @Override
+ public String exceptionName() {
+ return "ingest";
+ }
+ },
+ SEARCH_AGGS() {
+ @Override
+ public boolean inferenceConfigSupported(InferenceConfig config) {
+ return config == null || config.supportsPipelineAggregation();
+ }
+
+ @Override
+ public String exceptionName() {
+ return "search(aggregation)";
+ }
+ },
+ SEARCH_RESCORER() {
+ @Override
+ public boolean inferenceConfigSupported(InferenceConfig config) {
+ // Null configs imply creation via target type. This is for BWC with very old models.
+ // Consequently, if the config is null, we don't support LTR with them.
+ return config != null && config.supportsSearchRescorer();
+ }
+
+ @Override
+ public String exceptionName() {
+ return "search(rescorer)";
+ }
+ },
+ INTERNAL() {
+ @Override
+ public boolean inferenceConfigSupported(InferenceConfig config) {
+ return true;
+ }
+
+ @Override
+ public String exceptionName() {
+ return "internal";
+ }
+ };
+
+ /**
+ * @param config The inference config for the model. It may be null for very old regression or classification models
+ * @return Is this configuration type supported within this cache context?
+ */
+ public abstract boolean inferenceConfigSupported(@Nullable InferenceConfig config);
+
+ /**
+ * @return The cache context name to use if an exception must be thrown due to the config not being supported
+ */
+ public abstract String exceptionName();
+
+ public boolean isAnyOf(Consumer... consumers) {
+ return Arrays.stream(consumers).anyMatch(c -> this == c);
+ }
}
private static class ModelAndConsumer {
@@ -219,13 +283,23 @@ public void getModelForInternalInference(String modelId, ActionListener<LocalModel> modelActionListener) {
+ public void getModelForAggregation(String modelId, ActionListener<LocalModel> modelActionListener) {
+ getModel(modelId, Consumer.SEARCH_AGGS, null, modelActionListener);
+ }
+
+ /**
+ * Load the model for use at search time for rescoring. Models requested by search are always cached.
* * @param modelId the model to get * @param modelActionListener the listener to alert when the model has been retrieved */ - public void getModelForSearch(String modelId, ActionListener modelActionListener) { - getModel(modelId, Consumer.SEARCH, null, modelActionListener); + public void getModelForLearnToRank(String modelId, ActionListener modelActionListener) { + getModel(modelId, Consumer.SEARCH_RESCORER, null, modelActionListener); } /** @@ -259,6 +333,18 @@ private void getModel(String modelIdOrAlias, Consumer consumer, TaskId parentTas final String modelId = modelAliasToId.getOrDefault(modelIdOrAlias, modelIdOrAlias); ModelAndConsumer cachedModel = localModelCache.get(modelId); if (cachedModel != null) { + // Even if the model is already cached, we don't want to use the model in an unsupported task + if (consumer.inferenceConfigSupported(cachedModel.model.getInferenceConfig()) == false) { + modelActionListener.onFailure( + modelUnsupportedInUsageContext( + modelId, + cachedModel.model.getTrainedModelType(), + cachedModel.model.getInferenceConfig(), + consumer + ) + ); + return; + } cachedModel.consumers.add(consumer); try { cachedModel.model.acquire(); @@ -314,7 +400,6 @@ private boolean loadModelIfNecessary( localModelToNotifyListener.set(cachedModel.model); return true; } - // Add the listener to the queue if the model is loading Queue> listeners = loadingListeners.computeIfPresent( modelId, @@ -330,7 +415,8 @@ private boolean loadModelIfNecessary( // The model is not currently being loaded (indicated by listeners check above). // So start a new load outside of the synchronized block. - if (Consumer.SEARCH != consumer && referencedModels.contains(modelId) == false) { + if (consumer.isAnyOf(Consumer.SEARCH_AGGS, Consumer.SEARCH_RESCORER) == false + && referencedModels.contains(modelId) == false) { // The model is requested by a pipeline but not referenced by any ingest pipelines. // This means it is a simulate call and the model should not be cached logger.trace( @@ -368,19 +454,19 @@ private void loadModel(String modelId, Consumer consumer) { // We don't want to cancel the loading if only ONE of them stops listening or closes connection // TODO Is there a way to only signal a cancel if all the listener tasks cancel??? 
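// Editor's note (added): the wrapped listener below receives only the model configuration (Includes.empty()); it
// fails fast via Consumer#inferenceConfigSupported when that config is unusable in the requesting context (for
// example, a model without a learn_to_rank config requested by SEARCH_RESCORER), before the allocate-only check
// and before any model definition is fetched.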
provider.getTrainedModel(modelId, GetTrainedModelsAction.Includes.empty(), null, ActionListener.wrap(trainedModelConfig -> {
- if (trainedModelConfig.isAllocateOnly()) {
- if (consumer == Consumer.SEARCH) {
- handleLoadFailure(
+ if (consumer.inferenceConfigSupported(trainedModelConfig.getInferenceConfig()) == false) {
+ handleLoadFailure(
+ modelId,
+ modelUnsupportedInUsageContext(
modelId,
- new ElasticsearchStatusException(
- "Trained model [{}] with type [{}] is currently not usable in search.",
- RestStatus.BAD_REQUEST,
- modelId,
- trainedModelConfig.getModelType()
- )
- );
- return;
- }
+ trainedModelConfig.getModelType(),
+ trainedModelConfig.getInferenceConfig(),
+ consumer
+ )
+ );
+ return;
+ }
+ if (trainedModelConfig.isAllocateOnly()) {
handleLoadFailure(modelId, modelMustBeDeployedError(modelId));
return;
}
@@ -419,19 +505,21 @@ private void loadWithoutCaching(
// If the model is not loaded and we did not kick off a new loading attempt, this means that we may be getting called
// by a simulated pipeline
provider.getTrainedModel(modelId, GetTrainedModelsAction.Includes.empty(), parentTaskId, ActionListener.wrap(trainedModelConfig -> {
+ // If the model is used in an unsupported context, fail here
+ if (consumer.inferenceConfigSupported(trainedModelConfig.getInferenceConfig()) == false) {
+ handleLoadFailure(
+ modelId,
+ modelUnsupportedInUsageContext(
+ modelId,
+ trainedModelConfig.getModelType(),
+ trainedModelConfig.getInferenceConfig(),
+ consumer
+ )
+ );
+ return;
+ }
// If the model should be allocated, we should fail here
if (trainedModelConfig.isAllocateOnly()) {
- if (consumer == Consumer.SEARCH) {
- modelActionListener.onFailure(
- new ElasticsearchStatusException(
- "model [{}] with type [{}] is currently not usable in search.",
- RestStatus.BAD_REQUEST,
- modelId,
- trainedModelConfig.getModelType()
- )
- );
- return;
- }
modelActionListener.onFailure(modelMustBeDeployedError(modelId));
return;
}
@@ -457,6 +545,7 @@ private void loadWithoutCaching(
trainedModelConfig.getDefaultFieldMap(),
inferenceConfig,
trainedModelConfig.getLicenseLevel(),
+ trainedModelConfig.getModelType(),
modelStatsService,
trainedModelCircuitBreaker
)
@@ -500,7 +589,7 @@ private void updateCircuitBreakerEstimate(
}
}
- private ElasticsearchStatusException modelMustBeDeployedError(String modelId) {
+ private static ElasticsearchStatusException modelMustBeDeployedError(String modelId) {
return new ElasticsearchStatusException(
"Model [{}] must be deployed to use.
Please deploy with the start trained model deployment API.", RestStatus.BAD_REQUEST, @@ -508,6 +597,22 @@ private ElasticsearchStatusException modelMustBeDeployedError(String modelId) { ); } + private static ElasticsearchStatusException modelUnsupportedInUsageContext( + String modelId, + TrainedModelType modelType, + InferenceConfig inferenceConfig, + Consumer consumer + ) { + return new ElasticsearchStatusException( + "Trained model [{}] with type [{}] and task [{}] is currently not usable in [{}].", + RestStatus.BAD_REQUEST, + modelId, + modelType, + Optional.ofNullable(inferenceConfig).map(InferenceConfig::getName).orElse("_unknown_"), + consumer.exceptionName() + ); + } + private void handleLoadSuccess( String modelId, Consumer consumer, @@ -526,6 +631,7 @@ private void handleLoadSuccess( trainedModelConfig.getDefaultFieldMap(), inferenceConfig, trainedModelConfig.getLicenseLevel(), + Optional.ofNullable(trainedModelConfig.getModelType()).orElse(TrainedModelType.TREE_ENSEMBLE), modelStatsService, trainedModelCircuitBreaker ); @@ -536,7 +642,7 @@ private void handleLoadSuccess( // Also, if the consumer is a search consumer, we should always cache it if (referencedModels.contains(modelId) || Sets.haveNonEmptyIntersection(modelIdToModelAliases.getOrDefault(modelId, new HashSet<>()), referencedModels) - || consumer.equals(Consumer.SEARCH)) { + || consumer.equals(Consumer.SEARCH_AGGS)) { try { // The local model may already be in cache. If it is, we don't bother adding it to cache. // If it isn't, we flip an `isLoaded` flag, and increment the model counter to make sure if it is evicted @@ -699,7 +805,7 @@ public void clusterChanged(ClusterChangedEvent event) { ); if (oldModelAliasesNotReferenced && newModelAliasesNotReferenced && modelIsNotReferenced) { ModelAndConsumer modelAndConsumer = localModelCache.get(modelId); - if (modelAndConsumer != null && modelAndConsumer.consumers.contains(Consumer.SEARCH) == false) { + if (modelAndConsumer != null && modelAndConsumer.consumers.contains(Consumer.SEARCH_AGGS) == false) { logger.trace("[{} ({})] invalidated from cache", modelId, modelAliasOrId); localModelCache.invalidate(modelId); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FeatureExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FeatureExtractor.java new file mode 100644 index 0000000000000..36bf36ef99c52 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FeatureExtractor.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.ml.inference.rescorer;
+
+import org.apache.lucene.index.LeafReaderContext;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+public interface FeatureExtractor {
+ void setNextReader(LeafReaderContext segmentContext) throws IOException;
+
+ void addFeatures(Map<String, Object> featureMap, int docId) throws IOException;
+
+ List<String> featureNames();
+}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FieldValueFeatureExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FieldValueFeatureExtractor.java
new file mode 100644
index 0000000000000..9f0ef84fc3575
--- /dev/null
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FieldValueFeatureExtractor.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.ml.inference.rescorer;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.ValueFetcher;
+import org.elasticsearch.index.query.SearchExecutionContext;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.lookup.Source;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+public class FieldValueFeatureExtractor implements FeatureExtractor {
+
+ record FieldValueFetcher(String fieldName, ValueFetcher valueFetcher) {}
+
+ private LeafReaderContext segmentContext;
+ private final List<String> documentFieldNames;
+ private final List<FieldValueFetcher> valueFetcherList;
+ private final SearchLookup sourceLookup;
+
+ FieldValueFeatureExtractor(List<String> documentFieldNames, SearchExecutionContext executionContext) {
+ this.documentFieldNames = documentFieldNames;
+ this.valueFetcherList = documentFieldNames.stream().map(s -> {
+ MappedFieldType mappedFieldType = executionContext.getFieldType(s);
+ if (mappedFieldType != null) {
+ return new FieldValueFetcher(s, mappedFieldType.valueFetcher(executionContext, null));
+ }
+ return null;
+ }).filter(Objects::nonNull).toList();
+ this.sourceLookup = executionContext.lookup();
+ }
+
+ @Override
+ public void setNextReader(LeafReaderContext segmentContext) {
+ this.segmentContext = segmentContext;
+ for (FieldValueFetcher vf : valueFetcherList) {
+ vf.valueFetcher().setNextReader(segmentContext);
+ }
+ }
+
+ @Override
+ public void addFeatures(Map<String, Object> featureMap, int docId) throws IOException {
+ Source source = sourceLookup.getSource(this.segmentContext, docId);
+ for (FieldValueFetcher vf : this.valueFetcherList) {
+ featureMap.put(vf.fieldName(), vf.valueFetcher().fetchValues(source, docId, new ArrayList<>()).get(0));
+ }
+ }
+
+ @Override
+ public List<String> featureNames() {
+ return documentFieldNames;
+ }
+}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorer.java
index 6a2782ad93e67..e8905975b052b 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorer.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorer.java
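[Editor's note, before the InferenceRescorer changes below: FieldValueFeatureExtractor above is the only FeatureExtractor implementation in this change; the interface exists so InferenceRescorerContext#buildFeatureExtractors (further down) can combine several feature sources. A minimal sketch of the contract follows; the ConstantFeatureExtractor class and its constant feature are hypothetical illustrations, not part of this PR:

package org.elasticsearch.xpack.ml.inference.rescorer;

import org.apache.lucene.index.LeafReaderContext;

import java.util.List;
import java.util.Map;

/** Hypothetical example: emits one constant-valued feature for every rescored document. */
public class ConstantFeatureExtractor implements FeatureExtractor {

    private final String featureName;
    private final Double value;

    public ConstantFeatureExtractor(String featureName, Double value) {
        this.featureName = featureName;
        this.value = value;
    }

    @Override
    public void setNextReader(LeafReaderContext segmentContext) {
        // A constant feature keeps no per-segment state; field-based extractors reset their value fetchers here.
    }

    @Override
    public void addFeatures(Map<String, Object> featureMap, int docId) {
        featureMap.put(featureName, value);
    }

    @Override
    public List<String> featureNames() {
        return List.of(featureName);
    }
}

The InferenceRescorer changes below then iterate extractors generically instead of fetching _source field values directly.]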
@@ -15,11 +15,9 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.search.lookup.SearchLookup; -import org.elasticsearch.search.lookup.Source; import org.elasticsearch.search.rescore.RescoreContext; import org.elasticsearch.search.rescore.Rescorer; -import org.elasticsearch.xpack.core.ml.inference.trainedmodel.RegressionConfigUpdate; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfigUpdate; import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel; import java.io.IOException; @@ -64,7 +62,6 @@ public TopDocs rescore(TopDocs topDocs, IndexSearcher searcher, RescoreContext r rescoreContext.setRescoredDocs(topNDocIDs); ScoreDoc[] hitsToRescore = topNFirstPass.scoreDocs; Arrays.sort(hitsToRescore, Comparator.comparingInt(a -> a.doc)); - SearchLookup sourceLookup = ltrRescoreContext.executionContext.lookup(); int hitUpto = 0; int readerUpto = -1; int endDoc = 0; @@ -72,8 +69,9 @@ public TopDocs rescore(TopDocs topDocs, IndexSearcher searcher, RescoreContext r List leaves = ltrRescoreContext.executionContext.searcher().getIndexReader().leaves(); LeafReaderContext currentSegment = null; boolean changedSegment = true; + List featureExtractors = ltrRescoreContext.buildFeatureExtractors(); List> docFeatures = new ArrayList<>(topNDocIDs.size()); - int featureSize = ltrRescoreContext.valueFetcherList.size(); + int featureSize = featureExtractors.stream().mapToInt(fe -> fe.featureNames().size()).sum(); while (hitUpto < hitsToRescore.length) { final ScoreDoc hit = hitsToRescore[hitUpto]; final int docID = hit.doc; @@ -87,16 +85,15 @@ public TopDocs rescore(TopDocs topDocs, IndexSearcher searcher, RescoreContext r if (changedSegment) { // We advanced to another segment and update our document value fetchers docBase = currentSegment.docBase; - for (InferenceRescorerContext.FieldValueFetcher vf : ltrRescoreContext.valueFetcherList) { - vf.valueFetcher().setNextReader(currentSegment); + for (FeatureExtractor featureExtractor : featureExtractors) { + featureExtractor.setNextReader(currentSegment); } changedSegment = false; } int targetDoc = docID - docBase; Map features = Maps.newMapWithExpectedSize(featureSize); - Source source = sourceLookup.getSource(currentSegment, targetDoc); - for (InferenceRescorerContext.FieldValueFetcher vf : ltrRescoreContext.valueFetcherList) { - features.put(vf.fieldName(), vf.valueFetcher().fetchValues(source, targetDoc, new ArrayList<>()).get(0)); + for (FeatureExtractor featureExtractor : featureExtractors) { + featureExtractor.addFeatures(features, targetDoc); } docFeatures.add(features); hitUpto++; @@ -104,7 +101,7 @@ public TopDocs rescore(TopDocs topDocs, IndexSearcher searcher, RescoreContext r for (int i = 0; i < hitsToRescore.length; i++) { Map features = docFeatures.get(i); try { - hitsToRescore[i].score = ((Number) definition.infer(features, RegressionConfigUpdate.EMPTY_PARAMS).predictedValue()) + hitsToRescore[i].score = ((Number) definition.infer(features, LearnToRankConfigUpdate.EMPTY_PARAMS).predictedValue()) .floatValue(); } catch (Exception ex) { logger.warn("Failure rescoring doc...", ex); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilder.java index d9eb758383a51..0885e0e5ffce4 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilder.java @@ -114,7 +114,7 @@ public RescorerBuilder rewrite(QueryRewriteContext ctx throw new IllegalStateException("Model loading service must be available"); } SetOnce inferenceDefinitionSetOnce = new SetOnce<>(); - ctx.registerAsyncAction((c, l) -> modelLoadingServiceSupplier.get().getModelForSearch(modelId, ActionListener.wrap(lm -> { + ctx.registerAsyncAction((c, l) -> modelLoadingServiceSupplier.get().getModelForLearnToRank(modelId, ActionListener.wrap(lm -> { inferenceDefinitionSetOnce.set(lm); l.onResponse(null); }, l::onFailure))); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerContext.java index 4fec10dc1bc3c..4e2cbfb8d3ac7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerContext.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerContext.java @@ -7,23 +7,18 @@ package org.elasticsearch.xpack.ml.inference.rescorer; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.ValueFetcher; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.rescore.RescoreContext; import org.elasticsearch.search.rescore.Rescorer; import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel; +import java.util.ArrayList; import java.util.List; -import java.util.Objects; public class InferenceRescorerContext extends RescoreContext { - record FieldValueFetcher(String fieldName, ValueFetcher valueFetcher) {} - final SearchExecutionContext executionContext; final LocalModel inferenceDefinition; - final List valueFetcherList; /** * @param windowSize how many documents to rescore @@ -40,16 +35,16 @@ public InferenceRescorerContext( super(windowSize, rescorer); this.executionContext = executionContext; this.inferenceDefinition = inferenceDefinition; - if (inferenceDefinition != null) { - this.valueFetcherList = inferenceDefinition.inputFields().stream().map(s -> { - MappedFieldType mappedFieldType = executionContext.getFieldType(s); - if (mappedFieldType != null) { - return new InferenceRescorerContext.FieldValueFetcher(s, mappedFieldType.valueFetcher(executionContext, null)); - } - return null; - }).filter(Objects::nonNull).toList(); - } else { - valueFetcherList = List.of(); + } + + List buildFeatureExtractors() { + assert this.inferenceDefinition != null; + List featureExtractors = new ArrayList<>(); + if (this.inferenceDefinition.inputFields().isEmpty() == false) { + featureExtractors.add( + new FieldValueFeatureExtractor(new ArrayList<>(this.inferenceDefinition.inputFields()), this.executionContext) + ); } + return featureExtractors; } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModelTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModelTests.java index 5cebcf076133c..4709925dbe7d7 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModelTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModelTests.java @@ -13,6 +13,7 @@ 
import org.elasticsearch.license.License; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.inference.preprocessing.OneHotEncoding; import org.elasticsearch.xpack.core.ml.inference.results.ClassificationInferenceResults; import org.elasticsearch.xpack.core.ml.inference.results.InferenceResults; @@ -81,6 +82,7 @@ public void testClassificationInfer() throws Exception { Collections.singletonMap("field.foo", "field.foo.keyword"), ClassificationConfig.EMPTY_PARAMS, randomFrom(License.OperationMode.values()), + TrainedModelType.TREE_ENSEMBLE, modelStatsService, mock(CircuitBreaker.class) ); @@ -119,6 +121,7 @@ public void testClassificationInfer() throws Exception { Collections.singletonMap("field.foo", "field.foo.keyword"), ClassificationConfig.EMPTY_PARAMS, License.OperationMode.PLATINUM, + TrainedModelType.TREE_ENSEMBLE, modelStatsService, mock(CircuitBreaker.class) ); @@ -171,6 +174,7 @@ public void testClassificationInferWithDifferentPredictionFieldTypes() throws Ex Collections.singletonMap("field.foo", "field.foo.keyword"), ClassificationConfig.EMPTY_PARAMS, License.OperationMode.PLATINUM, + TrainedModelType.TREE_ENSEMBLE, modelStatsService, mock(CircuitBreaker.class) ); @@ -233,6 +237,7 @@ public void testRegression() throws Exception { Collections.singletonMap("bar", "bar.keyword"), RegressionConfig.EMPTY_PARAMS, License.OperationMode.PLATINUM, + TrainedModelType.TREE_ENSEMBLE, modelStatsService, mock(CircuitBreaker.class) ); @@ -265,6 +270,7 @@ public void testAllFieldsMissing() throws Exception { null, RegressionConfig.EMPTY_PARAMS, License.OperationMode.PLATINUM, + TrainedModelType.TREE_ENSEMBLE, modelStatsService, mock(CircuitBreaker.class) ); @@ -300,6 +306,7 @@ public void testInferPersistsStatsAfterNumberOfCalls() throws Exception { null, ClassificationConfig.EMPTY_PARAMS, License.OperationMode.PLATINUM, + TrainedModelType.TREE_ENSEMBLE, modelStatsService, mock(CircuitBreaker.class) ); @@ -359,6 +366,7 @@ public void testReferenceCounting() throws IOException { null, ClassificationConfig.EMPTY_PARAMS, License.OperationMode.PLATINUM, + TrainedModelType.TREE_ENSEMBLE, modelStatsService, breaker ); @@ -385,6 +393,7 @@ public void testReferenceCounting() throws IOException { null, ClassificationConfig.EMPTY_PARAMS, License.OperationMode.PLATINUM, + TrainedModelType.TREE_ENSEMBLE, modelStatsService, breaker ); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java index 11e7fe04af2cc..644b619537741 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java @@ -414,7 +414,7 @@ public void testGetModelForSearch() throws Exception { for (int i = 0; i < 3; i++) { PlainActionFuture future = new PlainActionFuture<>(); - modelLoadingService.getModelForSearch(modelId, future); + modelLoadingService.getModelForAggregation(modelId, future); assertThat(future.get(), is(not(nullValue()))); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilderRewriteTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilderRewriteTests.java index 0f896e45592cd..710c9a49bbfdf 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilderRewriteTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilderRewriteTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.index.query.DataRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.search.rescore.RescoreContext; import org.elasticsearch.test.AbstractBuilderTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.ml.inference.TrainedModelStatsService; @@ -31,7 +30,6 @@ import java.io.IOException; import java.util.List; -import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -102,14 +100,13 @@ public void testBuildContext() { when(localModel.inputFields()).thenReturn(inputFields); SearchExecutionContext context = createSearchExecutionContext(); InferenceRescorerBuilder inferenceRescorerBuilder = new InferenceRescorerBuilder("test_model", localModel); - RescoreContext rescoreContext = inferenceRescorerBuilder.innerBuildContext(20, context); - assertTrue(rescoreContext instanceof InferenceRescorerContext); + InferenceRescorerContext rescoreContext = inferenceRescorerBuilder.innerBuildContext(20, context); + assertNotNull(rescoreContext); assertThat(rescoreContext.getWindowSize(), equalTo(20)); - assertThat(((InferenceRescorerContext) rescoreContext).valueFetcherList, hasSize(2)); + List featureExtractors = rescoreContext.buildFeatureExtractors(); + assertThat(featureExtractors, hasSize(1)); assertThat( - ((InferenceRescorerContext) rescoreContext).valueFetcherList.stream() - .map(InferenceRescorerContext.FieldValueFetcher::fieldName) - .collect(Collectors.toList()), + featureExtractors.stream().flatMap(featureExtractor -> featureExtractor.featureNames().stream()).toList(), containsInAnyOrder(DOUBLE_FIELD_NAME, INT_FIELD_NAME) ); } @@ -134,7 +131,7 @@ private static class TestModelLoader extends ModelLoadingService { } @Override - public void getModelForSearch(String modelId, ActionListener modelActionListener) { + public void getModelForLearnToRank(String modelId, ActionListener modelActionListener) { modelActionListener.onResponse(localModel()); } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index f6880f00d23eb..69649ee6c1f97 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -760,8 +760,8 @@ public void testToXContent() throws IOException { "master" ], "version": "%s", - "minIndexVersion":"%s", - "maxIndexVersion":"%s" + "min_index_version":%s, + "max_index_version":%s } }, "transport_versions": [] diff --git a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java 
b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java
index 06883d88c50ec..1efeda816f59f 100644
--- a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java
+++ b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java
@@ -81,9 +81,7 @@ public void testFailRestoreOnTooOldVersion() {
SnapshotRestoreException e = expectThrows(SnapshotRestoreException.class, () -> clusterAdmin().restoreSnapshot(req).actionGet());
assertThat(
e.getMessage(),
- containsString(
- "the snapshot was created with Elasticsearch version [2.0.0] " + "which isn't supported by the archive functionality"
- )
+ containsString("the snapshot has indices of version [2000099] which isn't supported by the archive functionality")
);
}
diff --git a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java
index 155001f0d2c43..df22c6d8ea9a6 100644
--- a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java
+++ b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java
@@ -33,6 +33,7 @@
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.engine.ReadOnlyEngine;
@@ -84,7 +85,7 @@ public class OldLuceneVersions extends Plugin implements IndexStorePlugin, Clust
License.OperationMode.ENTERPRISE
);
- private static Version MINIMUM_ARCHIVE_VERSION = Version.fromString("5.0.0");
+ private static final IndexVersion MINIMUM_ARCHIVE_VERSION = IndexVersion.fromId(5000099);
private final SetOnce failShardsListener = new SetOnce<>();
@@ -155,7 +156,7 @@ public void afterFilesRestoredFromRepository(IndexShard indexShard) {
}
@Override
- public BiConsumer<Snapshot, Version> addPreRestoreVersionCheck() {
+ public BiConsumer<Snapshot, IndexVersion> addPreRestoreVersionCheck() {
return (snapshot, version) -> {
if (version.isLegacyIndexVersion()) {
if (ARCHIVE_FEATURE.checkWithoutTracking(getLicenseState()) == false) {
@@ -164,9 +165,7 @@ public BiConsumer<Snapshot, IndexVersion> addPreRestoreVersionCheck() {
if (version.before(MINIMUM_ARCHIVE_VERSION)) {
throw new SnapshotRestoreException(
snapshot,
- "the snapshot was created with Elasticsearch version ["
- + version
- + "] which isn't supported by the archive functionality"
+ "the snapshot has indices of version [" + version + "] which isn't supported by the archive functionality"
);
}
}
diff --git a/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/StackFrame.java b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/StackFrame.java
index e2621d9262720..7bad037e6c819 100644
--- a/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/StackFrame.java
+++ b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/StackFrame.java
@@ -7,7 +7,6 @@
package org.elasticsearch.xpack.profiler;
-import org.elasticsearch.xcontent.ObjectPath;
import org.elasticsearch.xcontent.ToXContentObject;
import org.elasticsearch.xcontent.XContentBuilder;
@@ -18,10 +17,6 @@ import
java.util.Objects; final class StackFrame implements ToXContentObject { - private static final String[] PATH_FILE_NAME = new String[] { "Stackframe", "file", "name" }; - private static final String[] PATH_FUNCTION_NAME = new String[] { "Stackframe", "function", "name" }; - private static final String[] PATH_FUNCTION_OFFSET = new String[] { "Stackframe", "function", "offset" }; - private static final String[] PATH_LINE_NUMBER = new String[] { "Stackframe", "line", "number" }; List fileName; List functionName; List functionOffset; @@ -46,26 +41,12 @@ private static List listOf(Object o) { } public static StackFrame fromSource(Map source) { - // stack frames may either be stored with synthetic source or regular one - // which results either in a nested or flat document structure. - - if (source.containsKey("Stackframe")) { - // synthetic source - return new StackFrame( - ObjectPath.eval(PATH_FILE_NAME, source), - ObjectPath.eval(PATH_FUNCTION_NAME, source), - ObjectPath.eval(PATH_FUNCTION_OFFSET, source), - ObjectPath.eval(PATH_LINE_NUMBER, source) - ); - } else { - // regular source - return new StackFrame( - source.get("Stackframe.file.name"), - source.get("Stackframe.function.name"), - source.get("Stackframe.function.offset"), - source.get("Stackframe.line.number") - ); - } + return new StackFrame( + source.get("Stackframe.file.name"), + source.get("Stackframe.function.name"), + source.get("Stackframe.function.offset"), + source.get("Stackframe.line.number") + ); } @Override diff --git a/x-pack/plugin/profiler/src/test/java/org/elasticsearch/xpack/profiler/StackFrameTests.java b/x-pack/plugin/profiler/src/test/java/org/elasticsearch/xpack/profiler/StackFrameTests.java index b5a0a60bfea82..81bc8dd8f3629 100644 --- a/x-pack/plugin/profiler/src/test/java/org/elasticsearch/xpack/profiler/StackFrameTests.java +++ b/x-pack/plugin/profiler/src/test/java/org/elasticsearch/xpack/profiler/StackFrameTests.java @@ -23,25 +23,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; public class StackFrameTests extends ESTestCase { - public void testCreateFromSyntheticSource() { - // tag::noformat - StackFrame frame = StackFrame.fromSource( - Map.of("Stackframe", Map.of( - "file", Map.of("name", "Main.java"), - "function", Map.of( - "name", "helloWorld", - "offset", 31733 - ), - "line", Map.of("number", 22)) - ) - ); - // end::noformat - assertEquals(List.of("Main.java"), frame.fileName); - assertEquals(List.of("helloWorld"), frame.functionName); - assertEquals(List.of(31733), frame.functionOffset); - assertEquals(List.of(22), frame.lineNumber); - } - public void testCreateFromRegularSource() { // tag::noformat StackFrame frame = StackFrame.fromSource( diff --git a/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/60_settings.yml b/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/60_settings.yml index 054126ce5171a..a2c46b3da601d 100644 --- a/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/60_settings.yml +++ b/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/60_settings.yml @@ -94,8 +94,10 @@ --- "Downsample datastream with tier preference": - skip: - version: " - 8.4.99" - reason: "rollup renamed to downsample in 8.5.0" + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/97150" +# version: " - 8.4.99" +# reason: "rollup renamed to downsample in 8.5.0" - do: indices.put_index_template: diff 
--git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java
index e7705657fe56b..23af864be9ec2 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.downsample;
 
+import org.elasticsearch.action.downsample.DownsampleConfig;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestRequest;
@@ -14,7 +15,6 @@
 import org.elasticsearch.rest.ServerlessScope;
 import org.elasticsearch.rest.action.RestToXContentListener;
 import org.elasticsearch.xpack.core.downsample.DownsampleAction;
-import org.elasticsearch.xpack.core.downsample.DownsampleConfig;
 
 import java.io.IOException;
 import java.util.List;
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/RollupShardIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/RollupShardIndexer.java
index 14b67e3cb2c2b..0130c0aaf8403 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/RollupShardIndexer.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/RollupShardIndexer.java
@@ -17,6 +17,7 @@
 import org.elasticsearch.action.bulk.BulkProcessor2;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.downsample.DownsampleConfig;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.common.Rounding;
@@ -45,7 +46,6 @@
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentFactory;
 import org.elasticsearch.xcontent.XContentType;
-import org.elasticsearch.xpack.core.downsample.DownsampleConfig;
 import org.elasticsearch.xpack.core.downsample.DownsampleIndexerAction;
 import org.elasticsearch.xpack.core.rollup.action.RollupShardTask;
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java
index a239f4ed29c7f..7f3fa21811efc 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java
@@ -20,6 +20,7 @@
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
 import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
+import org.elasticsearch.action.downsample.DownsampleConfig;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction;
@@ -68,7 +69,6 @@
 import org.elasticsearch.xpack.aggregatemetric.mapper.AggregateDoubleMetricFieldMapper;
 import org.elasticsearch.xpack.core.ClientHelper;
 import org.elasticsearch.xpack.core.downsample.DownsampleAction;
-import org.elasticsearch.xpack.core.downsample.DownsampleConfig;
 import org.elasticsearch.xpack.core.downsample.DownsampleIndexerAction;
 import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField;
 import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl;
@@ -533,28 +533,11 @@ private static void validateDownsamplingInterval(MapperService mapperService, Do
         if (meta.isEmpty() == false) {
             String interval = meta.get(config.getIntervalType());
             if (interval != null) {
-                DateHistogramInterval sourceIndexInterval = new DateHistogramInterval(interval);
-                DateHistogramInterval targetIndexInterval = config.getInterval();
-                long sourceMillis = sourceIndexInterval.estimateMillis();
-                long targetMillis = targetIndexInterval.estimateMillis();
-                if (sourceMillis >= targetMillis) {
-                    // Downsampling interval must be greater than source interval
-                    e.addValidationError(
-                        "Source index is a downsampled index. Downsampling interval ["
-                            + targetIndexInterval
-                            + "] must be greater than the source index interval ["
-                            + sourceIndexInterval
-                            + "]"
-                    );
-                } else if (targetMillis % sourceMillis != 0) {
-                    // Downsampling interval must be a multiple of the source interval
-                    e.addValidationError(
-                        "Source index is a downsampled index. Downsampling interval ["
-                            + targetIndexInterval
-                            + "] must be a multiple of the source index interval ["
-                            + sourceIndexInterval
-                            + "]"
-                    );
+                try {
+                    DownsampleConfig sourceConfig = new DownsampleConfig(new DateHistogramInterval(interval));
+                    DownsampleConfig.validateSourceAndTargetIntervals(sourceConfig, config);
+                } catch (IllegalArgumentException exception) {
+                    e.addValidationError("Source index is a downsampled index. " + exception.getMessage());
                 }
             }
@@ -566,14 +549,13 @@ private static void validateDownsamplingInterval(MapperService mapperService, Do
                     + config.getTimeZone()
                     + "] cannot be different than the source index timezone ["
                     + sourceTimezone
-                    + "]"
+                    + "]."
             );
         }
         if (e.validationErrors().isEmpty() == false) {
            throw e;
        }
-
    }
 }
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java
index 4b39c34bca69c..d5c30dae0d9ac 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java
@@ -24,6 +24,7 @@
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.datastreams.CreateDataStreamAction;
 import org.elasticsearch.action.datastreams.GetDataStreamAction;
+import org.elasticsearch.action.downsample.DownsampleConfig;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
@@ -77,7 +78,6 @@
 import org.elasticsearch.xpack.aggregatemetric.AggregateMetricMapperPlugin;
 import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin;
 import org.elasticsearch.xpack.core.downsample.DownsampleAction;
-import org.elasticsearch.xpack.core.downsample.DownsampleConfig;
 import org.elasticsearch.xpack.core.ilm.LifecycleSettings;
 import org.elasticsearch.xpack.core.ilm.RolloverAction;
 import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers;
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java
index 52b3037fd3e2c..aed478caa87ba 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java
@@ -20,6 +20,7 @@
 import org.elasticsearch.action.datastreams.CreateDataStreamAction;
 import org.elasticsearch.action.datastreams.GetDataStreamAction;
 import org.elasticsearch.action.datastreams.ModifyDataStreamsAction;
+import org.elasticsearch.action.downsample.DownsampleConfig;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;
@@ -48,7 +49,6 @@
 import org.elasticsearch.test.ESSingleNodeTestCase;
 import org.elasticsearch.xcontent.XContentType;
 import org.elasticsearch.xpack.core.downsample.DownsampleAction;
-import org.elasticsearch.xpack.core.downsample.DownsampleConfig;
 import org.elasticsearch.xpack.rollup.Rollup;
 import org.hamcrest.Matchers;
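The TransportDownsampleAction hunk above folds the two inline interval checks into a shared helper on DownsampleConfig. A minimal sketch of what DownsampleConfig.validateSourceAndTargetIntervals presumably enforces, reconstructed from the removed inline code (the actual implementation lives in org.elasticsearch.action.downsample.DownsampleConfig and may differ in detail):

    static void validateSourceAndTargetIntervals(DownsampleConfig source, DownsampleConfig target) {
        long sourceMillis = source.getInterval().estimateMillis();
        long targetMillis = target.getInterval().estimateMillis();
        if (sourceMillis >= targetMillis) {
            // the target interval must be strictly greater than the source interval
            throw new IllegalArgumentException(
                "Downsampling interval [" + target.getInterval() + "] must be greater than the source index interval [" + source.getInterval() + "]"
            );
        } else if (targetMillis % sourceMillis != 0) {
            // and an exact multiple of it, so source buckets nest cleanly into target buckets
            throw new IllegalArgumentException(
                "Downsampling interval [" + target.getInterval() + "] must be a multiple of the source index interval [" + source.getInterval() + "]"
            );
        }
    }

Throwing IllegalArgumentException keeps the helper caller-agnostic; the action above catches it and prepends the "Source index is a downsampled index." context before recording the validation error.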
diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureTests.java
index 8fc73e6676846..7890463936a59 100644
--- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureTests.java
+++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureTests.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
 import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.downsample.DownsampleConfig;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.support.WriteRequest;
@@ -36,7 +37,6 @@
 import org.elasticsearch.xpack.aggregatemetric.AggregateMetricMapperPlugin;
 import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin;
 import org.elasticsearch.xpack.core.downsample.DownsampleAction;
-import org.elasticsearch.xpack.core.downsample.DownsampleConfig;
 import org.elasticsearch.xpack.core.downsample.DownsampleIndexerAction;
 import org.elasticsearch.xpack.rollup.Rollup;
 import org.junit.Before;
diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java
index dcf48861e0212..8534ed268c03b 100644
--- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java
+++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java
@@ -8,7 +8,6 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
-import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.FailedNodeException;
 import org.elasticsearch.client.internal.Client;
@@ -41,6 +40,7 @@
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.gateway.AsyncShardFetch;
 import org.elasticsearch.gateway.ReplicaShardAllocator;
+import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.repositories.IndexId;
 import org.elasticsearch.snapshots.Snapshot;
@@ -171,9 +171,9 @@ public void allocateUnassigned(
         final Snapshot snapshot = new Snapshot(repositoryName, snapshotId);
-        final Version version = shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT
+        final IndexVersion version = shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT
             ? ((RecoverySource.SnapshotRecoverySource) shardRouting.recoverySource()).version()
-            : Version.CURRENT;
+            : IndexVersion.current();
         final RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource(
             recoveryUuid,
diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java
index 9534095a82aa6..1c4ef99a32a87 100644
--- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java
+++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java
@@ -14,7 +14,6 @@
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
-import org.elasticsearch.Version;
 import org.elasticsearch.blobcache.common.ByteRange;
 import org.elasticsearch.blobcache.shared.SharedBlobCacheService;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -37,6 +36,7 @@
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.indices.recovery.RecoveryState;
@@ -241,7 +241,7 @@ protected static SearchableSnapshotRecoveryState createRecoveryState(boolean fin
             new RecoverySource.SnapshotRecoverySource(
                 UUIDs.randomBase64UUID(),
                 new Snapshot("repo", new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID())),
-                Version.CURRENT,
+                IndexVersion.current(),
                 new IndexId("some_index", UUIDs.randomBase64UUID(random()))
             )
         );
diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocatorTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocatorTests.java
index dacd973ab1e08..14bcc35686dbe 100644
--- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocatorTests.java
+++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocatorTests.java
@@ -33,6 +33,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue;
 import org.elasticsearch.index.IndexModule;
+import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.repositories.IndexId;
 import org.elasticsearch.snapshots.SearchableSnapshotsSettings;
@@ -265,7 +266,7 @@ private static RecoverySource.SnapshotRecoverySource randomSnapshotSource(ShardI
         return new RecoverySource.SnapshotRecoverySource(
             UUIDs.randomBase64UUID(random()),
             new Snapshot("test-repo", new SnapshotId("test-snap", UUIDs.randomBase64UUID(random()))),
-            Version.CURRENT,
+            IndexVersion.current(),
            new IndexId(shardId.getIndexName(), UUIDs.randomBase64UUID(random()))
         );
     }
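The searchable-snapshots hunks above, like the SearchableSnapshotDirectoryStatsTests, VotingOnlyNodePluginTests and OldRepositoryAccessIT hunks further down, are one mechanical migration: snapshot recovery sources now record an index version rather than the node release version. Reduced to a before/after sketch (uuid, snapshot and indexId are placeholders):

    // Before: the release version (org.elasticsearch.Version) was recorded.
    new RecoverySource.SnapshotRecoverySource(uuid, snapshot, Version.CURRENT, indexId);

    // After: the index format version is recorded instead, presumably so that
    // snapshot compatibility is tracked independently of release numbering.
    new RecoverySource.SnapshotRecoverySource(uuid, snapshot, IndexVersion.current(), indexId);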
diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java
index aa35f7e3be224..25049cf5791bd 100644
--- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java
+++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java
@@ -9,7 +9,6 @@
 import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
-import org.elasticsearch.Version;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.blobcache.BlobCacheTestUtils;
 import org.elasticsearch.blobcache.shared.SharedBlobCacheService;
@@ -689,7 +688,7 @@ protected IndexInputStats createIndexInputStats(long numFiles, long totalSize, l
             new RecoverySource.SnapshotRecoverySource(
                 UUIDs.randomBase64UUID(),
                 new Snapshot("repo", new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID())),
-                Version.CURRENT,
+                IndexVersion.current(),
                 new IndexId("some_index", UUIDs.randomBase64UUID(random()))
             )
         );
diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateActionTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateActionTests.java
index 28f9229f5abeb..6aff36d3c8095 100644
--- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateActionTests.java
+++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateActionTests.java
@@ -32,6 +32,8 @@ public class RestAuthenticateActionTests extends SecurityIntegTestCase {
 
     private static boolean anonymousEnabled;
+    private static boolean operatorUser;
+    private static boolean operatorPrivilegesEnabled;
     private static String domainName;
 
     @BeforeClass
@@ -44,11 +46,26 @@ public static void maybeSetDomain() {
         domainName = randomFrom(randomAlphaOfLengthBetween(3, 5), null);
     }
 
+    @BeforeClass
+    public static void maybeSetOperator() {
+        operatorUser = randomBoolean();
+        operatorPrivilegesEnabled = randomBoolean();
+    }
+
     @Override
     protected boolean addMockHttpTransport() {
         return false; // enable http
     }
 
+    @Override
+    protected String configOperatorUsers() {
+        return super.configOperatorUsers()
+            + "operator:\n"
+            + "  - usernames: ['"
+            + (operatorUser ? SecuritySettingsSource.TEST_USER_NAME : "_another_user")
+            + "']\n";
+    }
+
     @Override
     protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
         Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings));
@@ -61,6 +78,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
         if (domainName != null) {
             builder.put(DOMAIN_TO_REALM_ASSOC_SETTING.getConcreteSettingForNamespace(domainName).getKey(), "file");
         }
+        builder.put("xpack.security.operator_privileges.enabled", operatorPrivilegesEnabled);
         return builder.build();
     }
 
@@ -101,6 +119,11 @@ public void testAuthenticateApi() throws Exception {
             assertThat(roles.size(), is(1));
             assertThat(roles, contains(SecuritySettingsSource.TEST_ROLE));
         }
+        if (operatorUser && operatorPrivilegesEnabled) {
+            assertThat(objectPath.evaluate("operator"), equalTo(true));
+        } else {
+            assertThat(objectPath.evaluate("operator"), equalTo(null));
+        }
     }
 
     public void testAuthenticateApiWithoutAuthentication() throws Exception {
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java
index 0ca85ada4b5e1..d6063254b327e 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java
@@ -21,6 +21,7 @@
 import org.elasticsearch.xpack.core.security.user.AnonymousUser;
 import org.elasticsearch.xpack.core.security.user.InternalUser;
 import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.xpack.security.operator.OperatorPrivileges;
 
 public class TransportAuthenticateAction extends HandledTransportAction<AuthenticateRequest, AuthenticateResponse> {
@@ -56,7 +57,12 @@ protected void doExecute(Task task, AuthenticateRequest request, ActionListener<
         } else if (runAsUser instanceof InternalUser) {
             listener.onFailure(new IllegalArgumentException("user [" + runAsUser.principal() + "] is internal"));
         } else {
-            listener.onResponse(new AuthenticateResponse(authentication.maybeAddAnonymousRoles(anonymousUser)));
+            listener.onResponse(
+                new AuthenticateResponse(
+                    authentication.maybeAddAnonymousRoles(anonymousUser),
+                    OperatorPrivileges.isOperator(securityContext.getThreadContext())
+                )
+            );
         }
     }
 }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java
index 47091c75a0e70..ea0870c1f9875 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java
@@ -21,7 +21,7 @@
 import static org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction.TASKS_ORIGIN;
 import static org.elasticsearch.action.support.replication.PostWriteRefresh.POST_WRITE_REFRESH_ORIGIN;
-import static org.elasticsearch.cluster.metadata.DataLifecycle.DLM_ORIGIN;
+import static org.elasticsearch.cluster.metadata.DataLifecycle.DATA_STREAM_LIFECYCLE_ORIGIN;
 import static org.elasticsearch.ingest.IngestService.INGEST_ORIGIN;
 import static org.elasticsearch.persistent.PersistentTasksService.PERSISTENT_TASK_ORIGIN;
 import static org.elasticsearch.synonyms.SynonymsManagementAPIService.SYNONYMS_ORIGIN;
@@ -128,7 +128,7 @@ public static void switchUserBasedOnActionOriginAndExecute(
             case POST_WRITE_REFRESH_ORIGIN:
                 securityContext.executeAsInternalUser(InternalUsers.STORAGE_USER, version, consumer);
                 break;
-            case DLM_ORIGIN:
+            case DATA_STREAM_LIFECYCLE_ORIGIN:
                 securityContext.executeAsInternalUser(InternalUsers.DATA_STREAM_LIFECYCLE_USER, version, consumer);
                 break;
             case WATCHER_ORIGIN:
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/OperatorPrivileges.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/OperatorPrivileges.java
index c6ddef0f5193a..95c487a67cfdb 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/OperatorPrivileges.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/OperatorPrivileges.java
@@ -34,6 +34,12 @@ public class OperatorPrivileges {
         Setting.Property.NodeScope
     );
 
+    public static boolean isOperator(ThreadContext threadContext) {
+        return AuthenticationField.PRIVILEGE_CATEGORY_VALUE_OPERATOR.equals(
+            threadContext.getHeader(AuthenticationField.PRIVILEGE_CATEGORY_KEY)
+        );
+    }
+
     public interface OperatorPrivilegesService {
         /**
          * Set a ThreadContext Header {@link AuthenticationField#PRIVILEGE_CATEGORY_KEY} if authentication
@@ -126,9 +132,7 @@ public ElasticsearchSecurityException check(
             if (user instanceof InternalUser && false == authentication.isRunAs()) {
                 return null;
             }
-            if (false == AuthenticationField.PRIVILEGE_CATEGORY_VALUE_OPERATOR.equals(
-                threadContext.getHeader(AuthenticationField.PRIVILEGE_CATEGORY_KEY)
-            )) {
+            if (false == isOperator(threadContext)) {
                 // Only check whether request is operator-only when user is NOT an operator
                 logger.trace("Checking operator-only violation for user [{}] and action [{}]", user, action);
                 final OperatorPrivilegesViolation violation = operatorOnlyRegistry.check(action, request);
@@ -144,9 +148,7 @@ public boolean checkRest(RestHandler restHandler, RestRequest restRequest, RestC
         if (false == shouldProcess()) {
            return true;
        }
-        if (false == AuthenticationField.PRIVILEGE_CATEGORY_VALUE_OPERATOR.equals(
-            threadContext.getHeader(AuthenticationField.PRIVILEGE_CATEGORY_KEY)
-        )) {
+        if (false == isOperator(threadContext)) {
             // Only check whether request is operator-only when user is NOT an operator
             if (logger.isTraceEnabled()) {
                 Authentication authentication = threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY);
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java
index b48393559fc4c..680f6b0f14f5e 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java
@@ -63,11 +63,10 @@ public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient c
             new RestBuilderListener<AuthenticateResponse>(channel) {
                 @Override
                 public RestResponse buildResponse(AuthenticateResponse authenticateResponse, XContentBuilder builder) throws Exception {
-                    authenticateResponse.authentication().toXContent(builder, ToXContent.EMPTY_PARAMS);
+                    authenticateResponse.toXContent(builder, ToXContent.EMPTY_PARAMS);
                     return new RestResponse(RestStatus.OK, builder);
                 }
             }
         );
-
     }
 }
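Taken together, the security hunks above thread a single bit of state from the thread context to the REST response: OperatorPrivileges.isOperator reads the operator header, TransportAuthenticateAction copies the answer onto AuthenticateResponse, and RestAuthenticateAction now serializes the whole response rather than only the embedded authentication. A sketch of how a caller of GET /_security/_authenticate observes it, using the same ObjectPath test helper the tests in this diff use (key name and absent-when-false behaviour per the RestAuthenticateActionTests assertions above):

    // GET /_security/_authenticate, parsed with the test ObjectPath helper
    ObjectPath objectPath = ObjectPath.createFromResponse(response);
    Boolean operator = objectPath.evaluate("operator");
    // true -> the authenticated user is an operator and operator privileges are enabled
    // null -> the key is omitted for everyone else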
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java
index 641c7e69d706c..574857e071dfa 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java
@@ -11,6 +11,7 @@
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.ArrayUtils;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -20,6 +21,7 @@
 import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest;
 import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
+import org.elasticsearch.xpack.core.security.authc.AuthenticationField;
 import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper;
 import org.elasticsearch.xpack.core.security.user.AnonymousUser;
 import org.elasticsearch.xpack.core.security.user.ElasticUser;
@@ -42,6 +44,8 @@ public class TransportAuthenticateActionTests extends ESTestCase {
 
+    private ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
+
     public void testInternalUser() {
         SecurityContext securityContext = mock(SecurityContext.class);
         final Authentication authentication = AuthenticationTestHelper.builder().internal().build();
@@ -123,6 +127,13 @@ public void testValidAuthentication() {
         final User user = randomFrom(new ElasticUser(true), new KibanaUser(true), new User("joe"));
         final Authentication authentication = AuthenticationTestHelper.builder().user(user).build();
         final User effectiveUser = authentication.getEffectiveSubject().getUser();
+        final boolean operator = randomBoolean();
+
+        if (operator) {
+            threadContext.putHeader(AuthenticationField.PRIVILEGE_CATEGORY_KEY, AuthenticationField.PRIVILEGE_CATEGORY_VALUE_OPERATOR);
+        } else if (randomBoolean()) {
+            threadContext.putHeader(AuthenticationField.PRIVILEGE_CATEGORY_KEY, AuthenticationField.PRIVILEGE_CATEGORY_VALUE_EMPTY);
+        }
 
         TransportAuthenticateAction action = prepareAction(anonymousUser, effectiveUser, authentication);
 
@@ -141,6 +152,7 @@ public void onFailure(Exception e) {
         });
 
         assertThat(responseRef.get(), notNullValue());
+        assertThat(responseRef.get().isOperator(), is(operator));
         if (anonymousUser.enabled() && false == (authentication.isApiKey() || authentication.isCrossClusterAccess())) {
             // Roles of anonymousUser are added to non api key authentication
             final Authentication auth = responseRef.get().authentication();
@@ -210,6 +222,7 @@ private TransportAuthenticateAction prepareAction(AnonymousUser anonymousUser, U
         SecurityContext securityContext = mock(SecurityContext.class);
         when(securityContext.getAuthentication()).thenReturn(authentication);
         when(securityContext.getUser()).thenReturn(user);
+        when(securityContext.getThreadContext()).thenReturn(this.threadContext);
 
         TransportService transportService = new TransportService(
             Settings.EMPTY,
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java
index b98cb797ed7ac..a8324e3e1ff32 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java
@@ -121,7 +121,11 @@ public void testSwitchAndExecuteSecurityProfileUser() throws Exception {
     }
 
     public void testSwitchWithDlmOrigin() throws Exception {
-        assertSwitchBasedOnOriginAndExecute(DataLifecycle.DLM_ORIGIN, InternalUsers.DATA_STREAM_LIFECYCLE_USER, randomTransportVersion());
+        assertSwitchBasedOnOriginAndExecute(
+            DataLifecycle.DATA_STREAM_LIFECYCLE_ORIGIN,
+            InternalUsers.DATA_STREAM_LIFECYCLE_USER,
+            randomTransportVersion()
+        );
     }
 
     public void testSwitchAndExecuteXpackUser() throws Exception {
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
index 834ba2e46d676..22e900fc45195 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
@@ -2920,8 +2920,7 @@ private RoleProviders buildRolesProvider(
             }).when(nativeRolesStore).getRoleDescriptors(isASet(), anyActionListener());
         }
         if (reservedRolesStore == null) {
-            reservedRolesStore = mock(ReservedRolesStore.class);
-            doCallRealMethod().when(reservedRolesStore).accept(anySet(), anyActionListener());
+            reservedRolesStore = new ReservedRolesStore();
         }
         if (licenseState == null) {
             licenseState = new XPackLicenseState(() -> 0);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java
index a7a6aaee32e8e..238abd202b8bb 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java
@@ -78,7 +78,7 @@
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Consumer;
 
-import static org.elasticsearch.cluster.metadata.DataLifecycle.DLM_ORIGIN;
+import static org.elasticsearch.cluster.metadata.DataLifecycle.DATA_STREAM_LIFECYCLE_ORIGIN;
 import static org.elasticsearch.test.ActionListenerUtils.anyActionListener;
 import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN;
 import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN;
@@ -430,7 +430,7 @@ public void testSetUserBasedOnActionOrigin() {
             InternalUsers.XPACK_USER,
             ASYNC_SEARCH_ORIGIN,
             InternalUsers.ASYNC_SEARCH_USER,
-            DLM_ORIGIN,
+            DATA_STREAM_LIFECYCLE_ORIGIN,
             InternalUsers.DATA_STREAM_LIFECYCLE_USER
         );
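The TransportAuthenticateActionTests changes above also show the minimal way to simulate an operator in a unit test: no authentication needs to run, the test simply stamps the header that the operator-privileges service would otherwise set. In essence (a sketch using the constants from the diff):

    // The authentication path normally marks operators on the ThreadContext;
    // tests can set the same header directly.
    ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
    threadContext.putHeader(AuthenticationField.PRIVILEGE_CATEGORY_KEY, AuthenticationField.PRIVILEGE_CATEGORY_VALUE_OPERATOR);
    assert OperatorPrivileges.isOperator(threadContext); // header present -> operator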
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java
index 000a8a90b14bc..d0cba6b2381d4 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java
@@ -674,8 +674,6 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th
                     ch.flushInbound();
                 }).get();
                 testThreadPool.generic().submit(() -> ch.close().get()).get();
-                assertThat(dispatchThrowableReference.get().toString(), containsString("Connection closed before received headers"));
-                assertThat(badDispatchInvocationCount.get(), is(6));
                 assertThat(authnInvocationCount.get(), is(0));
             }
         } finally {
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/rate.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/rate.yml
index 5b0d55b372231..ea1dd5bb99610 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/rate.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/rate.yml
@@ -442,3 +442,100 @@
   - close_to: { aggregations.ts.buckets.0.rate-month.value: { value: 63958020.00, error: 0.01 }}
   - close_to: { aggregations.ts.buckets.0.rate-quarter.value: { value: 191874060.00, error: 0.01 }}
   - close_to: { aggregations.ts.buckets.0.rate-year.value: { value: 767496240.00, error: 0.01 }}
+
+---
+"rate aggregation on counter field partial bucket":
+  - skip:
+      version: " - 8.6.99"
+      reason: "counter field support added in 8.7"
+      features: close_to
+
+  - do:
+      indices.create:
+        index: test-rate
+        body:
+          settings:
+            index:
+              mode: time_series
+              routing_path: [ host ]
+              time_series:
+                start_time: 2021-04-28T00:00:00Z
+                end_time: 2021-04-29T00:00:00Z
+          mappings:
+            properties:
+              "@timestamp":
+                type: date
+              host:
+                type: keyword
+                time_series_dimension: true
+              bytes_counter:
+                type: long
+                time_series_metric: counter
+
+  - do:
+      bulk:
+        refresh: true
+        index: test-rate
+        body:
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:01:04.000Z", "host": "one", "bytes_counter": 1000 }'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:01:14.000Z", "host": "one", "bytes_counter": 1100 }'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:01:24.000Z", "host": "one", "bytes_counter": 1200 }'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:01:34.000Z", "host": "one", "bytes_counter": 1250 }'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:01:44.000Z", "host": "one", "bytes_counter": 1310 }'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:01:54.000Z", "host": "one", "bytes_counter": 1350 }'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:01:01.000Z", "host": "two", "bytes_counter": 1000 }'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:01:11.000Z", "host": "two", "bytes_counter": 1100 }'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:01:21.000Z", "host": "two", "bytes_counter": 1200 }'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:01:31.000Z", "host": "two", "bytes_counter": 1250 }'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:01:41.000Z", "host": "two", "bytes_counter": 1310 }'
+          - '{"index": {}}'
+          - '{"@timestamp": "2021-04-28T18:01:51.000Z", "host": "two", "bytes_counter": 1350 }'
+
+  - do:
+      search:
+        index: test-rate
+        body:
+          size: 0
+          query:
+            bool:
+              filter:
+                range:
+                  "@timestamp":
+                    gte: "2021-04-28T18:01:03.000Z"
+                    lte: "2021-04-28T18:18:00.000Z"
+          aggs:
+            date_histogram:
+              date_histogram:
+                field: "@timestamp"
+                calendar_interval: 1h
+                time_zone: Europe/Ljubljana
+                min_doc_count: 1
+              aggs:
+                counter_rate:
+                  time_series:
+                    keyed: false
+                  aggs:
+                    bytes_counter_rate:
+                      rate:
+                        field: bytes_counter
+                        unit: second
+
+  - match: { hits.total.value: 11 }
+  - length: { aggregations.date_histogram.buckets: 1 }
+  - match: { aggregations.date_histogram.buckets.0.key_as_string: "2021-04-28T20:00:00.000+02:00" }
+  - match: { aggregations.date_histogram.buckets.0.doc_count: 11 }
+  # NOTE: (1350 - 1000) / (54 - 4) = 350 / 50 = 7.0
+  - close_to: { aggregations.date_histogram.buckets.0.counter_rate.buckets.0.bytes_counter_rate.value: { value: 7.00, error: 0.01 } }
+  # NOTE: (1350 - 1100) / (51 - 11) = 250 / 40 = 6.25 (we filter out the first sample due to the bool range filter)
+  - close_to: { aggregations.date_histogram.buckets.0.counter_rate.buckets.1.bytes_counter_rate.value: { value: 6.25, error: 0.01 } }
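The NOTE comments capture the contract the new test exercises for a rate aggregation over a counter within one bucket: divide the value delta between the first and last surviving samples by the elapsed time between them. As a stripped-down sketch (the real aggregator additionally copes with counter resets, which this ignores):

    // Per-series rate for one date_histogram bucket, unit: second.
    // e.g. host "one": (1350 - 1000) / (54s - 4s) = 7.0 bytes/sec
    static double bucketCounterRate(long firstValue, long lastValue, long firstTimeMillis, long lastTimeMillis) {
        double elapsedSeconds = (lastTimeMillis - firstTimeMillis) / 1000.0;
        return (lastValue - firstValue) / elapsedSeconds;
    }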
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_rescore.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_rescore.yml
index 83ee76ba5a463..824999b3b3008 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_rescore.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_rescore.yml
@@ -11,7 +11,7 @@ setup:
             "description": "super complex model for tests",
             "input": {"field_names": ["cost", "product"]},
             "inference_config": {
-              "regression": {
+              "learn_to_rank": {
               }
             },
             "definition": {
@@ -134,6 +134,10 @@ setup:
 ---
 "Test rescore with stored model":
+  - skip:
+      version: all
+      reason: "@AwaitsFix https://github.com/elastic/elasticsearch/issues/80703"
+
   - do:
       search:
         index: store
@@ -166,6 +170,10 @@ setup:
   - match: { hits.hits.2._score: 3.0 }
 ---
 "Test rescore with stored model and smaller window_size":
+  - skip:
+      version: all
+      reason: "@AwaitsFix https://github.com/elastic/elasticsearch/issues/80703"
+
   - do:
       search:
         index: store
@@ -184,6 +192,10 @@ setup:
   - match: { hits.hits.4._score: 1.0 }
 ---
 "Test rescore with stored model and chained rescorers":
+  - skip:
+      version: all
+      reason: "@AwaitsFix https://github.com/elastic/elasticsearch/issues/80703"
+
   - do:
       search:
         index: store
diff --git a/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java b/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java
index 3a6acd5ac9e99..f615dca114996 100644
--- a/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java
+++ b/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java
@@ -6,7 +6,6 @@
  */
 package org.elasticsearch.cluster.coordination.votingonly;
 
-import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
 import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
 import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
@@ -21,6 +20,7 @@
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.discovery.MasterNotDiscoveredException;
 import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.plugins.Plugin;
@@ -219,7 +219,7 @@ public void testBasicSnapshotRestoreWorkFlow() {
         assertThat(snapshotInfos.size(), Matchers.equalTo(1));
         SnapshotInfo snapshotInfo = snapshotInfos.get(0);
         assertThat(snapshotInfo.state(), Matchers.equalTo(SnapshotState.SUCCESS));
-        assertThat(snapshotInfo.version(), Matchers.equalTo(Version.CURRENT));
+        assertThat(snapshotInfo.version(), Matchers.equalTo(IndexVersion.current()));
 
         logger.info("--> close indices");
         client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get();
diff --git a/x-pack/plugin/watcher/build.gradle b/x-pack/plugin/watcher/build.gradle
index d111e807a195f..244148fa79188 100644
--- a/x-pack/plugin/watcher/build.gradle
+++ b/x-pack/plugin/watcher/build.gradle
@@ -30,7 +30,7 @@ dependencies {
 
   // watcher deps
   api 'com.googlecode.owasp-java-html-sanitizer:owasp-java-html-sanitizer:20211018.2'
-  runtimeOnly 'com.google.guava:guava:30.1-jre' // needed by watcher for the html sanitizer
+  runtimeOnly 'com.google.guava:guava:32.0.1-jre' // needed by watcher for the html sanitizer
   runtimeOnly 'com.google.guava:failureaccess:1.0.1'
   api 'com.sun.mail:jakarta.mail:1.6.4'
   api 'com.sun.activation:jakarta.activation:1.2.1'
@@ -54,7 +54,6 @@ tasks.named("thirdPartyAudit").configure {
     'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray',
     'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1',
     'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2',
-    'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3',
     'com.google.common.hash.Striped64',
     'com.google.common.hash.Striped64$1',
     'com.google.common.hash.Striped64$Cell',
diff --git a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java
index f4acc4491deff..bb018372b1542 100644
--- a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java
+++ b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java
@@ -213,7 +213,7 @@ private void beforeRestart(
         assertEquals(numberOfShards, (int) getResp.evaluate("snapshots.0.shards.successful"));
         assertEquals(numberOfShards, (int) getResp.evaluate("snapshots.0.shards.total"));
         assertEquals(0, (int) getResp.evaluate("snapshots.0.shards.failed"));
-        assertEquals(oldVersion.toString(), getResp.evaluate("snapshots.0.version"));
+        assertEquals(oldVersion.indexVersion.toString(), getResp.evaluate("snapshots.0.version"));
 
         // list specific snapshot on new ES
         getSnaps = new Request("GET", "/_snapshot/" + repoName + "/" + snapshotName);
@@ -227,7 +227,7 @@ private void beforeRestart(
         assertEquals(numberOfShards, (int) getResp.evaluate("snapshots.0.shards.successful"));
         assertEquals(numberOfShards, (int) getResp.evaluate("snapshots.0.shards.total"));
         assertEquals(0, (int) getResp.evaluate("snapshots.0.shards.failed"));
-        assertEquals(oldVersion.toString(), getResp.evaluate("snapshots.0.version"));
+        assertEquals(oldVersion.indexVersion.toString(), getResp.evaluate("snapshots.0.version"));
 
         // list advanced snapshot info on new ES
         getSnaps = new Request("GET", "/_snapshot/" + repoName + "/" + snapshotName + "/_status");