From 79982b00a7f47637fa2bf7eb8d13b245444df17e Mon Sep 17 00:00:00 2001 From: Anton Rubin Date: Mon, 28 Apr 2025 12:15:42 +0100 Subject: [PATCH 1/8] updating standard analyzer docs Signed-off-by: Anton Rubin --- _analyzers/supported-analyzers/standard.md | 88 +++++++++++++--------- 1 file changed, 53 insertions(+), 35 deletions(-) diff --git a/_analyzers/supported-analyzers/standard.md b/_analyzers/supported-analyzers/standard.md index d5c3650d5da..20af96b22e9 100644 --- a/_analyzers/supported-analyzers/standard.md +++ b/_analyzers/supported-analyzers/standard.md @@ -7,17 +7,20 @@ nav_order: 50 # Standard analyzer -The `standard` analyzer is the default analyzer used when no other analyzer is specified. It is designed to provide a basic and efficient approach to generic text processing. +The `standard` analyzer is the built-in default analyzer used for general-purpose full-text search in OpenSearch and Elasticsearch. It is designed to provide consistent, language-agnostic text processing by efficiently breaking down text into searchable terms. -This analyzer consists of the following tokenizers and token filters: +The `standard` analyzer performs the following operations: -- `standard` tokenizer: Removes most punctuation and splits text on spaces and other common delimiters. -- `lowercase` token filter: Converts all tokens to lowercase, ensuring case-insensitive matching. -- `stop` token filter: Removes common stopwords, such as "the", "is", and "and", from the tokenized output. +- **Tokenization**: It uses the [`standard`]({{site.url}}{{site.baseurl}}/analyzers/tokenizers/standard/) tokenizer, which splits text into words based on Unicode text segmentation rules, handling spaces, punctuation, and common delimiters. +- **Lowercasing**: It applies the [`lowercase`]({{site.url}}{{site.baseurl}}/analyzers/token-filters/lowercase/) token filter to convert all tokens to lowercase, ensuring consistent matching regardless of input case. -## Example +This combination makes the `standard` analyzer ideal for indexing a wide range of natural language content without needing language-specific customizations. -Use the following command to create an index named `my_standard_index` with a `standard` analyzer: +--- + +## Example: Creating an index with the standard analyzer + +You can assign the `standard` analyzer to a text field when creating an index: ```json PUT /my_standard_index @@ -26,7 +29,7 @@ PUT /my_standard_index "properties": { "my_field": { "type": "text", - "analyzer": "standard" + "analyzer": "standard" } } } @@ -34,33 +37,33 @@ PUT /my_standard_index ``` {% include copy-curl.html %} -## Parameters +--- -You can configure a `standard` analyzer with the following parameters. +## Parameters -Parameter | Required/Optional | Data type | Description -:--- | :--- | :--- | :--- -`max_token_length` | Optional | Integer | Sets the maximum length of the produced token. If this length is exceeded, the token is split into multiple tokens at the length configured in `max_token_length`. Default is `255`. -`stopwords` | Optional | String or list of strings | A string specifying a predefined list of stopwords (such as `_english_`) or an array specifying a custom list of stopwords. Default is `_none_`. -`stopwords_path` | Optional | String | The path (absolute or relative to the config directory) to the file containing a list of stop words. 
+The `standard` analyzer supports the following parameters: +| Parameter | Type | Default | Description | +|:----------|:-----|:--------|:------------| +| `max_token_length` | Integer | `255` | Sets the maximum length of a token before it is split. | +| `stopwords` | List or String | None | A list of stopwords or a predefined stopword set like `_english_` to remove during analysis. | +| `stopwords_path` | String | None | Path to a file containing stopwords to be used during analysis. | -## Configuring a custom analyzer +## Example: Analyzer with parameters -Use the following command to configure an index with a custom analyzer that is equivalent to the `standard` analyzer: +The following example crated index `products` and configures `max_token_length` and `stopwords`: ```json -PUT /my_custom_index +PUT /animals { "settings": { "analysis": { "analyzer": { - "my_custom_analyzer": { - "type": "custom", - "tokenizer": "standard", - "filter": [ - "lowercase", - "stop" + "my_manual_stopwords_analyzer": { + "type": "standard", + "max_token_length": 10, + "stopwords": [ + "the", "is", "and", "but", "an", "a", "it" ] } } @@ -70,28 +73,43 @@ PUT /my_custom_index ``` {% include copy-curl.html %} -## Generated tokens - -Use the following request to examine the tokens generated using the analyzer: +Use the following `_analyze` API to see how the `my_manual_stopwords_analyzer` processes text: ```json -POST /my_custom_index/_analyze +POST /animals/_analyze { - "analyzer": "my_custom_analyzer", - "text": "The slow turtle swims away" + "analyzer": "my_manual_stopwords_analyzer", + "text": "The Turtle is Large but it is Slow" } ``` {% include copy-curl.html %} -The response contains the generated tokens: +The returned token are separated based on spacing, lowercased and stopwords are removed: ```json { "tokens": [ - {"token": "slow","start_offset": 4,"end_offset": 8,"type": "","position": 1}, - {"token": "turtle","start_offset": 9,"end_offset": 15,"type": "","position": 2}, - {"token": "swims","start_offset": 16,"end_offset": 21,"type": "","position": 3}, - {"token": "away","start_offset": 22,"end_offset": 26,"type": "","position": 4} + { + "token": "turtle", + "start_offset": 4, + "end_offset": 10, + "type": "", + "position": 1 + }, + { + "token": "large", + "start_offset": 14, + "end_offset": 19, + "type": "", + "position": 3 + }, + { + "token": "slow", + "start_offset": 30, + "end_offset": 34, + "type": "", + "position": 7 + } ] } ``` From 72c65e17efdb3f158fd8e719f51bc8a7b4b01155 Mon Sep 17 00:00:00 2001 From: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com> Date: Mon, 28 Apr 2025 09:21:30 -0400 Subject: [PATCH 2/8] Update _analyzers/supported-analyzers/standard.md Signed-off-by: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com> --- _analyzers/supported-analyzers/standard.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_analyzers/supported-analyzers/standard.md b/_analyzers/supported-analyzers/standard.md index 20af96b22e9..3f31f35a175 100644 --- a/_analyzers/supported-analyzers/standard.md +++ b/_analyzers/supported-analyzers/standard.md @@ -7,7 +7,7 @@ nav_order: 50 # Standard analyzer -The `standard` analyzer is the built-in default analyzer used for general-purpose full-text search in OpenSearch and Elasticsearch. It is designed to provide consistent, language-agnostic text processing by efficiently breaking down text into searchable terms. +The `standard` analyzer is the built-in default analyzer used for general-purpose full-text search in OpenSearch. 
It is designed to provide consistent, language-agnostic text processing by efficiently breaking down text into searchable terms. The `standard` analyzer performs the following operations: From cfe0817ecac75c8b89c2f19b27e7eac71371952f Mon Sep 17 00:00:00 2001 From: Anton Rubin Date: Thu, 26 Jun 2025 11:58:10 +0100 Subject: [PATCH 3/8] addressing the PR comments Signed-off-by: Anton Rubin --- _analyzers/supported-analyzers/standard.md | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/_analyzers/supported-analyzers/standard.md b/_analyzers/supported-analyzers/standard.md index 3f31f35a175..275daa6fdf0 100644 --- a/_analyzers/supported-analyzers/standard.md +++ b/_analyzers/supported-analyzers/standard.md @@ -41,17 +41,20 @@ PUT /my_standard_index ## Parameters -The `standard` analyzer supports the following parameters: +The `standard` analyzer supports the following optional parameters: -| Parameter | Type | Default | Description | +| Parameter | Data Type | Default | Description | |:----------|:-----|:--------|:------------| | `max_token_length` | Integer | `255` | Sets the maximum length of a token before it is split. | -| `stopwords` | List or String | None | A list of stopwords or a predefined stopword set like `_english_` to remove during analysis. | +| `stopwords` | String or list of strings | None | A list of stopwords or a predefined stopword set like `_english_` to remove during analysis. | | `stopwords_path` | String | None | Path to a file containing stopwords to be used during analysis. | +Use only one of the parameters `stopwords` or `stopwords_path`. If both are used, no error is returned but only `stopwords` parameter is applied. +{: .note} + ## Example: Analyzer with parameters -The following example crated index `products` and configures `max_token_length` and `stopwords`: +The following example creates index `products` and configures `max_token_length` and `stopwords`: ```json PUT /animals @@ -84,7 +87,11 @@ POST /animals/_analyze ``` {% include copy-curl.html %} -The returned token are separated based on spacing, lowercased and stopwords are removed: +The returned tokens are: + +- separated based on spacing +- lowercased +- stopwords removed ```json { From a62523cbba99bb18d2ed880e4ed7ec9b9b17092f Mon Sep 17 00:00:00 2001 From: Anton Rubin Date: Thu, 26 Jun 2025 12:02:08 +0100 Subject: [PATCH 4/8] replacing add Data Type with Data type Signed-off-by: Anton Rubin --- _analyzers/supported-analyzers/standard.md | 2 +- _api-reference/index-apis/alias.md | 4 ++-- _im-plugin/index-transforms/transforms-apis.md | 6 +++--- _search-plugins/sql/datatypes.md | 2 +- _search-plugins/sql/ppl/index.md | 2 +- _search-plugins/sql/sql-ppl-api.md | 6 +++--- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/_analyzers/supported-analyzers/standard.md b/_analyzers/supported-analyzers/standard.md index 275daa6fdf0..84059e87a37 100644 --- a/_analyzers/supported-analyzers/standard.md +++ b/_analyzers/supported-analyzers/standard.md @@ -43,7 +43,7 @@ PUT /my_standard_index The `standard` analyzer supports the following optional parameters: -| Parameter | Data Type | Default | Description | +| Parameter | Data type | Default | Description | |:----------|:-----|:--------|:------------| | `max_token_length` | Integer | `255` | Sets the maximum length of a token before it is split. | | `stopwords` | String or list of strings | None | A list of stopwords or a predefined stopword set like `_english_` to remove during analysis. 
| diff --git a/_api-reference/index-apis/alias.md b/_api-reference/index-apis/alias.md index d90fdf606dc..7cd3bd1b998 100644 --- a/_api-reference/index-apis/alias.md +++ b/_api-reference/index-apis/alias.md @@ -25,7 +25,7 @@ POST _aliases All parameters are optional. -Parameter | Data Type | Description +Parameter | Data type | Description :--- | :--- | :--- cluster_manager_timeout | Time | The amount of time to wait for a response from the cluster manager node. Default is `30s`. timeout | Time | The amount of time to wait for a response from the cluster. Default is `30s`. @@ -34,7 +34,7 @@ timeout | Time | The amount of time to wait for a response from the cluster. Def In your request body, you need to specify what action to take, the alias name, and the index you want to associate with the alias. Other fields are optional. -Field | Data Type | Description | Required +Field | Data type | Description | Required :--- | :--- | :--- | :--- actions | Array | Set of actions you want to perform on the index. Valid options are: `add`, `remove`, and `remove_index`. You must have at least one action in the array. | Yes add | N/A | Adds an alias to the specified index. | No diff --git a/_im-plugin/index-transforms/transforms-apis.md b/_im-plugin/index-transforms/transforms-apis.md index 7e0803c38b4..8068776f441 100644 --- a/_im-plugin/index-transforms/transforms-apis.md +++ b/_im-plugin/index-transforms/transforms-apis.md @@ -28,7 +28,7 @@ PUT _plugins/_transform/ ### Path parameters -Parameter | Data Type | Description +Parameter | Data type | Description :--- | :--- | :--- transform_id | String | Transform ID | @@ -36,7 +36,7 @@ transform_id | String | Transform ID | You can specify the following options in the HTTP request body: -Option | Data Type | Description | Required +Option | Data type | Description | Required :--- | :--- | :--- | :--- enabled | Boolean | If true, the transform job is enabled at creation. | No continuous | Boolean | Specifies whether the transform job should be continuous. Continuous jobs execute every time they are scheduled according to the `schedule` field and run based off of newly transformed buckets as well as any new data added to source indexes. Non-continuous jobs execute only once. Default is `false`. | No @@ -184,7 +184,7 @@ Parameter | Description | Required You can update the following fields. -Option | Data Type | Description +Option | Data type | Description :--- | :--- | :--- schedule | Object | The schedule for the transform job. Contains the fields `interval.start_time`, `interval.period`, and `interval.unit`. start_time | Integer | The Unix epoch start time of the transform job. 
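
For context, the `schedule`, `start_time`, `period`, and `unit` fields in the update table above combine into one nested object in the transform job body. The following is a minimal sketch only: the transform ID `my_transform` and the timestamp are hypothetical, and an update call may also require the `if_seq_no` and `if_primary_term` query parameters:

```json
PUT _plugins/_transform/my_transform
{
  "transform": {
    "enabled": true,
    "schedule": {
      "interval": {
        "start_time": 1602100553,
        "period": 1,
        "unit": "Minutes"
      }
    }
  }
}
```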
diff --git a/_search-plugins/sql/datatypes.md b/_search-plugins/sql/datatypes.md index c00f27c3e40..08356abc089 100644 --- a/_search-plugins/sql/datatypes.md +++ b/_search-plugins/sql/datatypes.md @@ -1,6 +1,6 @@ --- layout: default -title: Data Types +title: Data types parent: SQL and PPL nav_order: 7 --- diff --git a/_search-plugins/sql/ppl/index.md b/_search-plugins/sql/ppl/index.md index bda67aba36e..5e01498ac42 100644 --- a/_search-plugins/sql/ppl/index.md +++ b/_search-plugins/sql/ppl/index.md @@ -54,5 +54,5 @@ Developers can find information in the following resources: - [Piped Processing Language](https://github.com/opensearch-project/piped-processing-language) specification - [OpenSearch PPL Reference Manual](https://github.com/opensearch-project/sql/blob/main/docs/user/ppl/index.rst) - [Observability](https://github.com/opensearch-project/dashboards-observability/) using [PPL-based visualizations](https://github.com/opensearch-project/dashboards-observability#event-analytics) -- PPL [Data Types](https://github.com/opensearch-project/sql/blob/main/docs/user/ppl/general/datatypes.rst) +- PPL [Data types](https://github.com/opensearch-project/sql/blob/main/docs/user/ppl/general/datatypes.rst) - [Cross-cluster search](https://github.com/opensearch-project/sql/blob/main/docs/user/ppl/admin/cross_cluster_search.rst#using-cross-cluster-search-in-ppl) in PPL diff --git a/_search-plugins/sql/sql-ppl-api.md b/_search-plugins/sql/sql-ppl-api.md index a5b76a92486..33f2550b56c 100644 --- a/_search-plugins/sql/sql-ppl-api.md +++ b/_search-plugins/sql/sql-ppl-api.md @@ -15,14 +15,14 @@ Sends an SQL/PPL query to the SQL plugin. You can pass the format for the respon ### Query parameters -Parameter | Data Type | Description +Parameter | Data type | Description :--- | :--- | :--- [format]({{site.url}}{{site.baseurl}}/search-plugins/sql/response-formats/) | String | The format for the response. The `_sql` endpoint supports `jdbc`, `csv`, `raw`, and `json` formats. The `_ppl` endpoint supports `jdbc`, `csv`, and `raw` formats. Default is `jdbc`. sanitize | Boolean | Specifies whether to escape special characters in the results. See [Response formats]({{site.url}}{{site.baseurl}}/search-plugins/sql/response-formats/) for more information. Default is `true`. ### Request body fields -Field | Data Type | Description +Field | Data type | Description :--- | :--- | :--- query | String | The query to be executed. Required. [filter](#filtering-results) | JSON object | The filter for the results. Optional. @@ -151,7 +151,7 @@ The response contains the schema and the results: ### Response body fields -Field | Data Type | Description +Field | Data type | Description :--- | :--- | :--- schema | Array | Specifies the field names and types for all fields. data_rows | 2D array | An array of results. Each result represents one matching row (document). 
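
A short usage sketch of the `_sql` endpoint ties the query and response fields above together; the `accounts` index and its columns are hypothetical:

```json
POST /_plugins/_sql
{
  "query": "SELECT firstname, lastname FROM accounts WHERE age > 30 LIMIT 5"
}
```

With the default `jdbc` format, the response contains the schema and row arrays described in the response body fields table.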
From cc156697bf8152aee1328db7775ce4a5c948fa6c Mon Sep 17 00:00:00 2001 From: AntonEliatra Date: Mon, 30 Jun 2025 19:45:01 +0100 Subject: [PATCH 5/8] Update standard.md Signed-off-by: AntonEliatra --- _analyzers/supported-analyzers/standard.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_analyzers/supported-analyzers/standard.md b/_analyzers/supported-analyzers/standard.md index 84059e87a37..b38fddfd95c 100644 --- a/_analyzers/supported-analyzers/standard.md +++ b/_analyzers/supported-analyzers/standard.md @@ -49,7 +49,7 @@ The `standard` analyzer supports the following optional parameters: | `stopwords` | String or list of strings | None | A list of stopwords or a predefined stopword set like `_english_` to remove during analysis. | | `stopwords_path` | String | None | Path to a file containing stopwords to be used during analysis. | -Use only one of the parameters `stopwords` or `stopwords_path`. If both are used, no error is returned but only `stopwords` parameter is applied. +Only use one of the parameters `stopwords` or `stopwords_path`. If both are used, no error is returned but only `stopwords` parameter is applied. {: .note} ## Example: Analyzer with parameters From e99d29bf475bce3e82aabe5e8dea20194a8a6215 Mon Sep 17 00:00:00 2001 From: AntonEliatra Date: Thu, 3 Jul 2025 13:28:17 +0100 Subject: [PATCH 6/8] Apply suggestions from code review Co-authored-by: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com> Signed-off-by: AntonEliatra --- _analyzers/supported-analyzers/standard.md | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/_analyzers/supported-analyzers/standard.md b/_analyzers/supported-analyzers/standard.md index b38fddfd95c..cb7fc2edd2c 100644 --- a/_analyzers/supported-analyzers/standard.md +++ b/_analyzers/supported-analyzers/standard.md @@ -11,12 +11,11 @@ The `standard` analyzer is the built-in default analyzer used for general-purpos The `standard` analyzer performs the following operations: -- **Tokenization**: It uses the [`standard`]({{site.url}}{{site.baseurl}}/analyzers/tokenizers/standard/) tokenizer, which splits text into words based on Unicode text segmentation rules, handling spaces, punctuation, and common delimiters. -- **Lowercasing**: It applies the [`lowercase`]({{site.url}}{{site.baseurl}}/analyzers/token-filters/lowercase/) token filter to convert all tokens to lowercase, ensuring consistent matching regardless of input case. +- **Tokenization**: Uses the [`standard`]({{site.url}}{{site.baseurl}}/analyzers/tokenizers/standard/) tokenizer, which splits text into words based on Unicode text segmentation rules, handling spaces, punctuation, and common delimiters. +- **Lowercasing**: Applies the [`lowercase`]({{site.url}}{{site.baseurl}}/analyzers/token-filters/lowercase/) token filter to convert all tokens to lowercase, ensuring consistent matching regardless of input case. This combination makes the `standard` analyzer ideal for indexing a wide range of natural language content without needing language-specific customizations. ---- ## Example: Creating an index with the standard analyzer @@ -37,24 +36,23 @@ PUT /my_standard_index ``` {% include copy-curl.html %} ---- ## Parameters -The `standard` analyzer supports the following optional parameters: +The `standard` analyzer supports the following optional parameters. 
| Parameter | Data type | Default | Description | |:----------|:-----|:--------|:------------| -| `max_token_length` | Integer | `255` | Sets the maximum length of a token before it is split. | +| `max_token_length` | Integer | `255` | The maximum length of a token before it is split. | | `stopwords` | String or list of strings | None | A list of stopwords or a predefined stopword set like `_english_` to remove during analysis. | -| `stopwords_path` | String | None | Path to a file containing stopwords to be used during analysis. | +| `stopwords_path` | String | None | The path to a file containing stopwords to be used during analysis. | Only use one of the parameters `stopwords` or `stopwords_path`. If both are used, no error is returned but only `stopwords` parameter is applied. {: .note} ## Example: Analyzer with parameters -The following example creates index `products` and configures `max_token_length` and `stopwords`: +The following example creates a `products` index and configures the `max_token_length` and `stopwords` parameters: ```json PUT /animals @@ -76,7 +74,7 @@ PUT /animals ``` {% include copy-curl.html %} -Use the following `_analyze` API to see how the `my_manual_stopwords_analyzer` processes text: +Use the following `_analyze` API request to see how the `my_manual_stopwords_analyzer` processes text: ```json POST /animals/_analyze @@ -89,9 +87,9 @@ POST /animals/_analyze The returned tokens are: -- separated based on spacing -- lowercased -- stopwords removed +- Split on spaces +- Lowercased +- Stopwords removed ```json { From 401df5bf0d3bc1e6bf5bcf1a0c2099c03735f20c Mon Sep 17 00:00:00 2001 From: Anton Rubin Date: Thu, 3 Jul 2025 15:23:03 +0100 Subject: [PATCH 7/8] addressing the PR comments Signed-off-by: Anton Rubin --- _analyzers/supported-analyzers/standard.md | 2 +- _analyzers/token-filters/stop.md | 44 +++++++++++++++++++++- 2 files changed, 43 insertions(+), 3 deletions(-) diff --git a/_analyzers/supported-analyzers/standard.md b/_analyzers/supported-analyzers/standard.md index cb7fc2edd2c..e3df4825238 100644 --- a/_analyzers/supported-analyzers/standard.md +++ b/_analyzers/supported-analyzers/standard.md @@ -44,7 +44,7 @@ The `standard` analyzer supports the following optional parameters. | Parameter | Data type | Default | Description | |:----------|:-----|:--------|:------------| | `max_token_length` | Integer | `255` | The maximum length of a token before it is split. | -| `stopwords` | String or list of strings | None | A list of stopwords or a predefined stopword set like `_english_` to remove during analysis. | +| `stopwords` | String or list of strings | None | A list of stopwords or a [Predefined stopword sets by language]({{site.url}}{{site.baseurl}}/analyzers/token-filters/stop/#predefined-stopword-sets-by-language) to remove during analysis. For example `_english_`. | | `stopwords_path` | String | None | The path to a file containing stopwords to be used during analysis. | Only use one of the parameters `stopwords` or `stopwords_path`. If both are used, no error is returned but only `stopwords` parameter is applied. diff --git a/_analyzers/token-filters/stop.md b/_analyzers/token-filters/stop.md index 8f3e01b72da..c30da62a38a 100644 --- a/_analyzers/token-filters/stop.md +++ b/_analyzers/token-filters/stop.md @@ -17,7 +17,7 @@ The `stop` token filter can be configured with the following parameters. 
Parameter | Required/Optional | Data type | Description :--- | :--- | :--- | :--- -`stopwords` | Optional | String | Specifies either a custom array of stopwords or a language for which to fetch the predefined Lucene stopword list:

- [`_arabic_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/ar/stopwords.txt)
- [`_armenian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/hy/stopwords.txt)
- [`_basque_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/eu/stopwords.txt)
- [`_bengali_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/bn/stopwords.txt)
- [`_brazilian_` (Brazilian Portuguese)](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/br/stopwords.txt)
- [`_bulgarian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/bg/stopwords.txt)
- [`_catalan_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/ca/stopwords.txt)
- [`_cjk_` (Chinese, Japanese, and Korean)](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/cjk/stopwords.txt)
- [`_czech_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/cz/stopwords.txt)
- [`_danish_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/danish_stop.txt)
- [`_dutch_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/dutch_stop.txt)
- [`_english_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishAnalyzer.java#L48) (Default)
- [`_estonian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/et/stopwords.txt)
- [`_finnish_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/finnish_stop.txt)
- [`_french_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/french_stop.txt)
- [`_galician_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/gl/stopwords.txt)
- [`_german_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/german_stop.txt)
- [`_greek_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/el/stopwords.txt)
- [`_hindi_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/hi/stopwords.txt)
- [`_hungarian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/hungarian_stop.txt)
- [`_indonesian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/id/stopwords.txt)
- [`_irish_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/ga/stopwords.txt)
- [`_italian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/italian_stop.txt)
- [`_latvian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/lv/stopwords.txt)
- [`_lithuanian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/lt/stopwords.txt)
- [`_norwegian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/norwegian_stop.txt)
- [`_persian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/fa/stopwords.txt)
- [`_portuguese_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/portuguese_stop.txt)
- [`_romanian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/ro/stopwords.txt)
- [`_russian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/russian_stop.txt)
- [`_sorani_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/sr/stopwords.txt)
- [`_spanish_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/ckb/stopwords.txt)
- [`_swedish_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/swedish_stop.txt)
- [`_thai_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/th/stopwords.txt)
- [`_turkish_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/tr/stopwords.txt) +`stopwords` | Optional | String | Specifies either a custom array of stopwords or the [Predefined stopword sets by language](#predefined-stopword-sets-by-language). Default is `_english_`. `stopwords_path` | Optional | String | Specifies the file path (absolute or relative to the config directory) of the file containing custom stopwords. `ignore_case` | Optional | Boolean | If `true`, stopwords will be matched regardless of their case. Default is `false`. `remove_trailing` | Optional | Boolean | If `true`, trailing stopwords will be removed during analysis. Default is `true`. @@ -108,4 +108,44 @@ The response contains the generated tokens: } ] } -``` \ No newline at end of file +``` + +## Predefined stopword sets by language + +The following is a list of all predefined stopword sets available by language: + +- [`_arabic_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/ar/stopwords.txt) +- [`_armenian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/hy/stopwords.txt) +- [`_basque_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/eu/stopwords.txt) +- [`_bengali_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/bn/stopwords.txt) +- [`_brazilian_` (Brazilian Portuguese)](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/br/stopwords.txt) +- [`_bulgarian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/bg/stopwords.txt) +- [`_catalan_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/ca/stopwords.txt) +- [`_cjk_` (Chinese, Japanese, and Korean)](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/cjk/stopwords.txt) +- [`_czech_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/cz/stopwords.txt) +- [`_danish_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/danish_stop.txt) +- [`_dutch_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/dutch_stop.txt) +- [`_english_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishAnalyzer.java#L48) +- [`_estonian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/et/stopwords.txt) +- [`_finnish_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/finnish_stop.txt) +- [`_french_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/french_stop.txt) +- [`_galician_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/gl/stopwords.txt) +- [`_german_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/german_stop.txt) +- 
[`_greek_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/el/stopwords.txt)
- [`_hindi_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/hi/stopwords.txt)
- [`_hungarian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/hungarian_stop.txt)
- [`_indonesian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/id/stopwords.txt)
- [`_irish_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/ga/stopwords.txt)
- [`_italian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/italian_stop.txt)
- [`_latvian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/lv/stopwords.txt)
- [`_lithuanian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/lt/stopwords.txt)
- [`_norwegian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/norwegian_stop.txt)
- [`_persian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/fa/stopwords.txt)
- [`_portuguese_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/portuguese_stop.txt)
- [`_romanian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/ro/stopwords.txt)
- [`_russian_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/russian_stop.txt)
- [`_sorani_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/ckb/stopwords.txt)
- [`_spanish_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/spanish_stop.txt)
- [`_swedish_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/snowball/swedish_stop.txt)
- [`_thai_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/th/stopwords.txt)
- [`_turkish_`](https://github.com/apache/lucene/blob/main/lucene/analysis/common/src/resources/org/apache/lucene/analysis/tr/stopwords.txt)
\ No newline at end of file

From f1ea6e3985234b4ad28844b99babb3af1aacdea4 Mon Sep 17 00:00:00 2001
From: AntonEliatra
Date: Thu, 3 Jul 2025 17:32:51 +0100
Subject: [PATCH 8/8] Apply suggestions from code review

Co-authored-by: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com>
Signed-off-by: AntonEliatra
---
 _analyzers/token-filters/stop.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/_analyzers/token-filters/stop.md b/_analyzers/token-filters/stop.md
index c30da62a38a..641b304037f 100644
--- a/_analyzers/token-filters/stop.md
+++ b/_analyzers/token-filters/stop.md
@@ -17,7 +17,7 @@ The `stop` token filter can be configured with the following parameters.
Parameter | Required/Optional | Data type | Description :--- | :--- | :--- | :--- -`stopwords` | Optional | String | Specifies either a custom array of stopwords or the [Predefined stopword sets by language](#predefined-stopword-sets-by-language). Default is `_english_`. +`stopwords` | Optional | String | Specifies either a custom array of stopwords or a [predefined stopword set for a language](#predefined-stopword-sets-by-language). Default is `_english_`. `stopwords_path` | Optional | String | Specifies the file path (absolute or relative to the config directory) of the file containing custom stopwords. `ignore_case` | Optional | Boolean | If `true`, stopwords will be matched regardless of their case. Default is `false`. `remove_trailing` | Optional | Boolean | If `true`, trailing stopwords will be removed during analysis. Default is `true`.
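
The `ignore_case` and `remove_trailing` parameters are easiest to understand with a concrete configuration. The following is a minimal sketch, with hypothetical index, filter, and analyzer names, showing `ignore_case` in a custom analyzer:

```json
PUT /stopword_case_demo
{
  "settings": {
    "analysis": {
      "filter": {
        "case_insensitive_stop": {
          "type": "stop",
          "stopwords": ["the", "is"],
          "ignore_case": true
        }
      },
      "analyzer": {
        "case_insensitive_stop_analyzer": {
          "type": "custom",
          "tokenizer": "standard",
          "filter": ["case_insensitive_stop"]
        }
      }
    }
  }
}
```

Analyzing sample text with this analyzer illustrates the case-insensitive match:

```json
POST /stopword_case_demo/_analyze
{
  "analyzer": "case_insensitive_stop_analyzer",
  "text": "THE turtle IS slow"
}
```

Because `ignore_case` is `true`, `THE` and `IS` match the lowercase stopwords, and only `turtle` and `slow` are returned. By contrast, setting `remove_trailing` to `false` keeps a sentence-final stopword, which is typically useful for completion-style suggesters in which the last word may be the prefix of a longer term.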