diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index 20fc9cfb7..0d5c1928f 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -3798,8 +3798,7 @@ async def open_point_in_time( :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated - values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, - `hidden`, `none`. + values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param index_filter: Filter indices if the provided query rewrites to `match_none` @@ -5695,7 +5694,7 @@ async def search_shards( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param local: If `true`, the request retrieves information from the local node @@ -5807,8 +5806,7 @@ async def search_template( :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated - values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, - `hidden`, `none`. + values, such as `open,hidden`. :param explain: If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter. @@ -6519,8 +6517,7 @@ async def update_by_query( :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated - values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, - `hidden`, `none`. + values, such as `open,hidden`. :param from_: Skips the specified number of documents. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. 
diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index ba9795103..2e06797e0 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -1774,7 +1774,200 @@ async def nodes( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, full_id: t.Optional[t.Union[bool, str]] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + "query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + 
"query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, @@ -1801,16 +1994,17 @@ async def nodes( to `text`, `json`, `cbor`, `yaml`, or `smile`. :param full_id: If `true`, return the full node ID. If `false`, return the shortened node ID. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. 
""" __path_parts: t.Dict[str, str] = {} @@ -2029,7 +2223,74 @@ async def recovery( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "bytes", + "bytes_percent", + "bytes_recovered", + "bytes_total", + "files", + "files_percent", + "files_recovered", + "files_total", + "index", + "repository", + "shard", + "snapshot", + "source_host", + "source_node", + "stage", + "start_time", + "start_time_millis", + "stop_time", + "stop_time_millis", + "target_host", + "target_node", + "time", + "translog_ops", + "translog_ops_percent", + "translog_ops_recovered", + "type", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "bytes", + "bytes_percent", + "bytes_recovered", + "bytes_total", + "files", + "files_percent", + "files_recovered", + "files_total", + "index", + "repository", + "shard", + "snapshot", + "source_host", + "source_node", + "stage", + "start_time", + "start_time_millis", + "stop_time", + "stop_time_millis", + "target_host", + "target_node", + "time", + "translog_ops", + "translog_ops_percent", + "translog_ops_recovered", + "type", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, @@ -2060,13 +2321,14 @@ async def recovery( shard recoveries. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2200,7 +2462,52 @@ async def segments( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "committed", + "compound", + "docs.count", + "docs.deleted", + "generation", + "id", + "index", + "ip", + "prirep", + "searchable", + "segment", + "shard", + "size", + "size.memory", + "version", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "committed", + "compound", + "docs.count", + "docs.deleted", + "generation", + "id", + "index", + "ip", + "prirep", + "searchable", + "segment", + "shard", + "size", + "size.memory", + "version", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, @@ -2226,7 +2533,8 @@ async def segments( :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. 
- :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from @@ -2234,9 +2542,9 @@ async def segments( from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2292,7 +2600,162 @@ async def shards( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "completion.size", + "dataset.size", + "dense_vector.value_count", + "docs", + "dsparse_vector.value_count", + "fielddata.evictions", + "fielddata.memory_size", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "id", + "index", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "node", + "prirep", + "query_cache.evictions", + "query_cache.memory_size", + "recoverysource.type", + "refresh.time", + "refresh.total", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "seq_no.global_checkpoint", + "seq_no.local_checkpoint", + "seq_no.max", + "shard", + "state", + "store", + "suggest.current", + "suggest.time", + "suggest.total", + "sync_id", + "unassigned.at", + "unassigned.details", + "unassigned.for", + "unassigned.reason", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "completion.size", + "dataset.size", + "dense_vector.value_count", + "docs", + "dsparse_vector.value_count", + "fielddata.evictions", + "fielddata.memory_size", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "id", + "index", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + 
"indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "node", + "prirep", + "query_cache.evictions", + "query_cache.memory_size", + "recoverysource.type", + "refresh.time", + "refresh.total", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "seq_no.global_checkpoint", + "seq_no.local_checkpoint", + "seq_no.max", + "shard", + "state", + "store", + "suggest.current", + "suggest.time", + "suggest.total", + "sync_id", + "unassigned.at", + "unassigned.details", + "unassigned.for", + "unassigned.reason", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, @@ -2323,11 +2786,11 @@ async def shards( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. 
""" __path_parts: t.Dict[str, str] @@ -2380,7 +2843,124 @@ async def snapshots( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + "query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "duration", + "end_epoch", + "end_time", + "failed_shards", + "id", + "indices", + "reason", + "repository", + "start_epoch", + "start_time", + "status", + "successful_shards", + "total_shards", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, @@ -2408,7 +2988,8 @@ async def snapshots( If any repository fails during the request, Elasticsearch returns an error. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. 
:param ignore_unavailable: If `true`, the response does not include information @@ -2655,7 +3236,62 @@ async def thread_pool( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "active", + "completed", + "core", + "ephemeral_id", + "host", + "ip", + "keep_alive", + "largest", + "max", + "name", + "node_id", + "node_name", + "pid", + "pool_size", + "port", + "queue", + "queue_size", + "rejected", + "size", + "type", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "active", + "completed", + "core", + "ephemeral_id", + "host", + "ip", + "keep_alive", + "largest", + "max", + "name", + "node_id", + "node_name", + "pid", + "pool_size", + "port", + "queue", + "queue_size", + "rejected", + "size", + "type", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, @@ -2689,10 +3325,10 @@ async def thread_pool( the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index 78ab8d492..09b1b115a 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -870,9 +870,9 @@ async def put_settings( :param flat_settings: Return settings in flat format (default: false) :param master_timeout: Explicit operation timeout for connection to master node - :param persistent: + :param persistent: The settings that persist after the cluster restarts. :param timeout: Explicit operation timeout - :param transient: + :param transient: The settings that do not persist after the cluster restarts. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/settings" @@ -928,7 +928,7 @@ async def remote_info( This API returns information that reflects current state on the local cluster. The connected field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. - To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the resolve cluster endpoint.

+ To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the /_resolve/cluster endpoint.
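As an illustrative aside (not part of the diff): a minimal sketch of the behaviour described above using the async Python client, assuming a locally reachable cluster and a configured remote alias named remote1 (both placeholders).

    import asyncio

    from elasticsearch import AsyncElasticsearch


    async def check_remote() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # placeholder URL
        # "connected" only reports whether a connection is currently open.
        info = await client.cluster.remote_info()
        print(info.get("remote1", {}).get("connected"))
        # Calling the /_resolve/cluster endpoint is one way to trigger a reconnection attempt.
        await client.indices.resolve_cluster(name="remote1:*")
        await client.close()


    asyncio.run(check_remote())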

diff --git a/elasticsearch/_async/client/esql.py b/elasticsearch/_async/client/esql.py index 6e3e00524..b413e69ed 100644 --- a/elasticsearch/_async/client/esql.py +++ b/elasticsearch/_async/client/esql.py @@ -31,6 +31,8 @@ class EsqlClient(NamespacedClient): "columnar", "filter", "include_ccs_metadata", + "keep_alive", + "keep_on_completion", "locale", "params", "profile", @@ -147,10 +149,6 @@ async def async_query( __query["format"] = format if human is not None: __query["human"] = human - if keep_alive is not None: - __query["keep_alive"] = keep_alive - if keep_on_completion is not None: - __query["keep_on_completion"] = keep_on_completion if pretty is not None: __query["pretty"] = pretty if not __body: @@ -162,6 +160,10 @@ async def async_query( __body["filter"] = filter if include_ccs_metadata is not None: __body["include_ccs_metadata"] = include_ccs_metadata + if keep_alive is not None: + __body["keep_alive"] = keep_alive + if keep_on_completion is not None: + __body["keep_on_completion"] = keep_on_completion if locale is not None: __body["locale"] = locale if params is not None: @@ -244,6 +246,14 @@ async def async_query_get( drop_null_columns: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, + t.Literal[ + "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml" + ], + ] + ] = None, human: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, @@ -269,6 +279,7 @@ async def async_query_get( will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. + :param format: A short version of the Accept header, for example `json` or `yaml`. :param keep_alive: The period for which the query and its results are stored in the cluster. When this period expires, the query and its results are deleted, even if the query is still ongoing. @@ -289,6 +300,8 @@ async def async_query_get( __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format if human is not None: __query["human"] = human if keep_alive is not None: diff --git a/elasticsearch/_async/client/fleet.py b/elasticsearch/_async/client/fleet.py index 38b3771d5..aca07e0ac 100644 --- a/elasticsearch/_async/client/fleet.py +++ b/elasticsearch/_async/client/fleet.py @@ -138,9 +138,9 @@ async def msearch( """ .. raw:: html -

Executes several fleet searches with a single API request.
-          The API follows the same structure as the multi search API. However, similar to the fleet search API, it
-          supports the wait_for_checkpoints parameter.
+          Executes several fleet searches with a single API request.
+          The API follows the same structure as the multi search (_msearch) API.
+          However, similar to the fleet search API, it supports the wait_for_checkpoints parameter.
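To make the reworded behaviour concrete, a sketch (illustrative only) of a fleet multi search call with the async client; the index name and checkpoint value are placeholders.

    import asyncio

    from elasticsearch import AsyncElasticsearch


    async def fleet_msearch() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # placeholder URL
        resp = await client.fleet.msearch(
            index=".fleet-agents",  # placeholder fleet index
            # Same header/body pairs as the _msearch API.
            searches=[
                {},
                {"query": {"match_all": {}}},
            ],
            # Fleet-specific: wait for these checkpoints before searching.
            wait_for_checkpoints=[0],
            allow_partial_search_results=False,
        )
        print(resp["responses"][0]["hits"]["total"])
        await client.close()


    asyncio.run(fleet_msearch())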

``_ @@ -154,9 +154,9 @@ async def msearch( example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. :param allow_partial_search_results: If true, returns partial results if there - are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). - If false, returns an error with no partial results. Defaults to the configured - cluster setting `search.default_allow_partial_results` which is true by default. + are shard request timeouts or shard failures. If false, returns an error + with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` + which is true by default. :param ccs_minimize_roundtrips: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. :param expand_wildcards: Type of index that wildcard expressions can match. If @@ -400,9 +400,9 @@ async def search( :param aggs: :param allow_no_indices: :param allow_partial_search_results: If true, returns partial results if there - are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). - If false, returns an error with no partial results. Defaults to the configured - cluster setting `search.default_allow_partial_results` which is true by default. + are shard request timeouts or shard failures. If false, returns an error + with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` + which is true by default. :param analyze_wildcard: :param analyzer: :param batched_reduce_size: diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index be758d06f..8edd134e2 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -173,7 +173,7 @@ async def analyze( The _analyze endpoint without a specified index will always use 10000 as its limit.

- ``_ + ``_ :param index: Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the @@ -338,7 +338,7 @@ async def clear_cache( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param fielddata: If `true`, clears the fields cache. Use the `fields` parameter to clear the cache of specific fields only. :param fields: Comma-separated list of field names used to limit the `fielddata` @@ -563,7 +563,7 @@ async def close( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If @@ -942,7 +942,7 @@ async def delete( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If @@ -1246,7 +1246,8 @@ async def delete_template( """ .. raw:: html -

Delete a legacy index template.
+          Delete a legacy index template.
+          IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

``_ @@ -1486,7 +1487,7 @@ async def exists( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param flat_settings: If `true`, returns settings in flat format. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. @@ -1570,7 +1571,7 @@ async def exists_alias( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. :param local: If `true`, the request retrieves information from the local node @@ -1918,7 +1919,7 @@ async def flush( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param force: If `true`, the request forces a flush even if there are no changes to commit to the index. :param ignore_unavailable: If `false`, the request returns an error if it targets @@ -2236,7 +2237,7 @@ async def get_alias( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param local: If `true`, the request retrieves information from the local node @@ -2315,8 +2316,7 @@ async def get_data_lifecycle( wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`. :param expand_wildcards: Type of data stream that wildcard patterns can match. - Supports comma-separated values, such as `open,hidden`. Valid values are: - `all`, `open`, `closed`, `hidden`, `none`. + Supports comma-separated values, such as `open,hidden`. :param include_defaults: If `true`, return all default settings in the response. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -2512,7 +2512,7 @@ async def get_field_mapping( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param include_defaults: If `true`, return all default settings in the response. 
@@ -2668,7 +2668,7 @@ async def get_mapping( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param local: If `true`, the request retrieves information from the local node @@ -2878,7 +2878,7 @@ async def get_template( """ .. raw:: html -

Get index templates.
+          Get legacy index templates.
           Get information about one or more index templates.

           IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
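A small usage sketch for the retitled API above (illustrative only); template_1 is a placeholder template name.

    import asyncio

    from elasticsearch import AsyncElasticsearch


    async def show_legacy_templates() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # placeholder URL
        # Legacy endpoint (GET /_template/{name}); composable templates use get_index_template instead.
        templates = await client.indices.get_template(name="template_1")
        print(list(templates))
        await client.close()


    asyncio.run(show_legacy_templates())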

@@ -3160,7 +3160,7 @@ async def open( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If @@ -3419,8 +3419,7 @@ async def put_data_lifecycle( for this data stream. A data stream lifecycle that's disabled (enabled: `false`) will have no effect on the data stream. :param expand_wildcards: Type of data stream that wildcard patterns can match. - Supports comma-separated values, such as `open,hidden`. Valid values are: - `all`, `hidden`, `open`, `closed`, `none`. + Supports comma-separated values, such as `open,hidden`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -3732,7 +3731,7 @@ async def put_mapping( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param field_names: Control whether field names are enabled for the index. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. @@ -3850,8 +3849,34 @@ async def put_settings( Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default.

To revert a setting to the default value, use a null value.
-          The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation.
+          The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation.
           To preserve existing settings from being updated, set the preserve_existing parameter to true.
+
+          There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:
+
+          {
+            "number_of_replicas": 1
+          }
+
+          Or you can use an index setting object:
+
+          {
+            "index": {
+              "number_of_replicas": 1
+            }
+          }
+
+          Or you can use dot annotation:
+
+          {
+            "index.number_of_replicas": 1
+          }
+
+          Or you can embed any of the aforementioned options in a settings object. For example:
+
+          {
+            "settings": {
+              "index": {
+                "number_of_replicas": 1
+              }
+            }
+          }
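To tie the representations above to the client, a sketch (illustrative only) of two equivalent calls; my-index is a placeholder and the index is assumed to exist.

    import asyncio

    from elasticsearch import AsyncElasticsearch


    async def update_replicas() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # placeholder URL
        # Bare setting name...
        await client.indices.put_settings(
            index="my-index", settings={"number_of_replicas": 1}
        )
        # ...or the nested "index" object form; both send the same update.
        await client.indices.put_settings(
            index="my-index", settings={"index": {"number_of_replicas": 1}}
        )
        await client.close()


    asyncio.run(update_replicas())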

NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. @@ -3971,7 +3996,7 @@ async def put_template( """ .. raw:: html -

Create or update an index template.
+          Create or update a legacy index template.
           Index templates define settings, mappings, and aliases that can be applied automatically to new indices.
           Elasticsearch applies templates to new indices based on an index pattern that matches the index name.

           IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
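For the legacy API described above, a sketch of a create call (illustrative only); the template name, pattern, settings, and mappings are placeholders.

    import asyncio

    from elasticsearch import AsyncElasticsearch


    async def create_legacy_template() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # placeholder URL
        # Legacy endpoint (PUT /_template/{name}); new code should prefer
        # composable templates via indices.put_index_template.
        await client.indices.put_template(
            name="logs-template",
            index_patterns=["logs-*"],
            settings={"number_of_shards": 1},
            mappings={"properties": {"@timestamp": {"type": "date"}}},
        )
        await client.close()


    asyncio.run(create_legacy_template())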

@@ -4175,7 +4200,7 @@ async def refresh( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. """ @@ -4374,10 +4399,9 @@ async def resolve_cluster( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - NOTE: This option is only supported when specifying an index expression. - You will get an error if you specify index options to the `_resolve/cluster` - API endpoint that takes no index expression. + as `open,hidden`. NOTE: This option is only supported when specifying an + index expression. You will get an error if you specify index options to the + `_resolve/cluster` API endpoint that takes no index expression. :param ignore_throttled: If true, concrete, expanded, or aliased indices are ignored when frozen. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to @@ -4470,7 +4494,7 @@ async def resolve_index( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. """ @@ -4685,7 +4709,7 @@ async def segments( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param verbose: If `true`, the request returns a verbose response. @@ -5598,7 +5622,7 @@ async def validate_query( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param explain: If `true`, the response returns detailed information if an error has occurred. :param ignore_unavailable: If `false`, the request returns an error if it targets diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index 7d90246e5..8ea5252f7 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -370,22 +370,37 @@ async def put( """ .. raw:: html -

Create an inference endpoint.
-          When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-          After creating the endpoint, wait for the model deployment to complete before using it.
-          To verify the deployment status, use the get trained model statistics API.
-          Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
-          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
+          Create an inference endpoint.
+
+          IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
+          For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.
+          However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
+
+          The following integrations are available through the inference API. You can find the available task types next to the integration name:
+          • AlibabaCloud AI Search (completion, rerank, sparse_embedding, text_embedding)
+          • Amazon Bedrock (completion, text_embedding)
+          • Anthropic (completion)
+          • Azure AI Studio (completion, text_embedding)
+          • Azure OpenAI (completion, text_embedding)
+          • Cohere (completion, rerank, text_embedding)
+          • Elasticsearch (rerank, sparse_embedding, text_embedding - this service is for built-in models and models uploaded through Eland)
+          • ELSER (sparse_embedding)
+          • Google AI Studio (completion, text_embedding)
+          • Google Vertex AI (rerank, text_embedding)
+          • Hugging Face (text_embedding)
+          • Mistral (text_embedding)
+          • OpenAI (chat_completion, completion, text_embedding)
+          • VoyageAI (text_embedding, rerank)
+          • Watsonx inference integration (text_embedding)
+          • JinaAI (text_embedding, rerank)
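To make the integration list concrete, a sketch (illustrative only) of creating one endpoint through the generic put API; the endpoint ID is a placeholder and the inference_config shape follows the Elasticsearch (E5) service as one example.

    import asyncio

    from elasticsearch import AsyncElasticsearch


    async def create_endpoint() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # placeholder URL
        # task_type must be one the chosen service supports (see the list above).
        await client.inference.put(
            task_type="text_embedding",
            inference_id="my-e5-endpoint",  # placeholder endpoint ID
            inference_config={
                "service": "elasticsearch",
                "service_settings": {
                    "model_id": ".multilingual-e5-small",
                    "num_allocations": 1,
                    "num_threads": 1,
                },
            },
        )
        await client.close()


    asyncio.run(create_endpoint())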
``_ :param inference_id: The inference Id :param inference_config: - :param task_type: The task type + :param task_type: The task type. Refer to the integration list in the API description + for the available task types. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") @@ -458,11 +473,6 @@ async def put_alibabacloud(

Create an AlibabaCloud AI Search inference endpoint.

Create an inference endpoint to perform an inference task with the alibabacloud-ai-search service.
-
-          When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-          After creating the endpoint, wait for the model deployment to complete before using it.
-          To verify the deployment status, use the get trained model statistics API.
-          Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
-          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

``_ @@ -553,16 +563,11 @@ async def put_amazonbedrock( .. raw:: html

Create an Amazon Bedrock inference endpoint.

-

Creates an inference endpoint to perform an inference task with the amazonbedrock service.
+          Create an inference endpoint to perform an inference task with the amazonbedrock service.

           info You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
-
-          When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-          After creating the endpoint, wait for the model deployment to complete before using it.
-          To verify the deployment status, use the get trained model statistics API.
-          Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
-          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

``_ @@ -654,11 +659,6 @@ async def put_anthropic(

Create an Anthropic inference endpoint.

Create an inference endpoint to perform an inference task with the anthropic service.
-
-          When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-          After creating the endpoint, wait for the model deployment to complete before using it.
-          To verify the deployment status, use the get trained model statistics API.
-          Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
-          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

``_ @@ -751,11 +751,6 @@ async def put_azureaistudio(

Create an Azure AI studio inference endpoint.

Create an inference endpoint to perform an inference task with the azureaistudio service.
-
-          When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-          After creating the endpoint, wait for the model deployment to complete before using it.
-          To verify the deployment status, use the get trained model statistics API.
-          Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
-          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

``_ @@ -853,11 +848,6 @@ async def put_azureopenai(
  • GPT-3.5
  • The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.
-
-          When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-          After creating the endpoint, wait for the model deployment to complete before using it.
-          To verify the deployment status, use the get trained model statistics API.
-          Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
-          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -951,11 +941,6 @@ async def put_cohere(

    Create a Cohere inference endpoint.

Create an inference endpoint to perform an inference task with the cohere service.
-
-          When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-          After creating the endpoint, wait for the model deployment to complete before using it.
-          To verify the deployment status, use the get trained model statistics API.
-          Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
-          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1239,11 +1224,6 @@ async def put_googleaistudio(

    Create an Google AI Studio inference endpoint.

Create an inference endpoint to perform an inference task with the googleaistudio service.
-
-          When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-          After creating the endpoint, wait for the model deployment to complete before using it.
-          To verify the deployment status, use the get trained model statistics API.
-          Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
-          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1331,11 +1311,6 @@ async def put_googlevertexai(

    Create a Google Vertex AI inference endpoint.

Create an inference endpoint to perform an inference task with the googlevertexai service.
-
-          When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-          After creating the endpoint, wait for the model deployment to complete before using it.
-          To verify the deployment status, use the get trained model statistics API.
-          Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
-          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1434,11 +1409,6 @@ async def put_hugging_face(
  • multilingual-e5-base
  • multilingual-e5-small
-
-          When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-          After creating the endpoint, wait for the model deployment to complete before using it.
-          To verify the deployment status, use the get trained model statistics API.
-          Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
-          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1528,11 +1498,6 @@ async def put_jinaai(

    Create an inference endpoint to perform an inference task with the jinaai service.

To review the available rerank models, refer to https://jina.ai/reranker. To review the available text_embedding models, refer to the https://jina.ai/embeddings/.
-
-          When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-          After creating the endpoint, wait for the model deployment to complete before using it.
-          To verify the deployment status, use the get trained model statistics API.
-          Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
-          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1616,11 +1581,6 @@ async def put_mistral(

    Create a Mistral inference endpoint.

Creates an inference endpoint to perform an inference task with the mistral service.
-
-          When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-          After creating the endpoint, wait for the model deployment to complete before using it.
-          To verify the deployment status, use the get trained model statistics API.
-          Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
-          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1709,11 +1669,6 @@ async def put_openai(

    Create an OpenAI inference endpoint.

Create an inference endpoint to perform an inference task with the openai service or openai compatible APIs.
-
-          When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-          After creating the endpoint, wait for the model deployment to complete before using it.
-          To verify the deployment status, use the get trained model statistics API.
-          Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
-          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1890,11 +1845,6 @@ async def put_watsonx(

Create an inference endpoint to perform an inference task with the watsonxai service. You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.
-
-          When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-          After creating the endpoint, wait for the model deployment to complete before using it.
-          To verify the deployment status, use the get trained model statistics API.
-          Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
-          Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index ca2eb96fb..0b0cd2e89 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -3876,13 +3876,7 @@ async def put_job( :param description: A description of the job. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values. Valid - values are: * `all`: Match any data stream or index, including hidden ones. - * `closed`: Match closed, non-hidden indices. Also matches any non-hidden - data stream. Data streams cannot be closed. * `hidden`: Match hidden data - streams and hidden indices. Must be combined with `open`, `closed`, or both. - * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden - indices. Also matches any non-hidden data stream. + expressions match hidden data streams. Supports comma-separated values. :param groups: A list of job groups. A job can belong to no groups or many. :param ignore_throttled: If `true`, concrete, expanded or aliased indices are ignored when frozen. @@ -5145,13 +5139,7 @@ async def update_datafeed( check runs only on real-time datafeeds. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values. Valid - values are: * `all`: Match any data stream or index, including hidden ones. - * `closed`: Match closed, non-hidden indices. Also matches any non-hidden - data stream. Data streams cannot be closed. * `hidden`: Match hidden data - streams and hidden indices. Must be combined with `open`, `closed`, or both. - * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden - indices. Also matches any non-hidden data stream. + expressions match hidden data streams. Supports comma-separated values. :param frequency: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py index 94120d920..63e72e477 100644 --- a/elasticsearch/_async/client/nodes.py +++ b/elasticsearch/_async/client/nodes.py @@ -108,7 +108,8 @@ async def get_repositories_metering_info( ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned - information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). + information. For more information about the nodes selective options, refer + to the node specification documentation. """ if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'node_id'") diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index a95b9449c..943a5adf1 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -3796,8 +3796,7 @@ def open_point_in_time( :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated - values, such as `open,hidden`. 
Valid values are: `all`, `open`, `closed`, - `hidden`, `none`. + values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param index_filter: Filter indices if the provided query rewrites to `match_none` @@ -5693,7 +5692,7 @@ def search_shards( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param local: If `true`, the request retrieves information from the local node @@ -5805,8 +5804,7 @@ def search_template( :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated - values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, - `hidden`, `none`. + values, such as `open,hidden`. :param explain: If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter. @@ -6517,8 +6515,7 @@ def update_by_query( :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated - values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, - `hidden`, `none`. + values, such as `open,hidden`. :param from_: Skips the specified number of documents. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. 
diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py index 6389cd70d..53f3c088e 100644 --- a/elasticsearch/_sync/client/cat.py +++ b/elasticsearch/_sync/client/cat.py @@ -1774,7 +1774,200 @@ def nodes( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, full_id: t.Optional[t.Union[bool, str]] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + "query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + 
"query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, @@ -1801,16 +1994,17 @@ def nodes( to `text`, `json`, `cbor`, `yaml`, or `smile`. :param full_id: If `true`, return the full node ID. If `false`, return the shortened node ID. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. 
""" __path_parts: t.Dict[str, str] = {} @@ -2029,7 +2223,74 @@ def recovery( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "bytes", + "bytes_percent", + "bytes_recovered", + "bytes_total", + "files", + "files_percent", + "files_recovered", + "files_total", + "index", + "repository", + "shard", + "snapshot", + "source_host", + "source_node", + "stage", + "start_time", + "start_time_millis", + "stop_time", + "stop_time_millis", + "target_host", + "target_node", + "time", + "translog_ops", + "translog_ops_percent", + "translog_ops_recovered", + "type", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "bytes", + "bytes_percent", + "bytes_recovered", + "bytes_total", + "files", + "files_percent", + "files_recovered", + "files_total", + "index", + "repository", + "shard", + "snapshot", + "source_host", + "source_node", + "stage", + "start_time", + "start_time_millis", + "stop_time", + "stop_time_millis", + "target_host", + "target_node", + "time", + "translog_ops", + "translog_ops_percent", + "translog_ops_recovered", + "type", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, @@ -2060,13 +2321,14 @@ def recovery( shard recoveries. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2200,7 +2462,52 @@ def segments( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "committed", + "compound", + "docs.count", + "docs.deleted", + "generation", + "id", + "index", + "ip", + "prirep", + "searchable", + "segment", + "shard", + "size", + "size.memory", + "version", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "committed", + "compound", + "docs.count", + "docs.deleted", + "generation", + "id", + "index", + "ip", + "prirep", + "searchable", + "segment", + "shard", + "size", + "size.memory", + "version", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, @@ -2226,7 +2533,8 @@ def segments( :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. 
- :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from @@ -2234,9 +2542,9 @@ def segments( from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2292,7 +2600,162 @@ def shards( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "completion.size", + "dataset.size", + "dense_vector.value_count", + "docs", + "dsparse_vector.value_count", + "fielddata.evictions", + "fielddata.memory_size", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "id", + "index", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "node", + "prirep", + "query_cache.evictions", + "query_cache.memory_size", + "recoverysource.type", + "refresh.time", + "refresh.total", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "seq_no.global_checkpoint", + "seq_no.local_checkpoint", + "seq_no.max", + "shard", + "state", + "store", + "suggest.current", + "suggest.time", + "suggest.total", + "sync_id", + "unassigned.at", + "unassigned.details", + "unassigned.for", + "unassigned.reason", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "completion.size", + "dataset.size", + "dense_vector.value_count", + "docs", + "dsparse_vector.value_count", + "fielddata.evictions", + "fielddata.memory_size", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "id", + "index", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + 
"indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "node", + "prirep", + "query_cache.evictions", + "query_cache.memory_size", + "recoverysource.type", + "refresh.time", + "refresh.total", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "seq_no.global_checkpoint", + "seq_no.local_checkpoint", + "seq_no.max", + "shard", + "state", + "store", + "suggest.current", + "suggest.time", + "suggest.total", + "sync_id", + "unassigned.at", + "unassigned.details", + "unassigned.for", + "unassigned.reason", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, @@ -2323,11 +2786,11 @@ def shards( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. 
""" __path_parts: t.Dict[str, str] @@ -2380,7 +2843,124 @@ def snapshots( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + "query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "duration", + "end_epoch", + "end_time", + "failed_shards", + "id", + "indices", + "reason", + "repository", + "start_epoch", + "start_time", + "status", + "successful_shards", + "total_shards", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, @@ -2408,7 +2988,8 @@ def snapshots( If any repository fails during the request, Elasticsearch returns an error. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. 
:param ignore_unavailable: If `true`, the response does not include information @@ -2655,7 +3236,62 @@ def thread_pool( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "active", + "completed", + "core", + "ephemeral_id", + "host", + "ip", + "keep_alive", + "largest", + "max", + "name", + "node_id", + "node_name", + "pid", + "pool_size", + "port", + "queue", + "queue_size", + "rejected", + "size", + "type", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "active", + "completed", + "core", + "ephemeral_id", + "host", + "ip", + "keep_alive", + "largest", + "max", + "name", + "node_id", + "node_name", + "pid", + "pool_size", + "port", + "queue", + "queue_size", + "rejected", + "size", + "type", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, @@ -2689,10 +3325,10 @@ def thread_pool( the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py index a77f6dd5e..0cf2625d6 100644 --- a/elasticsearch/_sync/client/cluster.py +++ b/elasticsearch/_sync/client/cluster.py @@ -870,9 +870,9 @@ def put_settings( :param flat_settings: Return settings in flat format (default: false) :param master_timeout: Explicit operation timeout for connection to master node - :param persistent: + :param persistent: The settings that persist after the cluster restarts. :param timeout: Explicit operation timeout - :param transient: + :param transient: The settings that do not persist after the cluster restarts. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/settings" @@ -928,7 +928,7 @@ def remote_info( This API returns information that reflects current state on the local cluster. The connected field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. - To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the resolve cluster endpoint.

    + To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the /_resolve/cluster endpoint.
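    As an editor's illustration (not part of the patch), a reconnection attempt could be triggered from the Python client like this, assuming a configured remote cluster alias:

        from elasticsearch import Elasticsearch

        client = Elasticsearch("http://localhost:9200")  # placeholder connection details

        # "connected" here only reflects whether a connection is currently open.
        print(client.cluster.remote_info())

        # Any cross-cluster request can prompt a reconnection attempt; the resolve
        # cluster endpoint is a lightweight way to do so ("my_remote" is a placeholder alias).
        client.indices.resolve_cluster(name="my_remote:*")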

    diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py index b4ddd2052..b91308ea8 100644 --- a/elasticsearch/_sync/client/esql.py +++ b/elasticsearch/_sync/client/esql.py @@ -31,6 +31,8 @@ class EsqlClient(NamespacedClient): "columnar", "filter", "include_ccs_metadata", + "keep_alive", + "keep_on_completion", "locale", "params", "profile", @@ -147,10 +149,6 @@ def async_query( __query["format"] = format if human is not None: __query["human"] = human - if keep_alive is not None: - __query["keep_alive"] = keep_alive - if keep_on_completion is not None: - __query["keep_on_completion"] = keep_on_completion if pretty is not None: __query["pretty"] = pretty if not __body: @@ -162,6 +160,10 @@ def async_query( __body["filter"] = filter if include_ccs_metadata is not None: __body["include_ccs_metadata"] = include_ccs_metadata + if keep_alive is not None: + __body["keep_alive"] = keep_alive + if keep_on_completion is not None: + __body["keep_on_completion"] = keep_on_completion if locale is not None: __body["locale"] = locale if params is not None: @@ -244,6 +246,14 @@ def async_query_get( drop_null_columns: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, + t.Literal[ + "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml" + ], + ] + ] = None, human: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, @@ -269,6 +279,7 @@ def async_query_get( will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. + :param format: A short version of the Accept header, for example `json` or `yaml`. :param keep_alive: The period for which the query and its results are stored in the cluster. When this period expires, the query and its results are deleted, even if the query is still ongoing. @@ -289,6 +300,8 @@ def async_query_get( __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format if human is not None: __query["human"] = human if keep_alive is not None: diff --git a/elasticsearch/_sync/client/fleet.py b/elasticsearch/_sync/client/fleet.py index b27c06f09..1d8284625 100644 --- a/elasticsearch/_sync/client/fleet.py +++ b/elasticsearch/_sync/client/fleet.py @@ -138,9 +138,9 @@ def msearch( """ .. raw:: html -

    Executes several fleet searches with a single API request. - The API follows the same structure as the multi search API. However, similar to the fleet search API, it - supports the wait_for_checkpoints parameter.

    +

    Executes several fleet searches with a single API request.

    +

    The API follows the same structure as the multi search (_msearch) API. + However, similar to the fleet search API, it supports the wait_for_checkpoints parameter.
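    A rough sketch of the request shape with the Python client (editor's addition, not part of the patch; the index name and queries are placeholders):

        from elasticsearch import Elasticsearch

        client = Elasticsearch("http://localhost:9200")  # placeholder connection details

        # Header/body pairs, exactly as in the multi search (_msearch) API; a
        # wait_for_checkpoints argument can also be passed to wait on global checkpoints.
        resp = client.fleet.msearch(
            index=".fleet-agents",  # placeholder index
            searches=[
                {},
                {"query": {"match_all": {}}},
                {},
                {"query": {"term": {"policy_id": "default"}}},  # placeholder field and value
            ],
        )
        print(resp)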

    ``_ @@ -154,9 +154,9 @@ def msearch( example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. :param allow_partial_search_results: If true, returns partial results if there - are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). - If false, returns an error with no partial results. Defaults to the configured - cluster setting `search.default_allow_partial_results` which is true by default. + are shard request timeouts or shard failures. If false, returns an error + with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` + which is true by default. :param ccs_minimize_roundtrips: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. :param expand_wildcards: Type of index that wildcard expressions can match. If @@ -400,9 +400,9 @@ def search( :param aggs: :param allow_no_indices: :param allow_partial_search_results: If true, returns partial results if there - are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). - If false, returns an error with no partial results. Defaults to the configured - cluster setting `search.default_allow_partial_results` which is true by default. + are shard request timeouts or shard failures. If false, returns an error + with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` + which is true by default. :param analyze_wildcard: :param analyzer: :param batched_reduce_size: diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index 625e4a89f..cc98bdcf4 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -173,7 +173,7 @@ def analyze( The _analyze endpoint without a specified index will always use 10000 as its limit.

    - ``_ + ``_ :param index: Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the @@ -338,7 +338,7 @@ def clear_cache( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param fielddata: If `true`, clears the fields cache. Use the `fields` parameter to clear the cache of specific fields only. :param fields: Comma-separated list of field names used to limit the `fielddata` @@ -563,7 +563,7 @@ def close( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If @@ -942,7 +942,7 @@ def delete( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If @@ -1246,7 +1246,8 @@ def delete_template( """ .. raw:: html -

    Delete a legacy index template.

    +

    Delete a legacy index template. + IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
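    For example (editor's sketch, not part of the patch; the template name is a placeholder):

        from elasticsearch import Elasticsearch

        client = Elasticsearch("http://localhost:9200")  # placeholder connection details
        client.indices.delete_template(name="legacy-logs-template")  # deletes the named legacy template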

    ``_ @@ -1486,7 +1487,7 @@ def exists( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param flat_settings: If `true`, returns settings in flat format. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. @@ -1570,7 +1571,7 @@ def exists_alias( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. :param local: If `true`, the request retrieves information from the local node @@ -1918,7 +1919,7 @@ def flush( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param force: If `true`, the request forces a flush even if there are no changes to commit to the index. :param ignore_unavailable: If `false`, the request returns an error if it targets @@ -2236,7 +2237,7 @@ def get_alias( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param local: If `true`, the request retrieves information from the local node @@ -2315,8 +2316,7 @@ def get_data_lifecycle( wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`. :param expand_wildcards: Type of data stream that wildcard patterns can match. - Supports comma-separated values, such as `open,hidden`. Valid values are: - `all`, `open`, `closed`, `hidden`, `none`. + Supports comma-separated values, such as `open,hidden`. :param include_defaults: If `true`, return all default settings in the response. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -2512,7 +2512,7 @@ def get_field_mapping( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param include_defaults: If `true`, return all default settings in the response. 
@@ -2668,7 +2668,7 @@ def get_mapping( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param local: If `true`, the request retrieves information from the local node @@ -2878,7 +2878,7 @@ def get_template( """ .. raw:: html -

    Get index templates. +

    Get legacy index templates. Get information about one or more index templates.

    IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
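    For example (editor's sketch, not part of the patch; the wildcard pattern is a placeholder):

        from elasticsearch import Elasticsearch

        client = Elasticsearch("http://localhost:9200")  # placeholder connection details
        templates = client.indices.get_template(name="legacy-*")  # omit name to return all legacy templates
        print(templates)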

    @@ -3160,7 +3160,7 @@ def open( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If @@ -3419,8 +3419,7 @@ def put_data_lifecycle( for this data stream. A data stream lifecycle that's disabled (enabled: `false`) will have no effect on the data stream. :param expand_wildcards: Type of data stream that wildcard patterns can match. - Supports comma-separated values, such as `open,hidden`. Valid values are: - `all`, `hidden`, `open`, `closed`, `none`. + Supports comma-separated values, such as `open,hidden`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -3732,7 +3731,7 @@ def put_mapping( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param field_names: Control whether field names are enabled for the index. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. @@ -3850,8 +3849,34 @@ def put_settings( Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default.

    To revert a setting to the default value, use a null value. - The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. + The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the preserve_existing parameter to true.

    +

    There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:

    +
    {
    +            "number_of_replicas": 1
    +          }
    +          
    +

    Or you can use an index setting object:

    +
    {
    +            "index": {
    +              "number_of_replicas": 1
    +            }
    +          }
    +          
    +

    Or you can use dot notation:

    +
    {
    +            "index.number_of_replicas": 1
    +          }
    +          
    +

    Or you can embed any of the aforementioned options in a settings object. For example:

    +
    {
    +            "settings": {
    +              "index": {
    +                "number_of_replicas": 1
    +              }
    +            }
    +          }
    +          
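    Editor's note (not part of the patch): with the Python client, any of these shapes can be passed through the settings body, for example:

        from elasticsearch import Elasticsearch

        client = Elasticsearch("http://localhost:9200")  # placeholder connection details

        # Equivalent to the dot-notation body shown above ("my-index" is a placeholder).
        client.indices.put_settings(index="my-index", settings={"index.number_of_replicas": 1})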

    NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. @@ -3971,7 +3996,7 @@ def put_template( """ .. raw:: html -

    Create or update an index template. +

    Create or update a legacy index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name.

    IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
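    A brief sketch of creating such a legacy template with the Python client (editor's addition, not part of the patch; names and values are placeholders):

        from elasticsearch import Elasticsearch

        client = Elasticsearch("http://localhost:9200")  # placeholder connection details

        client.indices.put_template(
            name="legacy-logs-template",          # placeholder template name
            index_patterns=["logs-*"],            # indices the template applies to
            settings={"number_of_replicas": 1},
            mappings={"properties": {"message": {"type": "text"}}},
        )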

    @@ -4175,7 +4200,7 @@ def refresh( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. """ @@ -4374,10 +4399,9 @@ def resolve_cluster( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - NOTE: This option is only supported when specifying an index expression. - You will get an error if you specify index options to the `_resolve/cluster` - API endpoint that takes no index expression. + as `open,hidden`. NOTE: This option is only supported when specifying an + index expression. You will get an error if you specify index options to the + `_resolve/cluster` API endpoint that takes no index expression. :param ignore_throttled: If true, concrete, expanded, or aliased indices are ignored when frozen. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to @@ -4470,7 +4494,7 @@ def resolve_index( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. """ @@ -4685,7 +4709,7 @@ def segments( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param verbose: If `true`, the request returns a verbose response. @@ -5598,7 +5622,7 @@ def validate_query( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param explain: If `true`, the response returns detailed information if an error has occurred. :param ignore_unavailable: If `false`, the request returns an error if it targets diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index e77ad84f0..4b1390c29 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -370,22 +370,37 @@ def put( """ .. raw:: html -

    Create an inference endpoint. - When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    +

    Create an inference endpoint.

    IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

    +

    The following integrations are available through the inference API. You can find the available task types next to the integration name:

    +
+ • AlibabaCloud AI Search (completion, rerank, sparse_embedding, text_embedding)
+ • Amazon Bedrock (completion, text_embedding)
+ • Anthropic (completion)
+ • Azure AI Studio (completion, text_embedding)
+ • Azure OpenAI (completion, text_embedding)
+ • Cohere (completion, rerank, text_embedding)
+ • Elasticsearch (rerank, sparse_embedding, text_embedding - this service is for built-in models and models uploaded through Eland)
+ • ELSER (sparse_embedding)
+ • Google AI Studio (completion, text_embedding)
+ • Google Vertex AI (rerank, text_embedding)
+ • Hugging Face (text_embedding)
+ • Mistral (text_embedding)
+ • OpenAI (chat_completion, completion, text_embedding)
+ • VoyageAI (text_embedding, rerank)
+ • Watsonx inference integration (text_embedding)
+ • JinaAI (text_embedding, rerank)
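    As an editor's illustration (not part of the patch), the generic endpoint creation call looks roughly like this; the service name, model, and key are placeholders that vary per integration:

        from elasticsearch import Elasticsearch

        client = Elasticsearch("http://localhost:9200")  # placeholder connection details

        client.inference.put(
            task_type="text_embedding",
            inference_id="my-embedding-endpoint",  # placeholder endpoint ID
            inference_config={
                "service": "openai",  # one of the integrations listed above
                "service_settings": {
                    "api_key": "<redacted>",
                    "model_id": "text-embedding-3-small",  # placeholder model
                },
            },
        )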
    ``_ :param inference_id: The inference Id :param inference_config: - :param task_type: The task type + :param task_type: The task type. Refer to the integration list in the API description + for the available task types. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") @@ -458,11 +473,6 @@ def put_alibabacloud(

    Create an AlibabaCloud AI Search inference endpoint.

    Create an inference endpoint to perform an inference task with the alibabacloud-ai-search service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -553,16 +563,11 @@ def put_amazonbedrock( .. raw:: html

    Create an Amazon Bedrock inference endpoint.

    -

    Creates an inference endpoint to perform an inference task with the amazonbedrock service.

    +

    Create an inference endpoint to perform an inference task with the amazonbedrock service.

    INFO: You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
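    A hedged sketch of such a request with the Python client (editor's addition, not part of the patch; the parameter names follow the client's put_amazonbedrock signature and all values are placeholders):

        from elasticsearch import Elasticsearch

        client = Elasticsearch("http://localhost:9200")  # placeholder connection details

        client.inference.put_amazonbedrock(
            task_type="text_embedding",
            amazonbedrock_inference_id="bedrock-embeddings",  # placeholder endpoint ID
            service="amazonbedrock",
            service_settings={
                # The keys are stored once at creation time and cannot be retrieved later.
                "access_key": "<redacted>",
                "secret_key": "<redacted>",
                "region": "us-east-1",
                "provider": "amazontitan",
                "model": "amazon.titan-embed-text-v2:0",  # placeholder Bedrock model
            },
        )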

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -654,11 +659,6 @@ def put_anthropic(

    Create an Anthropic inference endpoint.

    Create an inference endpoint to perform an inference task with the anthropic service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -751,11 +751,6 @@ def put_azureaistudio(

    Create an Azure AI Studio inference endpoint.

    Create an inference endpoint to perform an inference task with the azureaistudio service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -853,11 +848,6 @@ def put_azureopenai(
  • GPT-3.5
  • The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -951,11 +941,6 @@ def put_cohere(

    Create a Cohere inference endpoint.

    Create an inference endpoint to perform an inference task with the cohere service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1239,11 +1224,6 @@ def put_googleaistudio(

    Create a Google AI Studio inference endpoint.

    Create an inference endpoint to perform an inference task with the googleaistudio service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1331,11 +1311,6 @@ def put_googlevertexai(

    Create a Google Vertex AI inference endpoint.

    Create an inference endpoint to perform an inference task with the googlevertexai service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1434,11 +1409,6 @@ def put_hugging_face(
  • multilingual-e5-base
  • multilingual-e5-small
-

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1528,11 +1498,6 @@ def put_jinaai(

    Create an inference endpoint to perform an inference task with the jinaai service.

    To review the available rerank models, refer to https://jina.ai/reranker. To review the available text_embedding models, refer to https://jina.ai/embeddings/.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1616,11 +1581,6 @@ def put_mistral(

    Create a Mistral inference endpoint.

    Creates an inference endpoint to perform an inference task with the mistral service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1709,11 +1669,6 @@ def put_openai(

    Create an OpenAI inference endpoint.

    Create an inference endpoint to perform an inference task with the openai service or openai compatible APIs.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1890,11 +1845,6 @@ def put_watsonx(

    Create an inference endpoint to perform an inference task with the watsonxai service. You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index 01fd7c73d..e5ca6172c 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -3876,13 +3876,7 @@ def put_job( :param description: A description of the job. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values. Valid - values are: * `all`: Match any data stream or index, including hidden ones. - * `closed`: Match closed, non-hidden indices. Also matches any non-hidden - data stream. Data streams cannot be closed. * `hidden`: Match hidden data - streams and hidden indices. Must be combined with `open`, `closed`, or both. - * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden - indices. Also matches any non-hidden data stream. + expressions match hidden data streams. Supports comma-separated values. :param groups: A list of job groups. A job can belong to no groups or many. :param ignore_throttled: If `true`, concrete, expanded or aliased indices are ignored when frozen. @@ -5145,13 +5139,7 @@ def update_datafeed( check runs only on real-time datafeeds. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values. Valid - values are: * `all`: Match any data stream or index, including hidden ones. - * `closed`: Match closed, non-hidden indices. Also matches any non-hidden - data stream. Data streams cannot be closed. * `hidden`: Match hidden data - streams and hidden indices. Must be combined with `open`, `closed`, or both. - * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden - indices. Also matches any non-hidden data stream. + expressions match hidden data streams. Supports comma-separated values. :param frequency: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the diff --git a/elasticsearch/_sync/client/nodes.py b/elasticsearch/_sync/client/nodes.py index 1ecbc7015..603d1fede 100644 --- a/elasticsearch/_sync/client/nodes.py +++ b/elasticsearch/_sync/client/nodes.py @@ -108,7 +108,8 @@ def get_repositories_metering_info( ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned - information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). + information. For more information about the nodes selective options, refer + to the node specification documentation. """ if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'node_id'")