From e827db1803b9827b6c6341d6042bbb0d9b59bfa8 Mon Sep 17 00:00:00 2001 From: Seth Michael Larson Date: Wed, 13 May 2020 13:20:15 -0500 Subject: [PATCH 1/5] Add AIOHttpConnection --- dev-requirements.txt | 4 + elasticsearch/__init__.py | 12 + elasticsearch/_async/__init__.py | 3 + elasticsearch/_async/compat.py | 23 ++ elasticsearch/_async/http_aiohttp.py | 307 +++++++++++++++++ elasticsearch/connection/base.py | 1 + setup.py | 2 + test_elasticsearch/run_tests.py | 6 +- test_elasticsearch/test_async/__init__.py | 3 + .../test_async/test_connection.py | 310 ++++++++++++++++++ 10 files changed, 670 insertions(+), 1 deletion(-) create mode 100644 elasticsearch/_async/__init__.py create mode 100644 elasticsearch/_async/compat.py create mode 100644 elasticsearch/_async/http_aiohttp.py create mode 100644 test_elasticsearch/test_async/__init__.py create mode 100644 test_elasticsearch/test_async/test_connection.py diff --git a/dev-requirements.txt b/dev-requirements.txt index 130cccf01..b7f95b66f 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -14,3 +14,7 @@ pandas pyyaml<5.3 black; python_version>="3.6" + +# Requirements for testing [async] extra +aiohttp; python_version>="3.6" +pytest-asyncio; python_version>="3.6" diff --git a/elasticsearch/__init__.py b/elasticsearch/__init__.py index 87c4a0061..a13cef1a6 100644 --- a/elasticsearch/__init__.py +++ b/elasticsearch/__init__.py @@ -9,6 +9,7 @@ __version__ = VERSION __versionstr__ = ".".join(map(str, VERSION)) +import sys import logging import warnings @@ -64,3 +65,14 @@ "AuthorizationException", "ElasticsearchDeprecationWarning", ] + +try: + # Asyncio only supported on Python 3.6+ + if sys.version_info < (3, 6): + raise ImportError + + from ._async.http_aiohttp import AIOHttpConnection + + __all__ += ["AIOHttpConnection"] +except (ImportError, SyntaxError): + pass diff --git a/elasticsearch/_async/__init__.py b/elasticsearch/_async/__init__.py new file mode 100644 index 000000000..1a3c439ef --- /dev/null +++ b/elasticsearch/_async/__init__.py @@ -0,0 +1,3 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information diff --git a/elasticsearch/_async/compat.py b/elasticsearch/_async/compat.py new file mode 100644 index 000000000..3c188c236 --- /dev/null +++ b/elasticsearch/_async/compat.py @@ -0,0 +1,23 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +import asyncio + +# Hack supporting Python 3.6 asyncio which didn't have 'get_running_loop()'. +# Essentially we want to get away from having users pass in a loop to us. +# Instead we should call 'get_running_loop()' whenever we need +# the currently running loop. +# See: https://aiopg.readthedocs.io/en/stable/run_loop.html#implementation +try: + from asyncio import get_running_loop +except ImportError: + + def get_running_loop(): + loop = asyncio.get_event_loop() + if not loop.is_running(): + raise RuntimeError("no running event loop") + return loop + + +__all__ = ["get_running_loop"] diff --git a/elasticsearch/_async/http_aiohttp.py b/elasticsearch/_async/http_aiohttp.py new file mode 100644 index 000000000..3c064a4a0 --- /dev/null +++ b/elasticsearch/_async/http_aiohttp.py @@ -0,0 +1,307 @@ +# Licensed to Elasticsearch B.V under one or more agreements. 
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +import asyncio +import ssl +import os +import urllib3 +import warnings + +import aiohttp +import yarl +from aiohttp.client_exceptions import ServerFingerprintMismatch, ServerTimeoutError + +from .compat import get_running_loop +from ..connection import Connection +from ..compat import urlencode +from ..exceptions import ( + ConnectionError, + ConnectionTimeout, + ImproperlyConfigured, + SSLError, +) + + +# sentinel value for `verify_certs`. +# This is used to detect if a user is passing in a value +# for SSL kwargs if also using an SSLContext. +VERIFY_CERTS_DEFAULT = object() +SSL_SHOW_WARN_DEFAULT = object() + +CA_CERTS = None + +try: + import certifi + + CA_CERTS = certifi.where() +except ImportError: + pass + + +class AIOHttpConnection(Connection): + def __init__( + self, + host="localhost", + port=None, + http_auth=None, + use_ssl=False, + verify_certs=VERIFY_CERTS_DEFAULT, + ssl_show_warn=SSL_SHOW_WARN_DEFAULT, + ca_certs=None, + client_cert=None, + client_key=None, + ssl_version=None, + ssl_assert_fingerprint=None, + maxsize=10, + headers=None, + ssl_context=None, + http_compress=None, + cloud_id=None, + api_key=None, + opaque_id=None, + loop=None, + **kwargs, + ): + """ + Default connection class for ``AsyncElasticsearch`` using the `aiohttp` library and the http protocol. + + :arg host: hostname of the node (default: localhost) + :arg port: port to use (integer, default: 9200) + :arg timeout: default timeout in seconds (float, default: 10) + :arg http_auth: optional http auth information as either ':' separated + string or a tuple + :arg use_ssl: use ssl for the connection if `True` + :arg verify_certs: whether to verify SSL certificates + :arg ssl_show_warn: show warning when verify certs is disabled + :arg ca_certs: optional path to CA bundle. + See https://urllib3.readthedocs.io/en/latest/security.html#using-certifi-with-urllib3 + for instructions how to get default set + :arg client_cert: path to the file containing the private key and the + certificate, or cert only if using client_key + :arg client_key: path to the file containing the private key if using + separate cert and key files (client_cert will contain only the cert) + :arg ssl_version: version of the SSL protocol to use. Choices are: + SSLv23 (default) SSLv2 SSLv3 TLSv1 (see ``PROTOCOL_*`` constants in the + ``ssl`` module for exact options for your environment). + :arg ssl_assert_hostname: use hostname verification if not `False` + :arg ssl_assert_fingerprint: verify the supplied certificate fingerprint if not `None` + :arg maxsize: the number of connections which will be kept open to this + host. See https://urllib3.readthedocs.io/en/1.4/pools.html#api for more + information. + :arg headers: any custom http headers to be add to requests + :arg http_compress: Use gzip compression + :arg cloud_id: The Cloud ID from ElasticCloud. Convenient way to connect to cloud instances. + Other host connection params will be ignored. + :arg api_key: optional API Key authentication as either base64 encoded string or a tuple. + :arg opaque_id: Send this value in the 'X-Opaque-Id' HTTP header + For tracing all requests made by this transport. + :arg loop: asyncio Event Loop to use with aiohttp. This is set by default to the currently running loop. 
+ """ + + self.headers = {} + + super().__init__( + host=host, + port=port, + use_ssl=use_ssl, + headers=headers, + http_compress=http_compress, + cloud_id=cloud_id, + api_key=api_key, + opaque_id=opaque_id, + **kwargs, + ) + + if http_auth is not None: + if isinstance(http_auth, (tuple, list)): + http_auth = ":".join(http_auth) + self.headers.update(urllib3.make_headers(basic_auth=http_auth)) + + # if providing an SSL context, raise error if any other SSL related flag is used + if ssl_context and ( + (verify_certs is not VERIFY_CERTS_DEFAULT) + or (ssl_show_warn is not SSL_SHOW_WARN_DEFAULT) + or ca_certs + or client_cert + or client_key + or ssl_version + ): + warnings.warn( + "When using `ssl_context`, all other SSL related kwargs are ignored" + ) + + self.ssl_assert_fingerprint = ssl_assert_fingerprint + if self.use_ssl and ssl_context is None: + ssl_context = ssl.SSLContext(ssl_version or ssl.PROTOCOL_TLS) + + # Convert all sentinel values to their actual default + # values if not using an SSLContext. + if verify_certs is VERIFY_CERTS_DEFAULT: + verify_certs = True + if ssl_show_warn is SSL_SHOW_WARN_DEFAULT: + ssl_show_warn = True + + if verify_certs: + ssl_context.verify_mode = ssl.CERT_REQUIRED + ssl_context.check_hostname = True + else: + ssl_context.verify_mode = ssl.CERT_NONE + ssl_context.check_hostname = False + + ca_certs = CA_CERTS if ca_certs is None else ca_certs + if verify_certs: + if not ca_certs: + raise ImproperlyConfigured( + "Root certificates are missing for certificate " + "validation. Either pass them in using the ca_certs parameter or " + "install certifi to use it automatically." + ) + else: + if ssl_show_warn: + warnings.warn( + "Connecting to %s using SSL with verify_certs=False is insecure." + % self.host + ) + + if os.path.isfile(ca_certs): + ssl_context.load_verify_locations(cafile=ca_certs) + elif os.path.isdir(ca_certs): + ssl_context.load_verify_locations(capath=ca_certs) + else: + raise ImproperlyConfigured("ca_certs parameter is not a path") + + self.headers.setdefault("connection", "keep-alive") + self.loop = loop + self.session = None + + # Parameters for creating an aiohttp.ClientSession later. + self._limit = maxsize + self._http_auth = http_auth + self._ssl_context = ssl_context + + async def perform_request( + self, method, url, params=None, body=None, timeout=None, ignore=(), headers=None + ): + if self.session is None: + await self._create_aiohttp_session() + + orig_body = body + url_path = url + if params: + query_string = urlencode(params) + else: + query_string = "" + + # There is a bug in aiohttp that disables the re-use + # of the connection in the pool when method=HEAD. 
+ # See: aio-libs/aiohttp#1769 + is_head = False + if method == "HEAD": + method = "GET" + is_head = True + + # Provide correct URL object to avoid string parsing in low-level code + url = yarl.URL.build( + scheme=self.scheme, + host=self.hostname, + port=self.port, + path=url, + query_string=query_string, + encoded=True, + ) + + timeout = aiohttp.ClientTimeout( + total=timeout if timeout is not None else self.timeout + ) + + req_headers = self.headers.copy() + if headers: + req_headers.update(headers) + + if self.http_compress and body: + body = self._gzip_compress(body) + req_headers["content-encoding"] = "gzip" + + start = self.loop.time() + try: + async with self.session.request( + method, + url, + data=body, + headers=req_headers, + timeout=timeout, + fingerprint=self.ssl_assert_fingerprint, + ) as response: + if is_head: # We actually called 'GET' so throw away the data. + await response.release() + raw_data = "" + else: + raw_data = await response.text() + duration = self.loop.time() - start + + # We want to reraise a cancellation. + except asyncio.CancelledError: + raise + + except Exception as e: + self.log_request_fail( + method, url, url_path, orig_body, self.loop.time() - start, exception=e + ) + if isinstance(e, ServerFingerprintMismatch): + raise SSLError("N/A", str(e), e) + if isinstance(e, (asyncio.TimeoutError, ServerTimeoutError)): + raise ConnectionTimeout("TIMEOUT", str(e), e) + raise ConnectionError("N/A", str(e), e) + + # raise errors based on http status codes, let the client handle those if needed + if not (200 <= response.status < 300) and response.status not in ignore: + self.log_request_fail( + method, + url, + url_path, + orig_body, + duration, + status_code=response.status, + response=raw_data, + ) + self._raise_error(response.status, raw_data) + + self.log_request_success( + method, url, url_path, orig_body, response.status, raw_data, duration + ) + + return response.status, response.headers, raw_data + + async def close(self): + """ + Explicitly closes connection + """ + if self.session: + await self.session.close() + + async def _create_aiohttp_session(self): + """Creates an aiohttp.ClientSession(). 
This is delayed until + the first call to perform_request() so that AsyncTransport has + a chance to set AIOHttpConnection.loop + """ + if self.loop is None: + self.loop = get_running_loop() + self.session = aiohttp.ClientSession( + headers=self.headers, + auto_decompress=True, + loop=self.loop, + cookie_jar=aiohttp.DummyCookieJar(), + response_class=ESClientResponse, + connector=aiohttp.TCPConnector( + limit=self._limit, use_dns_cache=True, ssl=self._ssl_context, + ), + ) + + +class ESClientResponse(aiohttp.ClientResponse): + async def text(self, encoding=None, errors="strict"): + if self._body is None: + await self.read() + + return self._body.decode("utf-8", "surrogatepass") diff --git a/elasticsearch/connection/base.py b/elasticsearch/connection/base.py index 20b7c7cab..c93683099 100644 --- a/elasticsearch/connection/base.py +++ b/elasticsearch/connection/base.py @@ -121,6 +121,7 @@ def __init__( self.use_ssl = use_ssl self.http_compress = http_compress or False + self.scheme = scheme self.hostname = host self.port = port self.host = "%s://%s" % (scheme, host) diff --git a/setup.py b/setup.py index 7ac28f080..3ec8262d8 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,7 @@ "pytest", "pytest-cov", ] +async_require = ["aiohttp>=3,<4", "yarl"] docs_require = ["sphinx<1.7", "sphinx_rtd_theme"] generate_require = ["black", "jinja2"] @@ -67,5 +68,6 @@ "develop": tests_require + docs_require + generate_require, "docs": docs_require, "requests": ["requests>=2.4.0, <3.0.0"], + "async": async_require, }, ) diff --git a/test_elasticsearch/run_tests.py b/test_elasticsearch/run_tests.py index 64eb083cf..c91dca384 100755 --- a/test_elasticsearch/run_tests.py +++ b/test_elasticsearch/run_tests.py @@ -79,9 +79,13 @@ def run_all(argv=None): "--log-level=DEBUG", "--cache-clear", "-vv", - abspath(dirname(__file__)), ] + if sys.version_info < (3, 6): + argv.append("--ignore=test_elasticsearch/test_async/") + + argv.append(abspath(dirname(__file__)),) + exit_code = 0 try: subprocess.check_call(argv, stdout=sys.stdout, stderr=sys.stderr) diff --git a/test_elasticsearch/test_async/__init__.py b/test_elasticsearch/test_async/__init__.py new file mode 100644 index 000000000..1a3c439ef --- /dev/null +++ b/test_elasticsearch/test_async/__init__.py @@ -0,0 +1,3 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information diff --git a/test_elasticsearch/test_async/test_connection.py b/test_elasticsearch/test_async/test_connection.py new file mode 100644 index 000000000..2989a5ba8 --- /dev/null +++ b/test_elasticsearch/test_async/test_connection.py @@ -0,0 +1,310 @@ +# -*- coding: utf-8 -*- +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
+# See the LICENSE file in the project root for more information + +import ssl +import gzip +import io +from mock import patch +import warnings +from platform import python_version +import aiohttp +import pytest + +from elasticsearch import AIOHttpConnection +from elasticsearch import __versionstr__ +from ..test_cases import TestCase, SkipTest + +pytestmark = pytest.mark.asyncio + + +def gzip_decompress(data): + buf = gzip.GzipFile(fileobj=io.BytesIO(data), mode="rb") + return buf.read() + + +class TestAIOHttpConnection(TestCase): + async def _get_mock_connection(self, connection_params={}, response_body=b"{}"): + con = AIOHttpConnection(**connection_params) + await con._create_aiohttp_session() + + def _dummy_request(*args, **kwargs): + class DummyResponse: + async def __aenter__(self, *_, **__): + return self + + async def __aexit__(self, *_, **__): + pass + + async def text(self): + return response_body.decode("utf-8", "surrogatepass") + + dummy_response = DummyResponse() + dummy_response.headers = {} + dummy_response.status = 200 + _dummy_request.call_args = (args, kwargs) + return dummy_response + + con.session.request = _dummy_request + return con + + async def test_ssl_context(self): + try: + context = ssl.create_default_context() + except AttributeError: + # if create_default_context raises an AttributeError Exception + # it means SSLContext is not available for that version of python + # and we should skip this test. + raise SkipTest( + "Test test_ssl_context is skipped cause SSLContext is not available for this version of ptyhon" + ) + + con = AIOHttpConnection(use_ssl=True, ssl_context=context) + await con._create_aiohttp_session() + self.assertTrue(con.use_ssl) + self.assertEqual(con.session.connector._ssl, context) + + def test_opaque_id(self): + con = AIOHttpConnection(opaque_id="app-1") + self.assertEqual(con.headers["x-opaque-id"], "app-1") + + def test_http_cloud_id(self): + con = AIOHttpConnection( + cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==" + ) + self.assertTrue(con.use_ssl) + self.assertEqual( + con.host, "https://4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io" + ) + self.assertEqual(con.port, None) + self.assertEqual( + con.hostname, "4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io" + ) + self.assertTrue(con.http_compress) + + con = AIOHttpConnection( + cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==", + port=9243, + ) + self.assertEqual( + con.host, + "https://4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io:9243", + ) + self.assertEqual(con.port, 9243) + self.assertEqual( + con.hostname, "4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io" + ) + + def test_api_key_auth(self): + # test with tuple + con = AIOHttpConnection( + cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==", + api_key=("elastic", "changeme1"), + ) + self.assertEqual( + con.headers["authorization"], "ApiKey ZWxhc3RpYzpjaGFuZ2VtZTE=" + ) + self.assertEqual( + con.host, "https://4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io" + ) + + # test with base64 encoded string + con = AIOHttpConnection( + cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==", + api_key="ZWxhc3RpYzpjaGFuZ2VtZTI=", + ) + 
self.assertEqual( + con.headers["authorization"], "ApiKey ZWxhc3RpYzpjaGFuZ2VtZTI=" + ) + self.assertEqual( + con.host, "https://4fa8821e75634032bed1cf22110e2f97.us-east-1.aws.found.io" + ) + + async def test_no_http_compression(self): + con = await self._get_mock_connection() + self.assertFalse(con.http_compress) + self.assertNotIn("accept-encoding", con.headers) + + await con.perform_request("GET", "/") + + _, kwargs = con.session.request.call_args + + self.assertFalse(kwargs["data"]) + self.assertNotIn("accept-encoding", kwargs["headers"]) + self.assertNotIn("content-encoding", kwargs["headers"]) + + async def test_http_compression(self): + con = await self._get_mock_connection({"http_compress": True}) + self.assertTrue(con.http_compress) + self.assertEqual(con.headers["accept-encoding"], "gzip,deflate") + + # 'content-encoding' shouldn't be set at a connection level. + # Should be applied only if the request is sent with a body. + self.assertNotIn("content-encoding", con.headers) + + await con.perform_request("GET", "/", body=b"{}") + + _, kwargs = con.session.request.call_args + + self.assertEqual(gzip_decompress(kwargs["data"]), b"{}") + self.assertEqual(kwargs["headers"]["accept-encoding"], "gzip,deflate") + self.assertEqual(kwargs["headers"]["content-encoding"], "gzip") + + await con.perform_request("GET", "/") + + _, kwargs = con.session.request.call_args + + self.assertFalse(kwargs["data"]) + self.assertEqual(kwargs["headers"]["accept-encoding"], "gzip,deflate") + self.assertNotIn("content-encoding", kwargs["headers"]) + + def test_cloud_id_http_compress_override(self): + # 'http_compress' will be 'True' by default for connections with + # 'cloud_id' set but should prioritize user-defined values. + con = AIOHttpConnection( + cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==", + ) + self.assertEqual(con.http_compress, True) + + con = AIOHttpConnection( + cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==", + http_compress=False, + ) + self.assertEqual(con.http_compress, False) + + con = AIOHttpConnection( + cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==", + http_compress=True, + ) + self.assertEqual(con.http_compress, True) + + def test_default_user_agent(self): + con = AIOHttpConnection() + self.assertEqual( + con._get_default_user_agent(), + "elasticsearch-py/%s (Python %s)" % (__versionstr__, python_version()), + ) + + def test_timeout_set(self): + con = AIOHttpConnection(timeout=42) + self.assertEqual(42, con.timeout) + + def test_keep_alive_is_on_by_default(self): + con = AIOHttpConnection() + self.assertEqual( + { + "connection": "keep-alive", + "content-type": "application/json", + "user-agent": con._get_default_user_agent(), + }, + con.headers, + ) + + def test_http_auth(self): + con = AIOHttpConnection(http_auth="username:secret") + self.assertEqual( + { + "authorization": "Basic dXNlcm5hbWU6c2VjcmV0", + "connection": "keep-alive", + "content-type": "application/json", + "user-agent": con._get_default_user_agent(), + }, + con.headers, + ) + + def test_http_auth_tuple(self): + con = AIOHttpConnection(http_auth=("username", "secret")) + self.assertEqual( + { + "authorization": "Basic dXNlcm5hbWU6c2VjcmV0", + "content-type": "application/json", + "connection": "keep-alive", + "user-agent": 
con._get_default_user_agent(), + }, + con.headers, + ) + + def test_http_auth_list(self): + con = AIOHttpConnection(http_auth=["username", "secret"]) + self.assertEqual( + { + "authorization": "Basic dXNlcm5hbWU6c2VjcmV0", + "content-type": "application/json", + "connection": "keep-alive", + "user-agent": con._get_default_user_agent(), + }, + con.headers, + ) + + def test_uses_https_if_verify_certs_is_off(self): + with warnings.catch_warnings(record=True) as w: + con = AIOHttpConnection(use_ssl=True, verify_certs=False) + self.assertEqual(1, len(w)) + self.assertEqual( + "Connecting to https://localhost:9200 using SSL with verify_certs=False is insecure.", + str(w[0].message), + ) + + self.assertTrue(con.use_ssl) + self.assertEqual(con.scheme, "https") + self.assertEqual(con.host, "https://localhost:9200") + + async def test_nowarn_when_test_uses_https_if_verify_certs_is_off(self): + with warnings.catch_warnings(record=True) as w: + con = AIOHttpConnection( + use_ssl=True, verify_certs=False, ssl_show_warn=False + ) + await con._create_aiohttp_session() + self.assertEqual(0, len(w)) + + self.assertIsInstance(con.session, aiohttp.ClientSession) + + def test_doesnt_use_https_if_not_specified(self): + con = AIOHttpConnection() + self.assertFalse(con.use_ssl) + + def test_no_warning_when_using_ssl_context(self): + ctx = ssl.create_default_context() + with warnings.catch_warnings(record=True) as w: + AIOHttpConnection(ssl_context=ctx) + self.assertEqual(0, len(w), str([x.message for x in w])) + + def test_warns_if_using_non_default_ssl_kwargs_with_ssl_context(self): + for kwargs in ( + {"ssl_show_warn": False}, + {"ssl_show_warn": True}, + {"verify_certs": True}, + {"verify_certs": False}, + {"ca_certs": "/path/to/certs"}, + {"ssl_show_warn": True, "ca_certs": "/path/to/certs"}, + ): + kwargs["ssl_context"] = ssl.create_default_context() + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + + AIOHttpConnection(**kwargs) + + self.assertEqual(1, len(w)) + self.assertEqual( + "When using `ssl_context`, all other SSL related kwargs are ignored", + str(w[0].message), + ) + + @patch("elasticsearch.connection.base.logger") + async def test_uncompressed_body_logged(self, logger): + con = await self._get_mock_connection(connection_params={"http_compress": True}) + await con.perform_request("GET", "/", body=b'{"example": "body"}') + + self.assertEqual(2, logger.debug.call_count) + req, resp = logger.debug.call_args_list + + self.assertEqual('> {"example": "body"}', req[0][0] % req[0][1:]) + self.assertEqual("< {}", resp[0][0] % resp[0][1:]) + + async def test_surrogatepass_into_bytes(self): + buf = b"\xe4\xbd\xa0\xe5\xa5\xbd\xed\xa9\xaa" + con = await self._get_mock_connection(response_body=buf) + status, headers, data = await con.perform_request("GET", "/") + self.assertEqual(u"你好\uda6a", data) From a549a2dd3338ef0a6da86ed596472f7003ab3c60 Mon Sep 17 00:00:00 2001 From: Seth Michael Larson Date: Wed, 13 May 2020 13:21:42 -0500 Subject: [PATCH 2/5] Add AsyncTransport --- elasticsearch/__init__.py | 3 +- elasticsearch/_async/transport.py | 331 ++++++++++++++ elasticsearch/connection_pool.py | 21 +- elasticsearch/transport.py | 93 ++-- .../test_async/test_transport.py | 425 ++++++++++++++++++ 5 files changed, 835 insertions(+), 38 deletions(-) create mode 100644 elasticsearch/_async/transport.py create mode 100644 test_elasticsearch/test_async/test_transport.py diff --git a/elasticsearch/__init__.py b/elasticsearch/__init__.py index a13cef1a6..c1a7c27bc 100644 --- 
a/elasticsearch/__init__.py +++ b/elasticsearch/__init__.py @@ -72,7 +72,8 @@ raise ImportError from ._async.http_aiohttp import AIOHttpConnection + from ._async.transport import AsyncTransport - __all__ += ["AIOHttpConnection"] + __all__ += ["AIOHttpConnection", "AsyncTransport"] except (ImportError, SyntaxError): pass diff --git a/elasticsearch/_async/transport.py b/elasticsearch/_async/transport.py new file mode 100644 index 000000000..ac5d0b2ba --- /dev/null +++ b/elasticsearch/_async/transport.py @@ -0,0 +1,331 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +import asyncio +import logging +from itertools import chain + +from .compat import get_running_loop +from .http_aiohttp import AIOHttpConnection +from ..transport import Transport +from ..exceptions import ( + TransportError, + ConnectionTimeout, + ConnectionError, + SerializationError, +) + + +logger = logging.getLogger("elasticsearch") + + +class AsyncTransport(Transport): + """ + Encapsulation of transport-related to logic. Handles instantiation of the + individual connections as well as creating a connection pool to hold them. + + Main interface is the `perform_request` method. + """ + + DEFAULT_CONNECTION_CLASS = AIOHttpConnection + + def __init__(self, hosts, *args, sniff_on_start=False, **kwargs): + """ + :arg hosts: list of dictionaries, each containing keyword arguments to + create a `connection_class` instance + :arg connection_class: subclass of :class:`~elasticsearch.Connection` to use + :arg connection_pool_class: subclass of :class:`~elasticsearch.ConnectionPool` to use + :arg host_info_callback: callback responsible for taking the node information from + `/_cluster/nodes`, along with already extracted information, and + producing a list of arguments (same as `hosts` parameter) + :arg sniff_on_start: flag indicating whether to obtain a list of nodes + from the cluster at startup time + :arg sniffer_timeout: number of seconds between automatic sniffs + :arg sniff_on_connection_fail: flag controlling if connection failure triggers a sniff + :arg sniff_timeout: timeout used for the sniff request - it should be a + fast api call and we are talking potentially to more nodes so we want + to fail quickly. Not used during initial sniffing (if + ``sniff_on_start`` is on) when the connection still isn't + initialized. + :arg serializer: serializer instance + :arg serializers: optional dict of serializer instances that will be + used for deserializing data coming from the server. (key is the mimetype) + :arg default_mimetype: when no mimetype is specified by the server + response assume this mimetype, defaults to `'application/json'` + :arg max_retries: maximum number of retries before an exception is propagated + :arg retry_on_status: set of HTTP status codes on which we should retry + on a different node. defaults to ``(502, 503, 504)`` + :arg retry_on_timeout: should timeout trigger a retry on different + node? (default `False`) + :arg send_get_body_as: for GET requests with body this option allows + you to specify an alternate way of execution for environments that + don't support passing bodies with GET requests. If you set this to + 'POST' a POST method will be used instead, if to 'source' then the body + will be serialized and passed as a query parameter `source`. 
+ + Any extra keyword arguments will be passed to the `connection_class` + when creating and instance unless overridden by that connection's + options provided as part of the hosts parameter. + """ + self.sniffing_task = None + self.loop = None + self._async_init_called = False + + super(AsyncTransport, self).__init__( + *args, hosts=[], sniff_on_start=False, **kwargs + ) + + # Don't enable sniffing on Cloud instances. + if kwargs.get("cloud_id", False): + sniff_on_start = False + + # Since we defer connections / sniffing to not occur + # within the constructor we never want to signal to + # our parent to 'sniff_on_start' or non-empty 'hosts'. + self.hosts = hosts + self.sniff_on_start = sniff_on_start + + async def _async_init(self): + """This is our stand-in for an async constructor. Everything + that was deferred within __init__() should be done here now. + + This method will only be called once per AsyncTransport instance + and is called from one of AsyncElasticsearch.__aenter__(), + AsyncTransport.perform_request() or AsyncTransport.get_connection() + """ + # Detect the async loop we're running in and set it + # on all already created HTTP connections. + self.loop = get_running_loop() + self.kwargs["loop"] = self.loop + + # Now that we have a loop we can create all our HTTP connections + self.set_connections(self.hosts) + self.seed_connections = list(self.connection_pool.connections[:]) + + # ... and we can start sniffing in the background. + if self.sniffing_task is None and self.sniff_on_start: + self.last_sniff = self.loop.time() + self.create_sniff_task(initial=True) + + async def _async_call(self): + """This method is called within any async method of AsyncTransport + where the transport is not closing. This will check to see if we should + call our _async_init() or create a new sniffing task + """ + if not self._async_init_called: + self._async_init_called = True + await self._async_init() + + if self.sniffer_timeout: + if self.loop.time() >= self.last_sniff + self.sniff_timeout: + self.create_sniff_task() + + async def _get_node_info(self, conn, initial): + try: + # use small timeout for the sniffing request, should be a fast api call + _, headers, node_info = await conn.perform_request( + "GET", + "/_nodes/_all/http", + timeout=self.sniff_timeout if not initial else None, + ) + return self.deserializer.loads(node_info, headers.get("content-type")) + except Exception: + pass + return None + + async def _get_sniff_data(self, initial=False): + previous_sniff = self.last_sniff + + # reset last_sniff timestamp + self.last_sniff = self.loop.time() + + # use small timeout for the sniffing request, should be a fast api call + timeout = self.sniff_timeout if not initial else None + + def _sniff_request(conn): + return self.loop.create_task( + conn.perform_request("GET", "/_nodes/_all/http", timeout=timeout) + ) + + # Go through all current connections as well as the + # seed_connections for good measure + tasks = [] + for conn in self.connection_pool.connections: + tasks.append(_sniff_request(conn)) + for conn in self.seed_connections: + # Ensure that we don't have any duplication within seed_connections. 
+ if conn in self.connection_pool.connections: + continue + tasks.append(_sniff_request(conn)) + + done = () + try: + while tasks: + # execute sniff requests in parallel, wait for first to return + done, tasks = await asyncio.wait( + tasks, return_when=asyncio.FIRST_COMPLETED, loop=self.loop + ) + # go through all the finished tasks + for t in done: + try: + _, headers, node_info = t.result() + node_info = self.deserializer.loads( + node_info, headers.get("content-type") + ) + except (ConnectionError, SerializationError): + continue + node_info = list(node_info["nodes"].values()) + return node_info + else: + # no task has finished completely + raise TransportError("N/A", "Unable to sniff hosts.") + except Exception: + # keep the previous value on error + self.last_sniff = previous_sniff + raise + finally: + # Cancel all the pending tasks + for task in chain(done, tasks): + task.cancel() + + async def sniff_hosts(self, initial=False): + """Either spawns a sniffing_task which does regular sniffing + over time or does a single sniffing session and awaits the results. + """ + # Without a loop we can't do anything. + if not self.loop: + return + + node_info = await self._get_sniff_data(initial) + hosts = list(filter(None, (self._get_host_info(n) for n in node_info))) + + # we weren't able to get any nodes, maybe using an incompatible + # transport_schema or host_info_callback blocked all - raise error. + if not hosts: + raise TransportError( + "N/A", "Unable to sniff hosts - no viable hosts found." + ) + + # remember current live connections + orig_connections = self.connection_pool.connections[:] + self.set_connections(hosts) + # close those connections that are not in use any more + for c in orig_connections: + if c not in self.connection_pool.connections: + await c.close() + + def create_sniff_task(self, initial=False): + """ + Initiate a sniffing task. Make sure we only have one sniff request + running at any given time. If a finished sniffing request is around, + collect its result (which can raise its exception). + """ + if self.sniffing_task and self.sniffing_task.done(): + try: + if self.sniffing_task is not None: + self.sniffing_task.result() + finally: + self.sniffing_task = None + + if self.sniffing_task is None: + self.sniffing_task = self.loop.create_task(self.sniff_hosts(initial)) + + def mark_dead(self, connection): + """ + Mark a connection as dead (failed) in the connection pool. If sniffing + on failure is enabled this will initiate the sniffing process. + + :arg connection: instance of :class:`~elasticsearch.Connection` that failed + """ + self.connection_pool.mark_dead(connection) + if self.sniff_on_connection_fail: + self.create_sniff_task() + + def get_connection(self): + return self.connection_pool.get_connection() + + async def perform_request(self, method, url, headers=None, params=None, body=None): + """ + Perform the actual request. Retrieve a connection from the connection + pool, pass all the information to it's perform_request method and + return the data. + + If an exception was raised, mark the connection as failed and retry (up + to `max_retries` times). + + If the operation was successful and the connection used was previously + marked as dead, mark it as live, resetting it's failure count. 
+ + :arg method: HTTP method to use + :arg url: absolute url (without host) to target + :arg headers: dictionary of headers, will be handed over to the + underlying :class:`~elasticsearch.Connection` class + :arg params: dictionary of query parameters, will be handed over to the + underlying :class:`~elasticsearch.Connection` class for serialization + :arg body: body of the request, will be serialized using serializer and + passed to the connection + """ + await self._async_call() + + method, params, body, ignore, timeout = self._resolve_request_args( + method, params, body + ) + + for attempt in range(self.max_retries + 1): + connection = self.get_connection() + + try: + status, headers, data = await connection.perform_request( + method, + url, + params, + body, + headers=headers, + ignore=ignore, + timeout=timeout, + ) + except TransportError as e: + if method == "HEAD" and e.status_code == 404: + return False + + retry = False + if isinstance(e, ConnectionTimeout): + retry = self.retry_on_timeout + elif isinstance(e, ConnectionError): + retry = True + elif e.status_code in self.retry_on_status: + retry = True + + if retry: + # only mark as dead if we are retrying + self.mark_dead(connection) + # raise exception on last retry + if attempt == self.max_retries: + raise + else: + raise + + else: + if method == "HEAD": + return 200 <= status < 300 + + # connection didn't fail, confirm it's live status + self.connection_pool.mark_live(connection) + if data: + data = self.deserializer.loads(data, headers.get("content-type")) + return data + + async def close(self): + """ + Explicitly closes connections + """ + if self.sniffing_task: + try: + self.sniffing_task.cancel() + await self.sniffing_task + except asyncio.CancelledError: + pass + self.sniffing_task = None + for connection in self.connection_pool.connections: + await connection.close() diff --git a/elasticsearch/connection_pool.py b/elasticsearch/connection_pool.py index 496c7414e..db694b25b 100644 --- a/elasticsearch/connection_pool.py +++ b/elasticsearch/connection_pool.py @@ -256,9 +256,12 @@ def close(self): """ Explicitly closes connections """ - for conn in self.orig_connections: + for conn in self.connections: conn.close() + def __repr__(self): + return "<%s: %r>" % (type(self).__name__, self.connections) + class DummyConnectionPool(ConnectionPool): def __init__(self, connections, **kwargs): @@ -284,3 +287,19 @@ def _noop(self, *args, **kwargs): pass mark_dead = mark_live = resurrect = _noop + + +class EmptyConnectionPool(ConnectionPool): + """A connection pool that is empty. Errors out if used.""" + + def __init__(self, *_, **__): + self.connections = [] + self.connection_opts = [] + + def get_connection(self): + raise ImproperlyConfigured("No connections were configured") + + def _noop(self, *args, **kwargs): + pass + + close = mark_dead = mark_live = resurrect = _noop diff --git a/elasticsearch/transport.py b/elasticsearch/transport.py index faac81a55..79e8d5405 100644 --- a/elasticsearch/transport.py +++ b/elasticsearch/transport.py @@ -6,7 +6,7 @@ from itertools import chain from .connection import Urllib3HttpConnection -from .connection_pool import ConnectionPool, DummyConnectionPool +from .connection_pool import ConnectionPool, DummyConnectionPool, EmptyConnectionPool from .serializer import JSONSerializer, Deserializer, DEFAULT_SERIALIZERS from .exceptions import ( ConnectionError, @@ -44,10 +44,12 @@ class Transport(object): Main interface is the `perform_request` method. 
""" + DEFAULT_CONNECTION_CLASS = Urllib3HttpConnection + def __init__( self, hosts, - connection_class=Urllib3HttpConnection, + connection_class=None, connection_pool_class=ConnectionPool, host_info_callback=get_host_info, sniff_on_start=False, @@ -100,6 +102,8 @@ def __init__( when creating and instance unless overridden by that connection's options provided as part of the hosts parameter. """ + if connection_class is None: + connection_class = self.DEFAULT_CONNECTION_CLASS # serialization config _serializers = DEFAULT_SERIALIZERS.copy() @@ -127,10 +131,18 @@ def __init__( self.kwargs = kwargs self.hosts = hosts - # ...and instantiate them - self.set_connections(hosts) - # retain the original connection instances for sniffing - self.seed_connections = self.connection_pool.connections[:] + # Start with an empty pool specifically for `AsyncTransport`. + # It should never be used, will be replaced on first call to + # .set_connections() + self.connection_pool = EmptyConnectionPool() + + if hosts: + # ...and instantiate them + self.set_connections(hosts) + # retain the original connection instances for sniffing + self.seed_connections = list(self.connection_pool.connections[:]) + else: + self.seed_connections = [] # Don't enable sniffing on Cloud instances. if kwargs.get("cloud_id", False): @@ -139,6 +151,7 @@ def __init__( # sniffing data self.sniffer_timeout = sniffer_timeout + self.sniff_on_start = sniff_on_start self.sniff_on_connection_fail = sniff_on_connection_fail self.last_sniff = time.time() self.sniff_timeout = sniff_timeout @@ -321,36 +334,9 @@ def perform_request(self, method, url, headers=None, params=None, body=None): :arg body: body of the request, will be serialized using serializer and passed to the connection """ - if body is not None: - body = self.serializer.dumps(body) - - # some clients or environments don't support sending GET with body - if method in ("HEAD", "GET") and self.send_get_body_as != "GET": - # send it as post instead - if self.send_get_body_as == "POST": - method = "POST" - - # or as source parameter - elif self.send_get_body_as == "source": - if params is None: - params = {} - params["source"] = body - body = None - - if body is not None: - try: - body = body.encode("utf-8", "surrogatepass") - except (UnicodeDecodeError, AttributeError): - # bytes/str - no need to re-encode - pass - - ignore = () - timeout = None - if params: - timeout = params.pop("request_timeout", None) - ignore = params.pop("ignore", ()) - if isinstance(ignore, int): - ignore = (ignore,) + method, params, body, ignore, timeout = self._resolve_request_args( + method, params, body + ) for attempt in range(self.max_retries + 1): connection = self.get_connection() @@ -405,3 +391,38 @@ def close(self): Explicitly closes connections """ self.connection_pool.close() + + def _resolve_request_args(self, method, params, body): + """Resolves parameters for .perform_request()""" + if body is not None: + body = self.serializer.dumps(body) + + # some clients or environments don't support sending GET with body + if method in ("HEAD", "GET") and self.send_get_body_as != "GET": + # send it as post instead + if self.send_get_body_as == "POST": + method = "POST" + + # or as source parameter + elif self.send_get_body_as == "source": + if params is None: + params = {} + params["source"] = body + body = None + + if body is not None: + try: + body = body.encode("utf-8", "surrogatepass") + except (UnicodeDecodeError, AttributeError): + # bytes/str - no need to re-encode + pass + + ignore = () + timeout = 
None + if params: + timeout = params.pop("request_timeout", None) + ignore = params.pop("ignore", ()) + if isinstance(ignore, int): + ignore = (ignore,) + + return method, params, body, ignore, timeout diff --git a/test_elasticsearch/test_async/test_transport.py b/test_elasticsearch/test_async/test_transport.py new file mode 100644 index 000000000..8e5b51a2f --- /dev/null +++ b/test_elasticsearch/test_async/test_transport.py @@ -0,0 +1,425 @@ +# -*- coding: utf-8 -*- +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from __future__ import unicode_literals +import asyncio +from mock import patch +import pytest + +from elasticsearch import AsyncTransport +from elasticsearch.connection import Connection +from elasticsearch.connection_pool import DummyConnectionPool +from elasticsearch.exceptions import ConnectionError + + +pytestmark = pytest.mark.asyncio + + +class DummyConnection(Connection): + def __init__(self, **kwargs): + self.exception = kwargs.pop("exception", None) + self.status, self.data = kwargs.pop("status", 200), kwargs.pop("data", "{}") + self.headers = kwargs.pop("headers", {}) + self.delay = kwargs.pop("delay", 0) + self.calls = [] + self.closed = False + super(DummyConnection, self).__init__(**kwargs) + + async def perform_request(self, *args, **kwargs): + if self.closed: + raise RuntimeError("This connection is closed") + if self.delay: + await asyncio.sleep(self.delay) + self.calls.append((args, kwargs)) + if self.exception: + raise self.exception + return self.status, self.headers, self.data + + async def close(self): + if self.closed: + raise RuntimeError("This connection is already closed") + self.closed = True + + +CLUSTER_NODES = """{ + "_nodes" : { + "total" : 1, + "successful" : 1, + "failed" : 0 + }, + "cluster_name" : "elasticsearch", + "nodes" : { + "SRZpKFZdQguhhvifmN6UVA" : { + "name" : "SRZpKFZ", + "transport_address" : "127.0.0.1:9300", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ "master", "data", "ingest" ], + "http" : { + "bound_address" : [ "[fe80::1]:9200", "[::1]:9200", "127.0.0.1:9200" ], + "publish_address" : "1.1.1.1:123", + "max_content_length_in_bytes" : 104857600 + } + } + } +}""" + +CLUSTER_NODES_7x_PUBLISH_HOST = """{ + "_nodes" : { + "total" : 1, + "successful" : 1, + "failed" : 0 + }, + "cluster_name" : "elasticsearch", + "nodes" : { + "SRZpKFZdQguhhvifmN6UVA" : { + "name" : "SRZpKFZ", + "transport_address" : "127.0.0.1:9300", + "host" : "127.0.0.1", + "ip" : "127.0.0.1", + "version" : "5.0.0", + "build_hash" : "253032b", + "roles" : [ "master", "data", "ingest" ], + "http" : { + "bound_address" : [ "[fe80::1]:9200", "[::1]:9200", "127.0.0.1:9200" ], + "publish_address" : "somehost.tld/1.1.1.1:123", + "max_content_length_in_bytes" : 104857600 + } + } + } +}""" + + +class TestTransport: + async def test_single_connection_uses_dummy_connection_pool(self): + t = AsyncTransport([{}]) + await t._async_call() + assert isinstance(t.connection_pool, DummyConnectionPool) + t = AsyncTransport([{"host": "localhost"}]) + await t._async_call() + assert isinstance(t.connection_pool, DummyConnectionPool) + + async def test_request_timeout_extracted_from_params_and_passed(self): + t = AsyncTransport([{}], connection_class=DummyConnection) + + await t.perform_request("GET", "/", params={"request_timeout": 42}) + assert 1 == 
len(t.get_connection().calls) + assert ("GET", "/", {}, None) == t.get_connection().calls[0][0] + assert { + "timeout": 42, + "ignore": (), + "headers": None, + } == t.get_connection().calls[0][1] + + async def test_opaque_id(self): + t = AsyncTransport([{}], opaque_id="app-1", connection_class=DummyConnection) + + await t.perform_request("GET", "/") + assert 1 == len(t.get_connection().calls) + assert ("GET", "/", None, None) == t.get_connection().calls[0][0] + assert { + "timeout": None, + "ignore": (), + "headers": None, + } == t.get_connection().calls[0][1] + + # Now try with an 'x-opaque-id' set on perform_request(). + await t.perform_request("GET", "/", headers={"x-opaque-id": "request-1"}) + assert 2 == len(t.get_connection().calls) + assert ("GET", "/", None, None) == t.get_connection().calls[1][0] + assert { + "timeout": None, + "ignore": (), + "headers": {"x-opaque-id": "request-1"}, + } == t.get_connection().calls[1][1] + + async def test_request_with_custom_user_agent_header(self): + t = AsyncTransport([{}], connection_class=DummyConnection) + + await t.perform_request( + "GET", "/", headers={"user-agent": "my-custom-value/1.2.3"} + ) + assert 1 == len(t.get_connection().calls) + assert { + "timeout": None, + "ignore": (), + "headers": {"user-agent": "my-custom-value/1.2.3"}, + } == t.get_connection().calls[0][1] + + async def test_send_get_body_as_source(self): + t = AsyncTransport( + [{}], send_get_body_as="source", connection_class=DummyConnection + ) + + await t.perform_request("GET", "/", body={}) + assert 1 == len(t.get_connection().calls) + assert ("GET", "/", {"source": "{}"}, None) == t.get_connection().calls[0][0] + + async def test_send_get_body_as_post(self): + t = AsyncTransport( + [{}], send_get_body_as="POST", connection_class=DummyConnection + ) + + await t.perform_request("GET", "/", body={}) + assert 1 == len(t.get_connection().calls) + assert ("POST", "/", None, b"{}") == t.get_connection().calls[0][0] + + async def test_body_gets_encoded_into_bytes(self): + t = AsyncTransport([{}], connection_class=DummyConnection) + + await t.perform_request("GET", "/", body="你好") + assert 1 == len(t.get_connection().calls) + assert ( + "GET", + "/", + None, + b"\xe4\xbd\xa0\xe5\xa5\xbd", + ) == t.get_connection().calls[0][0] + + async def test_body_bytes_get_passed_untouched(self): + t = AsyncTransport([{}], connection_class=DummyConnection) + + body = b"\xe4\xbd\xa0\xe5\xa5\xbd" + await t.perform_request("GET", "/", body=body) + assert 1 == len(t.get_connection().calls) + assert ("GET", "/", None, body) == t.get_connection().calls[0][0] + + async def test_body_surrogates_replaced_encoded_into_bytes(self): + t = AsyncTransport([{}], connection_class=DummyConnection) + + await t.perform_request("GET", "/", body="你好\uda6a") + assert 1 == len(t.get_connection().calls) + assert ( + "GET", + "/", + None, + b"\xe4\xbd\xa0\xe5\xa5\xbd\xed\xa9\xaa", + ) == t.get_connection().calls[0][0] + + async def test_kwargs_passed_on_to_connections(self): + t = AsyncTransport([{"host": "google.com"}], port=123) + await t._async_call() + assert 1 == len(t.connection_pool.connections) + assert "http://google.com:123" == t.connection_pool.connections[0].host + + async def test_kwargs_passed_on_to_connection_pool(self): + dt = object() + t = AsyncTransport([{}, {}], dead_timeout=dt) + await t._async_call() + assert dt is t.connection_pool.dead_timeout + + async def test_custom_connection_class(self): + class MyConnection(object): + def __init__(self, **kwargs): + self.kwargs = kwargs + + t = 
AsyncTransport([{}], connection_class=MyConnection) + await t._async_call() + assert 1 == len(t.connection_pool.connections) + assert isinstance(t.connection_pool.connections[0], MyConnection) + + def test_add_connection(self): + t = AsyncTransport([{}], randomize_hosts=False) + t.add_connection({"host": "google.com", "port": 1234}) + + assert 2 == len(t.connection_pool.connections) + assert "http://google.com:1234" == t.connection_pool.connections[1].host + + async def test_request_will_fail_after_X_retries(self): + t = AsyncTransport( + [{"exception": ConnectionError("abandon ship")}], + connection_class=DummyConnection, + ) + + connection_error = False + try: + await t.perform_request("GET", "/") + except ConnectionError: + connection_error = True + + assert connection_error + assert 4 == len(t.get_connection().calls) + + async def test_failed_connection_will_be_marked_as_dead(self): + t = AsyncTransport( + [{"exception": ConnectionError("abandon ship")}] * 2, + connection_class=DummyConnection, + ) + + connection_error = False + try: + await t.perform_request("GET", "/") + except ConnectionError: + connection_error = True + + assert connection_error + assert 0 == len(t.connection_pool.connections) + + async def test_resurrected_connection_will_be_marked_as_live_on_success(self): + t = AsyncTransport([{}, {}], connection_class=DummyConnection) + await t._async_call() + con1 = t.connection_pool.get_connection() + con2 = t.connection_pool.get_connection() + t.connection_pool.mark_dead(con1) + t.connection_pool.mark_dead(con2) + + await t.perform_request("GET", "/") + assert 1 == len(t.connection_pool.connections) + assert 1 == len(t.connection_pool.dead_count) + + async def test_sniff_will_use_seed_connections(self): + t = AsyncTransport([{"data": CLUSTER_NODES}], connection_class=DummyConnection) + await t._async_call() + t.set_connections([{"data": "invalid"}]) + + await t.sniff_hosts() + assert 1 == len(t.connection_pool.connections) + assert "http://1.1.1.1:123" == t.get_connection().host + + async def test_sniff_on_start_fetches_and_uses_nodes_list(self): + t = AsyncTransport( + [{"data": CLUSTER_NODES}], + connection_class=DummyConnection, + sniff_on_start=True, + ) + await t._async_call() + await t.sniffing_task # Need to wait for the sniffing task to complete + + assert 1 == len(t.connection_pool.connections) + assert "http://1.1.1.1:123" == t.get_connection().host + + async def test_sniff_on_start_ignores_sniff_timeout(self): + t = AsyncTransport( + [{"data": CLUSTER_NODES}], + connection_class=DummyConnection, + sniff_on_start=True, + sniff_timeout=12, + ) + await t._async_call() + await t.sniffing_task # Need to wait for the sniffing task to complete + + assert (("GET", "/_nodes/_all/http"), {"timeout": None}) == t.seed_connections[ + 0 + ].calls[0] + + async def test_sniff_uses_sniff_timeout(self): + t = AsyncTransport( + [{"data": CLUSTER_NODES}], + connection_class=DummyConnection, + sniff_timeout=42, + ) + await t._async_call() + await t.sniff_hosts() + + assert (("GET", "/_nodes/_all/http"), {"timeout": 42}) == t.seed_connections[ + 0 + ].calls[0] + + async def test_sniff_reuses_connection_instances_if_possible(self): + t = AsyncTransport( + [{"data": CLUSTER_NODES}, {"host": "1.1.1.1", "port": 123}], + connection_class=DummyConnection, + randomize_hosts=False, + ) + await t._async_call() + connection = t.connection_pool.connections[1] + connection.delay = 3.0 # Add this delay to make the sniffing deterministic. 
+ + await t.sniff_hosts() + assert 1 == len(t.connection_pool.connections) + assert connection is t.get_connection() + + async def test_sniff_on_fail_triggers_sniffing_on_fail(self): + t = AsyncTransport( + [{"exception": ConnectionError("abandon ship")}, {"data": CLUSTER_NODES}], + connection_class=DummyConnection, + sniff_on_connection_fail=True, + max_retries=0, + randomize_hosts=False, + ) + await t._async_call() + + connection_error = False + try: + await t.perform_request("GET", "/") + except ConnectionError: + connection_error = True + + await t.sniffing_task # Need to wait for the sniffing task to complete + + assert connection_error + assert 1 == len(t.connection_pool.connections) + assert "http://1.1.1.1:123" == t.get_connection().host + + async def test_sniff_after_n_seconds(self, event_loop): + t = AsyncTransport( + [{"data": CLUSTER_NODES}], + connection_class=DummyConnection, + sniffer_timeout=5, + ) + await t._async_call() + + for _ in range(4): + await t.perform_request("GET", "/") + assert 1 == len(t.connection_pool.connections) + assert isinstance(t.get_connection(), DummyConnection) + t.last_sniff = event_loop.time() - 5.1 + + await t.perform_request("GET", "/") + await t.sniffing_task # Need to wait for the sniffing task to complete + + assert 1 == len(t.connection_pool.connections) + assert "http://1.1.1.1:123" == t.get_connection().host + assert event_loop.time() - 1 < t.last_sniff < event_loop.time() + 0.01 + + async def test_sniff_7x_publish_host(self): + # Test the response shaped when a 7.x node has publish_host set + # and the returend data is shaped in the fqdn/ip:port format. + t = AsyncTransport( + [{"data": CLUSTER_NODES_7x_PUBLISH_HOST}], + connection_class=DummyConnection, + sniff_timeout=42, + ) + await t._async_call() + await t.sniff_hosts() + # Ensure we parsed out the fqdn and port from the fqdn/ip:port string. + assert t.connection_pool.connection_opts[0][1] == { + "host": "somehost.tld", + "port": 123, + } + + @patch("elasticsearch._async.transport.AsyncTransport.sniff_hosts") + async def test_sniffing_disabled_on_cloud_instances(self, sniff_hosts): + t = AsyncTransport( + [{}], + sniff_on_start=True, + sniff_on_connection_fail=True, + connection_class=DummyConnection, + cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==", + ) + await t._async_call() + + assert not t.sniff_on_connection_fail + assert sniff_hosts.call_args is None # Assert not called. 
+ await t.perform_request("GET", "/", body={}) + assert 1 == len(t.get_connection().calls) + assert ("GET", "/", None, b"{}") == t.get_connection().calls[0][0] + + async def test_transport_close_closes_all_pool_connections(self): + t = AsyncTransport([{}], connection_class=DummyConnection) + await t._async_call() + + assert not any([conn.closed for conn in t.connection_pool.connections]) + await t.close() + assert all([conn.closed for conn in t.connection_pool.connections]) + + t = AsyncTransport([{}, {}], connection_class=DummyConnection) + await t._async_call() + + assert not any([conn.closed for conn in t.connection_pool.connections]) + await t.close() + assert all([conn.closed for conn in t.connection_pool.connections]) From e0c4df002c6dce16c7878a8b7e53cb8fcf3f9393 Mon Sep 17 00:00:00 2001 From: Seth Michael Larson Date: Fri, 15 May 2020 09:36:47 -0500 Subject: [PATCH 3/5] Update API generator for async --- dev-requirements.txt | 1 + elasticsearch/__init__.py | 3 +- elasticsearch/_async/client/__init__.py | 1976 +++++++++++++++++ elasticsearch/_async/client/async_search.py | 191 ++ elasticsearch/_async/client/autoscaling.py | 75 + elasticsearch/_async/client/cat.py | 713 ++++++ elasticsearch/_async/client/ccr.py | 259 +++ elasticsearch/_async/client/cluster.py | 361 +++ elasticsearch/_async/client/enrich.py | 89 + elasticsearch/_async/client/eql.py | 29 + elasticsearch/_async/client/graph.py | 31 + elasticsearch/_async/client/ilm.py | 162 ++ elasticsearch/_async/client/indices.py | 1364 ++++++++++++ elasticsearch/_async/client/ingest.py | 99 + elasticsearch/_async/client/license.py | 98 + elasticsearch/_async/client/migration.py | 24 + elasticsearch/_async/client/ml.py | 1480 ++++++++++++ elasticsearch/_async/client/monitoring.py | 34 + elasticsearch/_async/client/nodes.py | 160 ++ elasticsearch/_async/client/remote.py | 16 + elasticsearch/_async/client/rollup.py | 155 ++ .../_async/client/searchable_snapshots.py | 92 + elasticsearch/_async/client/security.py | 497 +++++ elasticsearch/_async/client/slm.py | 135 ++ elasticsearch/_async/client/snapshot.py | 233 ++ elasticsearch/_async/client/sql.py | 56 + elasticsearch/_async/client/ssl.py | 18 + elasticsearch/_async/client/tasks.py | 84 + elasticsearch/_async/client/transform.py | 208 ++ elasticsearch/_async/client/utils.py | 130 ++ elasticsearch/_async/client/watcher.py | 180 ++ elasticsearch/_async/client/xpack.py | 36 + elasticsearch/_async/compat.py | 1 + elasticsearch/client/__init__.py | 62 +- elasticsearch/client/utils.py | 47 +- .../test_async/test_server/__init__.py | 0 .../test_async/test_server/conftest.py | 0 .../test_async/test_server/test_clients.py | 0 .../test_server/test_rest_api_spec.py | 0 utils/generate_api.py | 42 +- utils/templates/base | 4 +- .../templates/overrides/__init__/clear_scroll | 2 +- utils/templates/overrides/__init__/create | 2 +- utils/templates/overrides/__init__/index | 2 +- utils/templates/overrides/__init__/scroll | 2 +- utils/templates/overrides/__init__/update | 2 +- utils/templates/overrides/cluster/stats | 2 +- 47 files changed, 9091 insertions(+), 66 deletions(-) create mode 100644 elasticsearch/_async/client/__init__.py create mode 100644 elasticsearch/_async/client/async_search.py create mode 100644 elasticsearch/_async/client/autoscaling.py create mode 100644 elasticsearch/_async/client/cat.py create mode 100644 elasticsearch/_async/client/ccr.py create mode 100644 elasticsearch/_async/client/cluster.py create mode 100644 elasticsearch/_async/client/enrich.py create mode 100644 
elasticsearch/_async/client/eql.py create mode 100644 elasticsearch/_async/client/graph.py create mode 100644 elasticsearch/_async/client/ilm.py create mode 100644 elasticsearch/_async/client/indices.py create mode 100644 elasticsearch/_async/client/ingest.py create mode 100644 elasticsearch/_async/client/license.py create mode 100644 elasticsearch/_async/client/migration.py create mode 100644 elasticsearch/_async/client/ml.py create mode 100644 elasticsearch/_async/client/monitoring.py create mode 100644 elasticsearch/_async/client/nodes.py create mode 100644 elasticsearch/_async/client/remote.py create mode 100644 elasticsearch/_async/client/rollup.py create mode 100644 elasticsearch/_async/client/searchable_snapshots.py create mode 100644 elasticsearch/_async/client/security.py create mode 100644 elasticsearch/_async/client/slm.py create mode 100644 elasticsearch/_async/client/snapshot.py create mode 100644 elasticsearch/_async/client/sql.py create mode 100644 elasticsearch/_async/client/ssl.py create mode 100644 elasticsearch/_async/client/tasks.py create mode 100644 elasticsearch/_async/client/transform.py create mode 100644 elasticsearch/_async/client/utils.py create mode 100644 elasticsearch/_async/client/watcher.py create mode 100644 elasticsearch/_async/client/xpack.py create mode 100644 test_elasticsearch/test_async/test_server/__init__.py create mode 100644 test_elasticsearch/test_async/test_server/conftest.py create mode 100644 test_elasticsearch/test_async/test_server/test_clients.py create mode 100644 test_elasticsearch/test_async/test_server/test_rest_api_spec.py diff --git a/dev-requirements.txt b/dev-requirements.txt index b7f95b66f..ce159bdec 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -18,3 +18,4 @@ black; python_version>="3.6" # Requirements for testing [async] extra aiohttp; python_version>="3.6" pytest-asyncio; python_version>="3.6" +unasync; python_version>="3.6" diff --git a/elasticsearch/__init__.py b/elasticsearch/__init__.py index c1a7c27bc..6fe968553 100644 --- a/elasticsearch/__init__.py +++ b/elasticsearch/__init__.py @@ -73,7 +73,8 @@ from ._async.http_aiohttp import AIOHttpConnection from ._async.transport import AsyncTransport + from ._async.client import AsyncElasticsearch - __all__ += ["AIOHttpConnection", "AsyncTransport"] + __all__ += ["AIOHttpConnection", "AsyncTransport", "AsyncElasticsearch"] except (ImportError, SyntaxError): pass diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py new file mode 100644 index 000000000..f93281a71 --- /dev/null +++ b/elasticsearch/_async/client/__init__.py @@ -0,0 +1,1976 @@ +# -*- coding: utf-8 -*- +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
+# See the LICENSE file in the project root for more information + +from __future__ import unicode_literals +import logging + +from ..transport import Transport +from ..exceptions import TransportError +from ..compat import string_types, urlparse, unquote +from .async_search import AsyncSearchClient +from .autoscaling import AutoscalingClient +from .indices import IndicesClient +from .ingest import IngestClient +from .cluster import ClusterClient +from .cat import CatClient +from .nodes import NodesClient +from .remote import RemoteClient +from .snapshot import SnapshotClient +from .tasks import TasksClient +from .xpack import XPackClient +from .utils import query_params, _make_path, SKIP_IN_PATH, _bulk_body + +# xpack APIs +from .ccr import CcrClient +from .eql import EqlClient +from .graph import GraphClient +from .ilm import IlmClient +from .license import LicenseClient +from .migration import MigrationClient +from .ml import MlClient +from .monitoring import MonitoringClient +from .rollup import RollupClient +from .security import SecurityClient +from .sql import SqlClient +from .ssl import SslClient +from .watcher import WatcherClient +from .enrich import EnrichClient +from .searchable_snapshots import SearchableSnapshotsClient +from .slm import SlmClient +from .transform import TransformClient + + +logger = logging.getLogger("elasticsearch") + + +def _normalize_hosts(hosts): + """ + Helper function to transform hosts argument to + :class:`~elasticsearch.Elasticsearch` to a list of dicts. + """ + # if hosts are empty, just defer to defaults down the line + if hosts is None: + return [{}] + + # passed in just one string + if isinstance(hosts, string_types): + hosts = [hosts] + + out = [] + # normalize hosts to dicts + for host in hosts: + if isinstance(host, string_types): + if "://" not in host: + host = "//%s" % host + + parsed_url = urlparse(host) + h = {"host": parsed_url.hostname} + + if parsed_url.port: + h["port"] = parsed_url.port + + if parsed_url.scheme == "https": + h["port"] = parsed_url.port or 443 + h["use_ssl"] = True + + if parsed_url.username or parsed_url.password: + h["http_auth"] = "%s:%s" % ( + unquote(parsed_url.username), + unquote(parsed_url.password), + ) + + if parsed_url.path and parsed_url.path != "/": + h["url_prefix"] = parsed_url.path + + out.append(h) + else: + out.append(host) + return out + + +class Elasticsearch(object): + """ + Elasticsearch low-level client. Provides a straightforward mapping from + Python to ES REST endpoints. + + The instance has attributes ``cat``, ``cluster``, ``indices``, ``ingest``, + ``nodes``, ``snapshot`` and ``tasks`` that provide access to instances of + :class:`~elasticsearch.client.CatClient`, + :class:`~elasticsearch.client.ClusterClient`, + :class:`~elasticsearch.client.IndicesClient`, + :class:`~elasticsearch.client.IngestClient`, + :class:`~elasticsearch.client.NodesClient`, + :class:`~elasticsearch.client.SnapshotClient` and + :class:`~elasticsearch.client.TasksClient` respectively. This is the + preferred (and only supported) way to get access to those classes and their + methods. 
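+
+    For example, cluster health can be read through the ``cluster``
+    attribute (a minimal sketch)::
+
+        es = Elasticsearch()
+        es.cluster.health()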
+ + You can specify your own connection class which should be used by providing + the ``connection_class`` parameter:: + + # create connection to localhost using the ThriftConnection + es = Elasticsearch(connection_class=ThriftConnection) + + If you want to turn on :ref:`sniffing` you have several options (described + in :class:`~elasticsearch.Transport`):: + + # create connection that will automatically inspect the cluster to get + # the list of active nodes. Start with nodes running on 'esnode1' and + # 'esnode2' + es = Elasticsearch( + ['esnode1', 'esnode2'], + # sniff before doing anything + sniff_on_start=True, + # refresh nodes after a node fails to respond + sniff_on_connection_fail=True, + # and also every 60 seconds + sniffer_timeout=60 + ) + + Different hosts can have different parameters, use a dictionary per node to + specify those:: + + # connect to localhost directly and another node using SSL on port 443 + # and an url_prefix. Note that ``port`` needs to be an int. + es = Elasticsearch([ + {'host': 'localhost'}, + {'host': 'othernode', 'port': 443, 'url_prefix': 'es', 'use_ssl': True}, + ]) + + If using SSL, there are several parameters that control how we deal with + certificates (see :class:`~elasticsearch.Urllib3HttpConnection` for + detailed description of the options):: + + es = Elasticsearch( + ['localhost:443', 'other_host:443'], + # turn on SSL + use_ssl=True, + # make sure we verify SSL certificates + verify_certs=True, + # provide a path to CA certs on disk + ca_certs='/path/to/CA_certs' + ) + + If using SSL, but don't verify the certs, a warning message is showed + optionally (see :class:`~elasticsearch.Urllib3HttpConnection` for + detailed description of the options):: + + es = Elasticsearch( + ['localhost:443', 'other_host:443'], + # turn on SSL + use_ssl=True, + # no verify SSL certificates + verify_certs=False, + # don't show warnings about ssl certs verification + ssl_show_warn=False + ) + + SSL client authentication is supported + (see :class:`~elasticsearch.Urllib3HttpConnection` for + detailed description of the options):: + + es = Elasticsearch( + ['localhost:443', 'other_host:443'], + # turn on SSL + use_ssl=True, + # make sure we verify SSL certificates + verify_certs=True, + # provide a path to CA certs on disk + ca_certs='/path/to/CA_certs', + # PEM formatted SSL client certificate + client_cert='/path/to/clientcert.pem', + # PEM formatted SSL client key + client_key='/path/to/clientkey.pem' + ) + + Alternatively you can use RFC-1738 formatted URLs, as long as they are not + in conflict with other options:: + + es = Elasticsearch( + [ + 'http://user:secret@localhost:9200/', + 'https://user:secret@other_host:443/production' + ], + verify_certs=True + ) + + By default, `JSONSerializer + `_ + is used to encode all outgoing requests. + However, you can implement your own custom serializer:: + + from elasticsearch.serializer import JSONSerializer + + class SetEncoder(JSONSerializer): + def default(self, obj): + if isinstance(obj, set): + return list(obj) + if isinstance(obj, Something): + return 'CustomSomethingRepresentation' + return JSONSerializer.default(self, obj) + + es = Elasticsearch(serializer=SetEncoder()) + + """ + + def __init__(self, hosts=None, transport_class=Transport, **kwargs): + """ + :arg hosts: list of nodes, or a single node, we should connect to. 
+ Node should be a dictionary ({"host": "localhost", "port": 9200}), + the entire dictionary will be passed to the :class:`~elasticsearch.Connection` + class as kwargs, or a string in the format of ``host[:port]`` which will be + translated to a dictionary automatically. If no value is given the + :class:`~elasticsearch.Connection` class defaults will be used. + + :arg transport_class: :class:`~elasticsearch.Transport` subclass to use. + + :arg kwargs: any additional arguments will be passed on to the + :class:`~elasticsearch.Transport` class and, subsequently, to the + :class:`~elasticsearch.Connection` instances. + """ + self.transport = transport_class(_normalize_hosts(hosts), **kwargs) + + # namespaced clients for compatibility with API names + self.async_search = AsyncSearchClient(self) + self.autoscaling = AutoscalingClient(self) + self.indices = IndicesClient(self) + self.ingest = IngestClient(self) + self.cluster = ClusterClient(self) + self.cat = CatClient(self) + self.nodes = NodesClient(self) + self.remote = RemoteClient(self) + self.snapshot = SnapshotClient(self) + self.tasks = TasksClient(self) + + self.xpack = XPackClient(self) + self.eql = EqlClient(self) + self.ccr = CcrClient(self) + self.graph = GraphClient(self) + self.ilm = IlmClient(self) + self.indices = IndicesClient(self) + self.license = LicenseClient(self) + self.migration = MigrationClient(self) + self.ml = MlClient(self) + self.monitoring = MonitoringClient(self) + self.rollup = RollupClient(self) + self.security = SecurityClient(self) + self.sql = SqlClient(self) + self.ssl = SslClient(self) + self.watcher = WatcherClient(self) + self.enrich = EnrichClient(self) + self.searchable_snapshots = SearchableSnapshotsClient(self) + self.slm = SlmClient(self) + self.transform = TransformClient(self) + + def __repr__(self): + try: + # get a list of all connections + cons = self.transport.hosts + # truncate to 5 if there are too many + if len(cons) > 5: + cons = cons[:5] + ["..."] + return "<{cls}({cons})>".format(cls=self.__class__.__name__, cons=cons) + except Exception: + # probably operating on custom transport and connection_pool, ignore + return super(Elasticsearch, self).__repr__() + + # AUTO-GENERATED-API-DEFINITIONS # + @query_params() + def ping(self, params=None, headers=None): + """ + Returns whether the cluster is running. + ``_ + """ + try: + return self.transport.perform_request( + "HEAD", "/", params=params, headers=headers + ) + except TransportError: + return False + + @query_params() + def info(self, params=None, headers=None): + """ + Returns basic information about the cluster. + ``_ + """ + return self.transport.perform_request( + "GET", "/", params=params, headers=headers + ) + + @query_params( + "pipeline", + "refresh", + "routing", + "timeout", + "version", + "version_type", + "wait_for_active_shards", + ) + def create(self, index, id, body, doc_type=None, params=None, headers=None): + """ + Creates a new document in the index. Returns a 409 response when a document + with a same ID already exists in the index. + ``_ + + :arg index: The name of the index + :arg id: Document ID + :arg body: The document + :arg doc_type: The type of the document + :arg pipeline: The pipeline id to preprocess incoming documents + with + :arg refresh: If `true` then refresh the affected shards to make + this operation visible to search, if `wait_for` then wait for a refresh + to make this operation visible to search, if `false` (the default) then + do nothing with refreshes. 
Valid choices: true, false, wait_for + :arg routing: Specific routing value + :arg timeout: Explicit operation timeout + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte + :arg wait_for_active_shards: Sets the number of shard copies + that must be active before proceeding with the index operation. Defaults + to 1, meaning the primary shard only. Set to `all` for all shard copies, + otherwise set to any non-negative value less than or equal to the total + number of copies for the shard (number of replicas + 1) + """ + for param in (index, id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + if doc_type in SKIP_IN_PATH: + path = _make_path(index, "_create", id) + else: + path = _make_path(index, doc_type, id) + + return self.transport.perform_request( + "POST" if id in SKIP_IN_PATH else "PUT", + path, + params=params, + headers=headers, + body=body, + ) + + @query_params( + "if_primary_term", + "if_seq_no", + "op_type", + "pipeline", + "refresh", + "routing", + "timeout", + "version", + "version_type", + "wait_for_active_shards", + ) + def index(self, index, body, id=None, params=None, headers=None): + """ + Creates or updates a document in an index. + ``_ + + :arg index: The name of the index + :arg body: The document + :arg id: Document ID + :arg if_primary_term: only perform the index operation if the + last operation that has changed the document has the specified primary + term + :arg if_seq_no: only perform the index operation if the last + operation that has changed the document has the specified sequence + number + :arg op_type: Explicit operation type. Defaults to `index` for + requests with an explicit document ID, and to `create`for requests + without an explicit document ID Valid choices: index, create + :arg pipeline: The pipeline id to preprocess incoming documents + with + :arg refresh: If `true` then refresh the affected shards to make + this operation visible to search, if `wait_for` then wait for a refresh + to make this operation visible to search, if `false` (the default) then + do nothing with refreshes. Valid choices: true, false, wait_for + :arg routing: Specific routing value + :arg timeout: Explicit operation timeout + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte + :arg wait_for_active_shards: Sets the number of shard copies + that must be active before proceeding with the index operation. Defaults + to 1, meaning the primary shard only. Set to `all` for all shard copies, + otherwise set to any non-negative value less than or equal to the total + number of copies for the shard (number of replicas + 1) + """ + for param in (index, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST" if id in SKIP_IN_PATH else "PUT", + _make_path(index, "_doc", id), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "pipeline", + "refresh", + "routing", + "timeout", + "wait_for_active_shards", + ) + def bulk(self, body, index=None, doc_type=None, params=None, headers=None): + """ + Allows to perform multiple index/update/delete operations in a single request. 
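+
+        A minimal sketch of the newline-delimited request body this method
+        expects (the index and field names are illustrative)::
+
+            es.bulk(body='{"index": {"_index": "test"}}\n{"field": "value"}\n')
+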
+ ``_ + + :arg body: The operation definition and data (action-data + pairs), separated by newlines + :arg index: Default index for items which don't provide one + :arg doc_type: Default document type for items which don't + provide one + :arg _source: True or false to return the _source field or not, + or default list of fields to return, can be overridden on each sub- + request + :arg _source_excludes: Default list of fields to exclude from + the returned _source field, can be overridden on each sub-request + :arg _source_includes: Default list of fields to extract and + return from the _source field, can be overridden on each sub-request + :arg pipeline: The pipeline id to preprocess incoming documents + with + :arg refresh: If `true` then refresh the affected shards to make + this operation visible to search, if `wait_for` then wait for a refresh + to make this operation visible to search, if `false` (the default) then + do nothing with refreshes. Valid choices: true, false, wait_for + :arg routing: Specific routing value + :arg timeout: Explicit operation timeout + :arg wait_for_active_shards: Sets the number of shard copies + that must be active before proceeding with the bulk operation. Defaults + to 1, meaning the primary shard only. Set to `all` for all shard copies, + otherwise set to any non-negative value less than or equal to the total + number of copies for the shard (number of replicas + 1) + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + body = _bulk_body(self.transport.serializer, body) + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, "_bulk"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def clear_scroll(self, body=None, scroll_id=None, params=None, headers=None): + """ + Explicitly clears the search context for a scroll. + ``_ + + :arg body: A comma-separated list of scroll IDs to clear if none + was specified via the scroll_id parameter + :arg scroll_id: A comma-separated list of scroll IDs to clear + """ + if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH: + raise ValueError("You need to supply scroll_id or body.") + elif scroll_id and not body: + body = {"scroll_id": [scroll_id]} + elif scroll_id: + params["scroll_id"] = scroll_id + + return self.transport.perform_request( + "DELETE", "/_search/scroll", params=params, headers=headers, body=body + ) + + @query_params( + "allow_no_indices", + "analyze_wildcard", + "analyzer", + "default_operator", + "df", + "expand_wildcards", + "ignore_throttled", + "ignore_unavailable", + "lenient", + "min_score", + "preference", + "q", + "routing", + "terminate_after", + ) + def count(self, body=None, index=None, params=None, headers=None): + """ + Returns number of documents matching a query. + ``_ + + :arg body: A query to restrict the results specified with the + Query DSL (optional) + :arg index: A comma-separated list of indices to restrict the + results + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. 
(This includes `_all` + string or when no indices have been specified) + :arg analyze_wildcard: Specify whether wildcard and prefix + queries should be analyzed (default: false) + :arg analyzer: The analyzer to use for the query string + :arg default_operator: The default operator for query string + query (AND or OR) Valid choices: AND, OR Default: OR + :arg df: The field to use as default where no field prefix is + given in the query string + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_throttled: Whether specified concrete, expanded or + aliased indices should be ignored when throttled + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg lenient: Specify whether format-based query failures (such + as providing text to a numeric field) should be ignored + :arg min_score: Include only documents with a specific `_score` + value in the result + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg q: Query in the Lucene query string syntax + :arg routing: A comma-separated list of specific routing values + :arg terminate_after: The maximum count for each shard, upon + reaching which the query execution will terminate early + """ + return self.transport.perform_request( + "POST", + _make_path(index, "_count"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "if_primary_term", + "if_seq_no", + "refresh", + "routing", + "timeout", + "version", + "version_type", + "wait_for_active_shards", + ) + def delete(self, index, id, doc_type=None, params=None, headers=None): + """ + Removes a document from the index. + ``_ + + :arg index: The name of the index + :arg id: The document ID + :arg doc_type: The type of the document + :arg if_primary_term: only perform the delete operation if the + last operation that has changed the document has the specified primary + term + :arg if_seq_no: only perform the delete operation if the last + operation that has changed the document has the specified sequence + number + :arg refresh: If `true` then refresh the affected shards to make + this operation visible to search, if `wait_for` then wait for a refresh + to make this operation visible to search, if `false` (the default) then + do nothing with refreshes. Valid choices: true, false, wait_for + :arg routing: Specific routing value + :arg timeout: Explicit operation timeout + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte + :arg wait_for_active_shards: Sets the number of shard copies + that must be active before proceeding with the delete operation. + Defaults to 1, meaning the primary shard only. 
Set to `all` for all + shard copies, otherwise set to any non-negative value less than or equal + to the total number of copies for the shard (number of replicas + 1) + """ + for param in (index, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + if doc_type in SKIP_IN_PATH: + doc_type = "_doc" + + return self.transport.perform_request( + "DELETE", _make_path(index, doc_type, id), params=params, headers=headers + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "allow_no_indices", + "analyze_wildcard", + "analyzer", + "conflicts", + "default_operator", + "df", + "expand_wildcards", + "from_", + "ignore_unavailable", + "lenient", + "max_docs", + "preference", + "q", + "refresh", + "request_cache", + "requests_per_second", + "routing", + "scroll", + "scroll_size", + "search_timeout", + "search_type", + "slices", + "sort", + "stats", + "terminate_after", + "timeout", + "version", + "wait_for_active_shards", + "wait_for_completion", + ) + def delete_by_query(self, index, body, params=None, headers=None): + """ + Deletes documents matching the provided query. + ``_ + + :arg index: A comma-separated list of index names to search; use + `_all` or empty string to perform the operation on all indices + :arg body: The search definition using the Query DSL + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg analyze_wildcard: Specify whether wildcard and prefix + queries should be analyzed (default: false) + :arg analyzer: The analyzer to use for the query string + :arg conflicts: What to do when the delete by query hits version + conflicts? Valid choices: abort, proceed Default: abort + :arg default_operator: The default operator for query string + query (AND or OR) Valid choices: AND, OR Default: OR + :arg df: The field to use as default where no field prefix is + given in the query string + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg from\\_: Starting offset (default: 0) + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg lenient: Specify whether format-based query failures (such + as providing text to a numeric field) should be ignored + :arg max_docs: Maximum number of documents to process (default: + all documents) + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg q: Query in the Lucene query string syntax + :arg refresh: Should the affected indexes be refreshed? + :arg request_cache: Specify if request cache should be used for + this request or not, defaults to index level setting + :arg requests_per_second: The throttle for this request in sub- + requests per second. -1 means no throttle. 
+ :arg routing: A comma-separated list of specific routing values + :arg scroll: Specify how long a consistent view of the index + should be maintained for scrolled search + :arg scroll_size: Size on the scroll request powering the delete + by query Default: 100 + :arg search_timeout: Explicit timeout for each search request. + Defaults to no timeout. + :arg search_type: Search operation type Valid choices: + query_then_fetch, dfs_query_then_fetch + :arg slices: The number of slices this task should be divided + into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be + set to `auto`. Default: 1 + :arg sort: A comma-separated list of : pairs + :arg stats: Specific 'tag' of the request for logging and + statistical purposes + :arg terminate_after: The maximum number of documents to collect + for each shard, upon reaching which the query execution will terminate + early. + :arg timeout: Time each individual bulk request should wait for + shards that are unavailable. Default: 1m + :arg version: Specify whether to return document version as part + of a hit + :arg wait_for_active_shards: Sets the number of shard copies + that must be active before proceeding with the delete by query + operation. Defaults to 1, meaning the primary shard only. Set to `all` + for all shard copies, otherwise set to any non-negative value less than + or equal to the total number of copies for the shard (number of replicas + + 1) + :arg wait_for_completion: Should the request should block until + the delete by query is complete. Default: True + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + for param in (index, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_delete_by_query"), + params=params, + headers=headers, + body=body, + ) + + @query_params("requests_per_second") + def delete_by_query_rethrottle(self, task_id, params=None, headers=None): + """ + Changes the number of requests per second for a particular Delete By Query + operation. + ``_ + + :arg task_id: The task id to rethrottle + :arg requests_per_second: The throttle to set on this request in + floating sub-requests per second. -1 means set no throttle. + """ + if task_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'task_id'.") + + return self.transport.perform_request( + "POST", + _make_path("_delete_by_query", task_id, "_rethrottle"), + params=params, + headers=headers, + ) + + @query_params("master_timeout", "timeout") + def delete_script(self, id, params=None, headers=None): + """ + Deletes a script. + ``_ + + :arg id: Script ID + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "DELETE", _make_path("_scripts", id), params=params, headers=headers + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "preference", + "realtime", + "refresh", + "routing", + "stored_fields", + "version", + "version_type", + ) + def exists(self, index, id, params=None, headers=None): + """ + Returns information about whether a document exists in an index. 
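+
+        A minimal usage sketch (index and id values are illustrative)::
+
+            if es.exists(index="test-index", id="1"):
+                print("document is present")
+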
+ ``_ + + :arg index: The name of the index + :arg id: The document ID + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg realtime: Specify whether to perform the operation in + realtime or search mode + :arg refresh: Refresh the shard containing the document before + performing the operation + :arg routing: Specific routing value + :arg stored_fields: A comma-separated list of stored fields to + return in the response + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte + """ + for param in (index, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "HEAD", _make_path(index, "_doc", id), params=params, headers=headers + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "preference", + "realtime", + "refresh", + "routing", + "version", + "version_type", + ) + def exists_source(self, index, id, doc_type=None, params=None, headers=None): + """ + Returns information about whether a document source exists in an index. + ``_ + + :arg index: The name of the index + :arg id: The document ID + :arg doc_type: The type of the document; deprecated and optional + starting with 7.0 + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg realtime: Specify whether to perform the operation in + realtime or search mode + :arg refresh: Refresh the shard containing the document before + performing the operation + :arg routing: Specific routing value + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte + """ + for param in (index, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "HEAD", + _make_path(index, doc_type, id, "_source"), + params=params, + headers=headers, + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "analyze_wildcard", + "analyzer", + "default_operator", + "df", + "lenient", + "preference", + "q", + "routing", + "stored_fields", + ) + def explain(self, index, id, body=None, params=None, headers=None): + """ + Returns information about why a specific matches (or doesn't match) a query. 
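+
+        A minimal usage sketch (index, id, and query are illustrative)::
+
+            es.explain(
+                index="test-index",
+                id="1",
+                body={"query": {"match": {"user": "kimchy"}}},
+            )
+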
+ ``_ + + :arg index: The name of the index + :arg id: The document ID + :arg body: The query definition using the Query DSL + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg analyze_wildcard: Specify whether wildcards and prefix + queries in the query string query should be analyzed (default: false) + :arg analyzer: The analyzer for the query string query + :arg default_operator: The default operator for query string + query (AND or OR) Valid choices: AND, OR Default: OR + :arg df: The default field for query string query (default: + _all) + :arg lenient: Specify whether format-based query failures (such + as providing text to a numeric field) should be ignored + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg q: Query in the Lucene query string syntax + :arg routing: Specific routing value + :arg stored_fields: A comma-separated list of stored fields to + return in the response + """ + for param in (index, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_explain", id), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "fields", + "ignore_unavailable", + "include_unmapped", + ) + def field_caps(self, index=None, params=None, headers=None): + """ + Returns the information about the capabilities of fields among multiple + indices. + ``_ + + :arg index: A comma-separated list of index names; use `_all` or + empty string to perform the operation on all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg fields: A comma-separated list of field names + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg include_unmapped: Indicates whether unmapped fields should + be included in the response. + """ + return self.transport.perform_request( + "GET", _make_path(index, "_field_caps"), params=params, headers=headers + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "preference", + "realtime", + "refresh", + "routing", + "stored_fields", + "version", + "version_type", + ) + def get(self, index, id, params=None, headers=None): + """ + Returns a document. 
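+
+        A minimal usage sketch (index and id values are illustrative)::
+
+            doc = es.get(index="test-index", id="1")
+            print(doc["_source"])
+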
+ ``_ + + :arg index: The name of the index + :arg id: The document ID + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg realtime: Specify whether to perform the operation in + realtime or search mode + :arg refresh: Refresh the shard containing the document before + performing the operation + :arg routing: Specific routing value + :arg stored_fields: A comma-separated list of stored fields to + return in the response + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte + """ + for param in (index, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "GET", _make_path(index, "_doc", id), params=params, headers=headers + ) + + @query_params("master_timeout") + def get_script(self, id, params=None, headers=None): + """ + Returns a script. + ``_ + + :arg id: Script ID + :arg master_timeout: Specify timeout for connection to master + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "GET", _make_path("_scripts", id), params=params, headers=headers + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "preference", + "realtime", + "refresh", + "routing", + "version", + "version_type", + ) + def get_source(self, index, id, params=None, headers=None): + """ + Returns the source of a document. + ``_ + + :arg index: The name of the index + :arg id: The document ID + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg realtime: Specify whether to perform the operation in + realtime or search mode + :arg refresh: Refresh the shard containing the document before + performing the operation + :arg routing: Specific routing value + :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte + """ + for param in (index, id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "GET", _make_path(index, "_source", id), params=params, headers=headers + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "preference", + "realtime", + "refresh", + "routing", + "stored_fields", + ) + def mget(self, body, index=None, params=None, headers=None): + """ + Allows to get multiple documents in one request. + ``_ + + :arg body: Document identifiers; can be either `docs` + (containing full document information) or `ids` (when index is provided + in the URL. 
+ :arg index: The name of the index + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg realtime: Specify whether to perform the operation in + realtime or search mode + :arg refresh: Refresh the shard containing the document before + performing the operation + :arg routing: Specific routing value + :arg stored_fields: A comma-separated list of stored fields to + return in the response + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_mget"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "ccs_minimize_roundtrips", + "max_concurrent_searches", + "max_concurrent_shard_requests", + "pre_filter_shard_size", + "rest_total_hits_as_int", + "search_type", + "typed_keys", + ) + def msearch(self, body, index=None, params=None, headers=None): + """ + Allows to execute several search operations in one request. + ``_ + + :arg body: The request definitions (metadata-search request + definition pairs), separated by newlines + :arg index: A comma-separated list of index names to use as + default + :arg ccs_minimize_roundtrips: Indicates whether network round- + trips should be minimized as part of cross-cluster search requests + execution Default: true + :arg max_concurrent_searches: Controls the maximum number of + concurrent searches the multi search api will execute + :arg max_concurrent_shard_requests: The number of concurrent + shard requests each sub search executes concurrently per node. This + value should be used to limit the impact of the search on the cluster in + order to limit the number of concurrent shard requests Default: 5 + :arg pre_filter_shard_size: A threshold that enforces a pre- + filter roundtrip to prefilter search shards based on query rewriting if + the number of shards the search request expands to exceeds the + threshold. This filter roundtrip can limit the number of shards + significantly if for instance a shard can not match any documents based + on its rewrite method ie. if date filters are mandatory to match but the + shard bounds and the query are disjoint. + :arg rest_total_hits_as_int: Indicates whether hits.total should + be rendered as an integer or an object in the rest search response + :arg search_type: Search operation type Valid choices: + query_then_fetch, query_and_fetch, dfs_query_then_fetch, + dfs_query_and_fetch + :arg typed_keys: Specify whether aggregation and suggester names + should be prefixed by their respective types in the response + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + body = _bulk_body(self.transport.serializer, body) + return self.transport.perform_request( + "POST", + _make_path(index, "_msearch"), + params=params, + headers=headers, + body=body, + ) + + @query_params("master_timeout", "timeout") + def put_script(self, id, body, context=None, params=None, headers=None): + """ + Creates or updates a script. 
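+
+        A minimal usage sketch (the script id and Painless source are
+        illustrative)::
+
+            es.put_script(
+                id="my-script",
+                body={"script": {"lang": "painless", "source": "Math.log(_score * 2)"}},
+            )
+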
+ ``_ + + :arg id: Script ID + :arg body: The document + :arg context: Context name to compile script against + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + """ + for param in (id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_scripts", id, context), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "allow_no_indices", "expand_wildcards", "ignore_unavailable", "search_type" + ) + def rank_eval(self, body, index=None, params=None, headers=None): + """ + Allows to evaluate the quality of ranked search results over a set of typical + search queries + ``_ + + :arg body: The ranking evaluation search definition, including + search requests, document ratings and ranking metric definition. + :arg index: A comma-separated list of index names to search; use + `_all` or empty string to perform the operation on all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg search_type: Search operation type Valid choices: + query_then_fetch, dfs_query_then_fetch + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_rank_eval"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "max_docs", + "refresh", + "requests_per_second", + "scroll", + "slices", + "timeout", + "wait_for_active_shards", + "wait_for_completion", + ) + def reindex(self, body, params=None, headers=None): + """ + Allows to copy documents from one index to another, optionally filtering the + source documents by a query, changing the destination index settings, or + fetching the documents from a remote cluster. + ``_ + + :arg body: The search definition using the Query DSL and the + prototype for the index request. + :arg max_docs: Maximum number of documents to process (default: + all documents) + :arg refresh: Should the affected indexes be refreshed? + :arg requests_per_second: The throttle to set on this request in + sub-requests per second. -1 means no throttle. + :arg scroll: Control how long to keep the search context alive + Default: 5m + :arg slices: The number of slices this task should be divided + into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be + set to `auto`. Default: 1 + :arg timeout: Time each individual bulk request should wait for + shards that are unavailable. Default: 1m + :arg wait_for_active_shards: Sets the number of shard copies + that must be active before proceeding with the reindex operation. + Defaults to 1, meaning the primary shard only. Set to `all` for all + shard copies, otherwise set to any non-negative value less than or equal + to the total number of copies for the shard (number of replicas + 1) + :arg wait_for_completion: Should the request should block until + the reindex is complete. 
Default: True + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", "/_reindex", params=params, headers=headers, body=body + ) + + @query_params("requests_per_second") + def reindex_rethrottle(self, task_id, params=None, headers=None): + """ + Changes the number of requests per second for a particular Reindex operation. + ``_ + + :arg task_id: The task id to rethrottle + :arg requests_per_second: The throttle to set on this request in + floating sub-requests per second. -1 means set no throttle. + """ + if task_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'task_id'.") + + return self.transport.perform_request( + "POST", + _make_path("_reindex", task_id, "_rethrottle"), + params=params, + headers=headers, + ) + + @query_params() + def render_search_template(self, body=None, id=None, params=None, headers=None): + """ + Allows to use the Mustache language to pre-render a search definition. + ``_ + + :arg body: The search definition template and its params + :arg id: The id of the stored search template + """ + return self.transport.perform_request( + "POST", + _make_path("_render", "template", id), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def scripts_painless_execute(self, body=None, params=None, headers=None): + """ + Allows an arbitrary script to be executed and a result to be returned + ``_ + + :arg body: The script to execute + """ + return self.transport.perform_request( + "POST", + "/_scripts/painless/_execute", + params=params, + headers=headers, + body=body, + ) + + @query_params("rest_total_hits_as_int", "scroll") + def scroll(self, body=None, scroll_id=None, params=None, headers=None): + """ + Allows to retrieve a large numbers of results from a single search request. + ``_ + + :arg body: The scroll ID if not passed by URL or query + parameter. + :arg scroll_id: The scroll ID for scrolled search + :arg rest_total_hits_as_int: Indicates whether hits.total should + be rendered as an integer or an object in the rest search response + :arg scroll: Specify how long a consistent view of the index + should be maintained for scrolled search + """ + if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH: + raise ValueError("You need to supply scroll_id or body.") + elif scroll_id and not body: + body = {"scroll_id": scroll_id} + elif scroll_id: + params["scroll_id"] = scroll_id + + return self.transport.perform_request( + "POST", "/_search/scroll", params=params, headers=headers, body=body + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "allow_no_indices", + "allow_partial_search_results", + "analyze_wildcard", + "analyzer", + "batched_reduce_size", + "ccs_minimize_roundtrips", + "default_operator", + "df", + "docvalue_fields", + "expand_wildcards", + "explain", + "from_", + "ignore_throttled", + "ignore_unavailable", + "lenient", + "max_concurrent_shard_requests", + "pre_filter_shard_size", + "preference", + "q", + "request_cache", + "rest_total_hits_as_int", + "routing", + "scroll", + "search_type", + "seq_no_primary_term", + "size", + "sort", + "stats", + "stored_fields", + "suggest_field", + "suggest_mode", + "suggest_size", + "suggest_text", + "terminate_after", + "timeout", + "track_scores", + "track_total_hits", + "typed_keys", + "version", + ) + def search(self, body=None, index=None, params=None, headers=None): + """ + Returns results matching a query. 
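+
+        A minimal usage sketch (the index name and query are illustrative)::
+
+            resp = es.search(index="test-index", body={"query": {"match_all": {}}})
+            print(resp["hits"]["hits"])
+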
+ ``_ + + :arg body: The search definition using the Query DSL + :arg index: A comma-separated list of index names to search; use + `_all` or empty string to perform the operation on all indices + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg allow_partial_search_results: Indicate if an error should + be returned if there is a partial search failure or timeout Default: + True + :arg analyze_wildcard: Specify whether wildcard and prefix + queries should be analyzed (default: false) + :arg analyzer: The analyzer to use for the query string + :arg batched_reduce_size: The number of shard results that + should be reduced at once on the coordinating node. This value should be + used as a protection mechanism to reduce the memory overhead per search + request if the potential number of shards in the request can be large. + Default: 512 + :arg ccs_minimize_roundtrips: Indicates whether network round- + trips should be minimized as part of cross-cluster search requests + execution Default: true + :arg default_operator: The default operator for query string + query (AND or OR) Valid choices: AND, OR Default: OR + :arg df: The field to use as default where no field prefix is + given in the query string + :arg docvalue_fields: A comma-separated list of fields to return + as the docvalue representation of a field for each hit + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg explain: Specify whether to return detailed information + about score computation as part of a hit + :arg from\\_: Starting offset (default: 0) + :arg ignore_throttled: Whether specified concrete, expanded or + aliased indices should be ignored when throttled + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg lenient: Specify whether format-based query failures (such + as providing text to a numeric field) should be ignored + :arg max_concurrent_shard_requests: The number of concurrent + shard requests per node this search executes concurrently. This value + should be used to limit the impact of the search on the cluster in order + to limit the number of concurrent shard requests Default: 5 + :arg pre_filter_shard_size: A threshold that enforces a pre- + filter roundtrip to prefilter search shards based on query rewriting if + the number of shards the search request expands to exceeds the + threshold. This filter roundtrip can limit the number of shards + significantly if for instance a shard can not match any documents based + on its rewrite method ie. if date filters are mandatory to match but the + shard bounds and the query are disjoint. 
+ :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg q: Query in the Lucene query string syntax + :arg request_cache: Specify if request cache should be used for + this request or not, defaults to index level setting + :arg rest_total_hits_as_int: Indicates whether hits.total should + be rendered as an integer or an object in the rest search response + :arg routing: A comma-separated list of specific routing values + :arg scroll: Specify how long a consistent view of the index + should be maintained for scrolled search + :arg search_type: Search operation type Valid choices: + query_then_fetch, dfs_query_then_fetch + :arg seq_no_primary_term: Specify whether to return sequence + number and primary term of the last modification of each hit + :arg size: Number of hits to return (default: 10) + :arg sort: A comma-separated list of : pairs + :arg stats: Specific 'tag' of the request for logging and + statistical purposes + :arg stored_fields: A comma-separated list of stored fields to + return as part of a hit + :arg suggest_field: Specify which field to use for suggestions + :arg suggest_mode: Specify suggest mode Valid choices: missing, + popular, always Default: missing + :arg suggest_size: How many suggestions to return in response + :arg suggest_text: The source text for which the suggestions + should be returned + :arg terminate_after: The maximum number of documents to collect + for each shard, upon reaching which the query execution will terminate + early. + :arg timeout: Explicit operation timeout + :arg track_scores: Whether to calculate and return scores even + if they are not used for sorting + :arg track_total_hits: Indicate if the number of documents that + match the query should be tracked + :arg typed_keys: Specify whether aggregation and suggester names + should be prefixed by their respective types in the response + :arg version: Specify whether to return document version as part + of a hit + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "POST", + _make_path(index, "_search"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "ignore_unavailable", + "local", + "preference", + "routing", + ) + def search_shards(self, index=None, params=None, headers=None): + """ + Returns information about the indices and shards that a search request would be + executed against. + ``_ + + :arg index: A comma-separated list of index names to search; use + `_all` or empty string to perform the operation on all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. 
Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg routing: Specific routing value + """ + return self.transport.perform_request( + "GET", _make_path(index, "_search_shards"), params=params, headers=headers + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "if_primary_term", + "if_seq_no", + "lang", + "refresh", + "retry_on_conflict", + "routing", + "timeout", + "wait_for_active_shards", + ) + def update(self, index, id, body, doc_type=None, params=None, headers=None): + """ + Updates a document with a script or partial document. + ``_ + + :arg index: The name of the index + :arg id: Document ID + :arg body: The request definition requires either `script` or + partial `doc` + :arg doc_type: The type of the document + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg if_primary_term: only perform the update operation if the + last operation that has changed the document has the specified primary + term + :arg if_seq_no: only perform the update operation if the last + operation that has changed the document has the specified sequence + number + :arg lang: The script language (default: painless) + :arg refresh: If `true` then refresh the affected shards to make + this operation visible to search, if `wait_for` then wait for a refresh + to make this operation visible to search, if `false` (the default) then + do nothing with refreshes. Valid choices: true, false, wait_for + :arg retry_on_conflict: Specify how many times should the + operation be retried when a conflict occurs (default: 0) + :arg routing: Specific routing value + :arg timeout: Explicit operation timeout + :arg wait_for_active_shards: Sets the number of shard copies + that must be active before proceeding with the update operation. + Defaults to 1, meaning the primary shard only. Set to `all` for all + shard copies, otherwise set to any non-negative value less than or equal + to the total number of copies for the shard (number of replicas + 1) + """ + for param in (index, id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + if doc_type in SKIP_IN_PATH: + path = _make_path(index, "_update", id) + else: + path = _make_path(index, doc_type, id, "_update") + + return self.transport.perform_request( + "POST", path, params=params, headers=headers, body=body + ) + + @query_params("requests_per_second") + def update_by_query_rethrottle(self, task_id, params=None, headers=None): + """ + Changes the number of requests per second for a particular Update By Query + operation. + ``_ + + :arg task_id: The task id to rethrottle + :arg requests_per_second: The throttle to set on this request in + floating sub-requests per second. -1 means set no throttle. 
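+
+        A minimal usage sketch (the task id value is illustrative)::
+
+            es.update_by_query_rethrottle(
+                task_id="oTUltX4IQMOUUVeiohTt8A:12345",
+                requests_per_second=10,
+            )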
+ """ + if task_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'task_id'.") + + return self.transport.perform_request( + "POST", + _make_path("_update_by_query", task_id, "_rethrottle"), + params=params, + headers=headers, + ) + + @query_params() + def get_script_context(self, params=None, headers=None): + """ + Returns all script contexts. + ``_ + """ + return self.transport.perform_request( + "GET", "/_script_context", params=params, headers=headers + ) + + @query_params() + def get_script_languages(self, params=None, headers=None): + """ + Returns available script types, languages and contexts + ``_ + """ + return self.transport.perform_request( + "GET", "/_script_language", params=params, headers=headers + ) + + @query_params( + "ccs_minimize_roundtrips", + "max_concurrent_searches", + "rest_total_hits_as_int", + "search_type", + "typed_keys", + ) + def msearch_template(self, body, index=None, params=None, headers=None): + """ + Allows to execute several search template operations in one request. + ``_ + + :arg body: The request definitions (metadata-search request + definition pairs), separated by newlines + :arg index: A comma-separated list of index names to use as + default + :arg ccs_minimize_roundtrips: Indicates whether network round- + trips should be minimized as part of cross-cluster search requests + execution Default: true + :arg max_concurrent_searches: Controls the maximum number of + concurrent searches the multi search api will execute + :arg rest_total_hits_as_int: Indicates whether hits.total should + be rendered as an integer or an object in the rest search response + :arg search_type: Search operation type Valid choices: + query_then_fetch, query_and_fetch, dfs_query_then_fetch, + dfs_query_and_fetch + :arg typed_keys: Specify whether aggregation and suggester names + should be prefixed by their respective types in the response + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + body = _bulk_body(self.transport.serializer, body) + return self.transport.perform_request( + "POST", + _make_path(index, "_msearch", "template"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "field_statistics", + "fields", + "ids", + "offsets", + "payloads", + "positions", + "preference", + "realtime", + "routing", + "term_statistics", + "version", + "version_type", + ) + def mtermvectors(self, body=None, index=None, params=None, headers=None): + """ + Returns multiple termvectors in one request. + ``_ + + :arg body: Define ids, documents, parameters or a list of + parameters per document here. You must at least provide a list of + document ids. See documentation. + :arg index: The index in which the document resides. + :arg field_statistics: Specifies if document count, sum of + document frequencies and sum of total term frequencies should be + returned. Applies to all returned documents unless otherwise specified + in body "params" or "docs". Default: True + :arg fields: A comma-separated list of fields to return. Applies + to all returned documents unless otherwise specified in body "params" or + "docs". + :arg ids: A comma-separated list of documents ids. You must + define ids as parameter or set "ids" or "docs" in the request body + :arg offsets: Specifies if term offsets should be returned. + Applies to all returned documents unless otherwise specified in body + "params" or "docs". Default: True + :arg payloads: Specifies if term payloads should be returned. 
+ Applies to all returned documents unless otherwise specified in body
+ "params" or "docs". Default: True
+ :arg positions: Specifies if term positions should be returned.
+ Applies to all returned documents unless otherwise specified in body
+ "params" or "docs". Default: True
+ :arg preference: Specify the node or shard the operation should
+ be performed on (default: random). Applies to all returned documents
+ unless otherwise specified in body "params" or "docs".
+ :arg realtime: Specifies if requests are real-time as opposed to
+ near-real-time (default: true).
+ :arg routing: Specific routing value. Applies to all returned
+ documents unless otherwise specified in body "params" or "docs".
+ :arg term_statistics: Specifies if total term frequency and
+ document frequency should be returned. Applies to all returned documents
+ unless otherwise specified in body "params" or "docs".
+ :arg version: Explicit version number for concurrency control
+ :arg version_type: Specific version type Valid choices:
+ internal, external, external_gte
+ """
+ return self.transport.perform_request(
+ "POST",
+ _make_path(index, "_mtermvectors"),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params(
+ "allow_no_indices",
+ "ccs_minimize_roundtrips",
+ "expand_wildcards",
+ "explain",
+ "ignore_throttled",
+ "ignore_unavailable",
+ "preference",
+ "profile",
+ "rest_total_hits_as_int",
+ "routing",
+ "scroll",
+ "search_type",
+ "typed_keys",
+ )
+ def search_template(self, body, index=None, params=None, headers=None):
+ """
+ Allows to use the Mustache language to pre-render a search definition.
+ ``_
+
+ :arg body: The search definition template and its params
+ :arg index: A comma-separated list of index names to search; use
+ `_all` or empty string to perform the operation on all indices
+ :arg allow_no_indices: Whether to ignore if a wildcard indices
+ expression resolves into no concrete indices. (This includes `_all`
+ string or when no indices have been specified)
+ :arg ccs_minimize_roundtrips: Indicates whether network round-
+ trips should be minimized as part of cross-cluster search requests
+ execution Default: true
+ :arg expand_wildcards: Whether to expand wildcard expression to
+ concrete indices that are open, closed or both.
Valid choices: open, + closed, hidden, none, all Default: open + :arg explain: Specify whether to return detailed information + about score computation as part of a hit + :arg ignore_throttled: Whether specified concrete, expanded or + aliased indices should be ignored when throttled + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg profile: Specify whether to profile the query execution + :arg rest_total_hits_as_int: Indicates whether hits.total should + be rendered as an integer or an object in the rest search response + :arg routing: A comma-separated list of specific routing values + :arg scroll: Specify how long a consistent view of the index + should be maintained for scrolled search + :arg search_type: Search operation type Valid choices: + query_then_fetch, query_and_fetch, dfs_query_then_fetch, + dfs_query_and_fetch + :arg typed_keys: Specify whether aggregation and suggester names + should be prefixed by their respective types in the response + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_search", "template"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "field_statistics", + "fields", + "offsets", + "payloads", + "positions", + "preference", + "realtime", + "routing", + "term_statistics", + "version", + "version_type", + ) + def termvectors(self, index, body=None, id=None, params=None, headers=None): + """ + Returns information and statistics about terms in the fields of a particular + document. + ``_ + + :arg index: The index in which the document resides. + :arg body: Define parameters and or supply a document to get + termvectors for. See documentation. + :arg id: The id of the document, when not specified a doc param + should be supplied. + :arg field_statistics: Specifies if document count, sum of + document frequencies and sum of total term frequencies should be + returned. Default: True + :arg fields: A comma-separated list of fields to return. + :arg offsets: Specifies if term offsets should be returned. + Default: True + :arg payloads: Specifies if term payloads should be returned. + Default: True + :arg positions: Specifies if term positions should be returned. + Default: True + :arg preference: Specify the node or shard the operation should + be performed on (default: random). + :arg realtime: Specifies if request is real-time as opposed to + near-real-time (default: true). + :arg routing: Specific routing value. + :arg term_statistics: Specifies if total term frequency and + document frequency should be returned. 
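+ For example, ``term_statistics=True`` adds the total term frequency
+ and document frequency to each term in the response.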
+ :arg version: Explicit version number for concurrency control + :arg version_type: Specific version type Valid choices: + internal, external, external_gte + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_termvectors", id), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "allow_no_indices", + "analyze_wildcard", + "analyzer", + "conflicts", + "default_operator", + "df", + "expand_wildcards", + "from_", + "ignore_unavailable", + "lenient", + "max_docs", + "pipeline", + "preference", + "q", + "refresh", + "request_cache", + "requests_per_second", + "routing", + "scroll", + "scroll_size", + "search_timeout", + "search_type", + "slices", + "sort", + "stats", + "terminate_after", + "timeout", + "version", + "version_type", + "wait_for_active_shards", + "wait_for_completion", + ) + def update_by_query(self, index, body=None, params=None, headers=None): + """ + Performs an update on every document in the index without changing the source, + for example to pick up a mapping change. + ``_ + + :arg index: A comma-separated list of index names to search; use + `_all` or empty string to perform the operation on all indices + :arg body: The search definition using the Query DSL + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg analyze_wildcard: Specify whether wildcard and prefix + queries should be analyzed (default: false) + :arg analyzer: The analyzer to use for the query string + :arg conflicts: What to do when the update by query hits version + conflicts? Valid choices: abort, proceed Default: abort + :arg default_operator: The default operator for query string + query (AND or OR) Valid choices: AND, OR Default: OR + :arg df: The field to use as default where no field prefix is + given in the query string + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg from\\_: Starting offset (default: 0) + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg lenient: Specify whether format-based query failures (such + as providing text to a numeric field) should be ignored + :arg max_docs: Maximum number of documents to process (default: + all documents) + :arg pipeline: Ingest pipeline to set on index requests made by + this action. (default: none) + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg q: Query in the Lucene query string syntax + :arg refresh: Should the affected indexes be refreshed? + :arg request_cache: Specify if request cache should be used for + this request or not, defaults to index level setting + :arg requests_per_second: The throttle to set on this request in + sub-requests per second. -1 means no throttle. 
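+ For example, ``requests_per_second=100`` caps the operation at one
+ hundred sub-requests per second, while ``-1`` disables throttling
+ entirely.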
+ :arg routing: A comma-separated list of specific routing values
+ :arg scroll: Specify how long a consistent view of the index
+ should be maintained for scrolled search
+ :arg scroll_size: Size on the scroll request powering the update
+ by query Default: 100
+ :arg search_timeout: Explicit timeout for each search request.
+ Defaults to no timeout.
+ :arg search_type: Search operation type Valid choices:
+ query_then_fetch, dfs_query_then_fetch
+ :arg slices: The number of slices this task should be divided
+ into. Defaults to 1, meaning the task isn't sliced into subtasks. Can be
+ set to `auto`. Default: 1
+ :arg sort: A comma-separated list of <field>:<direction> pairs
+ :arg stats: Specific 'tag' of the request for logging and
+ statistical purposes
+ :arg terminate_after: The maximum number of documents to collect
+ for each shard, upon reaching which the query execution will terminate
+ early.
+ :arg timeout: Time each individual bulk request should wait for
+ shards that are unavailable. Default: 1m
+ :arg version: Specify whether to return document version as part
+ of a hit
+ :arg version_type: Should the document increment the version
+ number (internal) on hit or not (reindex)
+ :arg wait_for_active_shards: Sets the number of shard copies
+ that must be active before proceeding with the update by query
+ operation. Defaults to 1, meaning the primary shard only. Set to `all`
+ for all shard copies, otherwise set to any non-negative value less than
+ or equal to the total number of copies for the shard (number of replicas
+ + 1)
+ :arg wait_for_completion: Whether the request should block until
+ the update by query operation is complete. Default: True
+ """
+ # from is a reserved word so it cannot be used, use from_ instead
+ if "from_" in params:
+ params["from"] = params.pop("from_")
+
+ if index in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'index'.")
+
+ return self.transport.perform_request(
+ "POST",
+ _make_path(index, "_update_by_query"),
+ params=params,
+ headers=headers,
+ body=body,
+ )
diff --git a/elasticsearch/_async/client/async_search.py b/elasticsearch/_async/client/async_search.py
new file mode 100644
index 000000000..d6062cbee
--- /dev/null
+++ b/elasticsearch/_async/client/async_search.py
@@ -0,0 +1,191 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, SKIP_IN_PATH, query_params, _make_path
+
+
+class AsyncSearchClient(NamespacedClient):
+ @query_params()
+ def delete(self, id, params=None, headers=None):
+ """
+ Deletes an async search by ID. If the search is still running, the search
+ request will be cancelled. Otherwise, the saved search results are deleted.
+ ``_
+
+ :arg id: The async search ID
+ """
+ if id in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'id'.")
+
+ return self.transport.perform_request(
+ "DELETE", _make_path("_async_search", id), params=params, headers=headers
+ )
+
+ @query_params("keep_alive", "typed_keys", "wait_for_completion_timeout")
+ def get(self, id, params=None, headers=None):
+ """
+ Retrieves the results of a previously submitted async search request given its
+ ID.
+ ``_ + + :arg id: The async search ID + :arg keep_alive: Specify the time interval in which the results + (partial or final) for this search will be available + :arg typed_keys: Specify whether aggregation and suggester names + should be prefixed by their respective types in the response + :arg wait_for_completion_timeout: Specify the time that the + request should block waiting for the final response + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "GET", _make_path("_async_search", id), params=params, headers=headers + ) + + @query_params( + "_source", + "_source_excludes", + "_source_includes", + "allow_no_indices", + "allow_partial_search_results", + "analyze_wildcard", + "analyzer", + "batched_reduce_size", + "default_operator", + "df", + "docvalue_fields", + "expand_wildcards", + "explain", + "from_", + "ignore_throttled", + "ignore_unavailable", + "keep_alive", + "keep_on_completion", + "lenient", + "max_concurrent_shard_requests", + "preference", + "q", + "request_cache", + "routing", + "search_type", + "seq_no_primary_term", + "size", + "sort", + "stats", + "stored_fields", + "suggest_field", + "suggest_mode", + "suggest_size", + "suggest_text", + "terminate_after", + "timeout", + "track_scores", + "track_total_hits", + "typed_keys", + "version", + "wait_for_completion_timeout", + ) + def submit(self, body=None, index=None, params=None, headers=None): + """ + Executes a search request asynchronously. + ``_ + + :arg body: The search definition using the Query DSL + :arg index: A comma-separated list of index names to search; use + `_all` or empty string to perform the operation on all indices + :arg _source: True or false to return the _source field or not, + or a list of fields to return + :arg _source_excludes: A list of fields to exclude from the + returned _source field + :arg _source_includes: A list of fields to extract and return + from the _source field + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg allow_partial_search_results: Indicate if an error should + be returned if there is a partial search failure or timeout Default: + True + :arg analyze_wildcard: Specify whether wildcard and prefix + queries should be analyzed (default: false) + :arg analyzer: The analyzer to use for the query string + :arg batched_reduce_size: The number of shard results that + should be reduced at once on the coordinating node. This value should be + used as the granularity at which progress results will be made + available. Default: 5 + :arg default_operator: The default operator for query string + query (AND or OR) Valid choices: AND, OR Default: OR + :arg df: The field to use as default where no field prefix is + given in the query string + :arg docvalue_fields: A comma-separated list of fields to return + as the docvalue representation of a field for each hit + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. 
Valid choices: open, + closed, hidden, none, all Default: open + :arg explain: Specify whether to return detailed information + about score computation as part of a hit + :arg from\\_: Starting offset (default: 0) + :arg ignore_throttled: Whether specified concrete, expanded or + aliased indices should be ignored when throttled + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg keep_alive: Update the time interval in which the results + (partial or final) for this search will be available Default: 5d + :arg keep_on_completion: Control whether the response should be + stored in the cluster if it completed within the provided + [wait_for_completion] time (default: false) + :arg lenient: Specify whether format-based query failures (such + as providing text to a numeric field) should be ignored + :arg max_concurrent_shard_requests: The number of concurrent + shard requests per node this search executes concurrently. This value + should be used to limit the impact of the search on the cluster in order + to limit the number of concurrent shard requests Default: 5 + :arg preference: Specify the node or shard the operation should + be performed on (default: random) + :arg q: Query in the Lucene query string syntax + :arg request_cache: Specify if request cache should be used for + this request or not, defaults to true + :arg routing: A comma-separated list of specific routing values + :arg search_type: Search operation type Valid choices: + query_then_fetch, dfs_query_then_fetch + :arg seq_no_primary_term: Specify whether to return sequence + number and primary term of the last modification of each hit + :arg size: Number of hits to return (default: 10) + :arg sort: A comma-separated list of : pairs + :arg stats: Specific 'tag' of the request for logging and + statistical purposes + :arg stored_fields: A comma-separated list of stored fields to + return as part of a hit + :arg suggest_field: Specify which field to use for suggestions + :arg suggest_mode: Specify suggest mode Valid choices: missing, + popular, always Default: missing + :arg suggest_size: How many suggestions to return in response + :arg suggest_text: The source text for which the suggestions + should be returned + :arg terminate_after: The maximum number of documents to collect + for each shard, upon reaching which the query execution will terminate + early. 
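+ For example, ``terminate_after=10000`` stops collecting hits on each
+ shard once ten thousand documents have been examined.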
+ :arg timeout: Explicit operation timeout + :arg track_scores: Whether to calculate and return scores even + if they are not used for sorting + :arg track_total_hits: Indicate if the number of documents that + match the query should be tracked + :arg typed_keys: Specify whether aggregation and suggester names + should be prefixed by their respective types in the response + :arg version: Specify whether to return document version as part + of a hit + :arg wait_for_completion_timeout: Specify the time that the + request should block waiting for the final response Default: 1s + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "POST", + _make_path(index, "_async_search"), + params=params, + headers=headers, + body=body, + ) diff --git a/elasticsearch/_async/client/autoscaling.py b/elasticsearch/_async/client/autoscaling.py new file mode 100644 index 000000000..a648d79e4 --- /dev/null +++ b/elasticsearch/_async/client/autoscaling.py @@ -0,0 +1,75 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params, SKIP_IN_PATH, _make_path + + +class AutoscalingClient(NamespacedClient): + @query_params() + def get_autoscaling_decision(self, params=None, headers=None): + """ + Gets the current autoscaling decision based on the configured autoscaling + policy, indicating whether or not autoscaling is needed. + ``_ + """ + return self.transport.perform_request( + "GET", "/_autoscaling/decision", params=params, headers=headers + ) + + @query_params() + def delete_autoscaling_policy(self, name, params=None, headers=None): + """ + Deletes an autoscaling policy. + ``_ + + :arg name: the name of the autoscaling policy + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_autoscaling", "policy", name), + params=params, + headers=headers, + ) + + @query_params() + def put_autoscaling_policy(self, name, body, params=None, headers=None): + """ + Creates a new autoscaling policy. + ``_ + + :arg name: the name of the autoscaling policy + :arg body: the specification of the autoscaling policy + """ + for param in (name, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_autoscaling", "policy", name), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def get_autoscaling_policy(self, name, params=None, headers=None): + """ + Retrieves an autoscaling policy. + ``_ + + :arg name: the name of the autoscaling policy + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "GET", + _make_path("_autoscaling", "policy", name), + params=params, + headers=headers, + ) diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py new file mode 100644 index 000000000..84282850e --- /dev/null +++ b/elasticsearch/_async/client/cat.py @@ -0,0 +1,713 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
+# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params, _make_path + + +class CatClient(NamespacedClient): + @query_params("expand_wildcards", "format", "h", "help", "local", "s", "v") + def aliases(self, name=None, params=None, headers=None): + """ + Shows information about currently configured aliases to indices including + filter and routing infos. + ``_ + + :arg name: A comma-separated list of alias names to return + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: all + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", _make_path("_cat", "aliases", name), params=params, headers=headers + ) + + @query_params("bytes", "format", "h", "help", "local", "master_timeout", "s", "v") + def allocation(self, node_id=None, params=None, headers=None): + """ + Provides a snapshot of how many shards are allocated to each data node and how + much disk space they are using. + ``_ + + :arg node_id: A comma-separated list of node IDs or names to + limit the returned information + :arg bytes: The unit in which to display byte values Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", + _make_path("_cat", "allocation", node_id), + params=params, + headers=headers, + ) + + @query_params("format", "h", "help", "s", "v") + def count(self, index=None, params=None, headers=None): + """ + Provides quick access to the document count of the entire cluster, or + individual indices. + ``_ + + :arg index: A comma-separated list of index names to limit the + returned information + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", _make_path("_cat", "count", index), params=params, headers=headers + ) + + @query_params("format", "h", "help", "s", "time", "ts", "v") + def health(self, params=None, headers=None): + """ + Returns a concise representation of the cluster health. + ``_ + + :arg format: a short version of the Accept header, e.g. 
json,
+ yaml
+ :arg h: Comma-separated list of column names to display
+ :arg help: Return help information
+ :arg s: Comma-separated list of column names or column aliases
+ to sort by
+ :arg time: The unit in which to display time values Valid
+ choices: d, h, m, s, ms, micros, nanos
+ :arg ts: Set to false to disable timestamping Default: True
+ :arg v: Verbose mode. Display column headers
+ """
+ return self.transport.perform_request(
+ "GET", "/_cat/health", params=params, headers=headers
+ )
+
+ @query_params("help", "s")
+ def help(self, params=None, headers=None):
+ """
+ Returns help for the Cat APIs.
+ ``_
+
+ :arg help: Return help information
+ :arg s: Comma-separated list of column names or column aliases
+ to sort by
+ """
+ return self.transport.perform_request(
+ "GET", "/_cat", params=params, headers=headers
+ )
+
+ @query_params(
+ "bytes",
+ "expand_wildcards",
+ "format",
+ "h",
+ "health",
+ "help",
+ "include_unloaded_segments",
+ "local",
+ "master_timeout",
+ "pri",
+ "s",
+ "time",
+ "v",
+ )
+ def indices(self, index=None, params=None, headers=None):
+ """
+ Returns information about indices: number of primaries and replicas, document
+ counts, disk size, ...
+ ``_
+
+ :arg index: A comma-separated list of index names to limit the
+ returned information
+ :arg bytes: The unit in which to display byte values Valid
+ choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+ :arg expand_wildcards: Whether to expand wildcard expression to
+ concrete indices that are open, closed or both. Valid choices: open,
+ closed, hidden, none, all Default: all
+ :arg format: a short version of the Accept header, e.g. json,
+ yaml
+ :arg h: Comma-separated list of column names to display
+ :arg health: A health status ("green", "yellow", or "red") to
+ filter only indices matching the specified health status Valid choices:
+ green, yellow, red
+ :arg help: Return help information
+ :arg include_unloaded_segments: If set to true segment stats
+ will include stats for segments that are not currently loaded into
+ memory
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ :arg pri: Set to true to return stats only for primary shards
+ :arg s: Comma-separated list of column names or column aliases
+ to sort by
+ :arg time: The unit in which to display time values Valid
+ choices: d, h, m, s, ms, micros, nanos
+ :arg v: Verbose mode. Display column headers
+ """
+ return self.transport.perform_request(
+ "GET", _make_path("_cat", "indices", index), params=params, headers=headers
+ )
+
+ @query_params("format", "h", "help", "local", "master_timeout", "s", "v")
+ def master(self, params=None, headers=None):
+ """
+ Returns information about the master node.
+ ``_
+
+ :arg format: a short version of the Accept header, e.g. json,
+ yaml
+ :arg h: Comma-separated list of column names to display
+ :arg help: Return help information
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ :arg s: Comma-separated list of column names or column aliases
+ to sort by
+ :arg v: Verbose mode. Display column headers
+ """
+ return self.transport.perform_request(
+ "GET", "/_cat/master", params=params, headers=headers
+ )
+
+ @query_params(
+ "bytes", "format", "full_id", "h", "help", "master_timeout", "s", "time", "v"
+ )
+ def nodes(self, params=None, headers=None):
+ """
+ Returns basic statistics about performance of cluster nodes.
+ ``_
+
+ :arg bytes: The unit in which to display byte values Valid
+ choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+ :arg format: a short version of the Accept header, e.g. json,
+ yaml
+ :arg full_id: Return the full node ID instead of the shortened
+ version (default: false)
+ :arg h: Comma-separated list of column names to display
+ :arg help: Return help information
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ :arg s: Comma-separated list of column names or column aliases
+ to sort by
+ :arg time: The unit in which to display time values Valid
+ choices: d, h, m, s, ms, micros, nanos
+ :arg v: Verbose mode. Display column headers
+ """
+ return self.transport.perform_request(
+ "GET", "/_cat/nodes", params=params, headers=headers
+ )
+
+ @query_params(
+ "active_only", "bytes", "detailed", "format", "h", "help", "s", "time", "v"
+ )
+ def recovery(self, index=None, params=None, headers=None):
+ """
+ Returns information about index shard recoveries, both on-going and completed.
+ ``_
+
+ :arg index: Comma-separated list or wildcard expression of index
+ names to limit the returned information
+ :arg active_only: If `true`, the response only includes ongoing
+ shard recoveries
+ :arg bytes: The unit in which to display byte values Valid
+ choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+ :arg detailed: If `true`, the response includes detailed
+ information about shard recoveries
+ :arg format: a short version of the Accept header, e.g. json,
+ yaml
+ :arg h: Comma-separated list of column names to display
+ :arg help: Return help information
+ :arg s: Comma-separated list of column names or column aliases
+ to sort by
+ :arg time: The unit in which to display time values Valid
+ choices: d, h, m, s, ms, micros, nanos
+ :arg v: Verbose mode. Display column headers
+ """
+ return self.transport.perform_request(
+ "GET", _make_path("_cat", "recovery", index), params=params, headers=headers
+ )
+
+ @query_params(
+ "bytes", "format", "h", "help", "local", "master_timeout", "s", "time", "v"
+ )
+ def shards(self, index=None, params=None, headers=None):
+ """
+ Provides a detailed view of shard allocation on nodes.
+ ``_
+
+ :arg index: A comma-separated list of index names to limit the
+ returned information
+ :arg bytes: The unit in which to display byte values Valid
+ choices: b, k, kb, m, mb, g, gb, t, tb, p, pb
+ :arg format: a short version of the Accept header, e.g. json,
+ yaml
+ :arg h: Comma-separated list of column names to display
+ :arg help: Return help information
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ :arg s: Comma-separated list of column names or column aliases
+ to sort by
+ :arg time: The unit in which to display time values Valid
+ choices: d, h, m, s, ms, micros, nanos
+ :arg v: Verbose mode.
Display column headers + """ + return self.transport.perform_request( + "GET", _make_path("_cat", "shards", index), params=params, headers=headers + ) + + @query_params("bytes", "format", "h", "help", "s", "v") + def segments(self, index=None, params=None, headers=None): + """ + Provides low-level information about the segments in the shards of an index. + ``_ + + :arg index: A comma-separated list of index names to limit the + returned information + :arg bytes: The unit in which to display byte values Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", _make_path("_cat", "segments", index), params=params, headers=headers + ) + + @query_params("format", "h", "help", "local", "master_timeout", "s", "time", "v") + def pending_tasks(self, params=None, headers=None): + """ + Returns a concise representation of the cluster pending tasks. + ``_ + + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", "/_cat/pending_tasks", params=params, headers=headers + ) + + @query_params("format", "h", "help", "local", "master_timeout", "s", "time", "v") + def thread_pool(self, thread_pool_patterns=None, params=None, headers=None): + """ + Returns cluster-wide thread pool statistics per node. By default the active, + queue and rejected statistics are returned for all thread pools. + ``_ + + :arg thread_pool_patterns: A comma-separated list of regular- + expressions to filter the thread pools in the output + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", + _make_path("_cat", "thread_pool", thread_pool_patterns), + params=params, + headers=headers, + ) + + @query_params("bytes", "format", "h", "help", "s", "v") + def fielddata(self, fields=None, params=None, headers=None): + """ + Shows how much heap memory is currently being used by fielddata on every data + node in the cluster. + ``_ + + :arg fields: A comma-separated list of fields to return in the + output + :arg bytes: The unit in which to display byte values Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg format: a short version of the Accept header, e.g. 
json,
+ yaml
+ :arg h: Comma-separated list of column names to display
+ :arg help: Return help information
+ :arg s: Comma-separated list of column names or column aliases
+ to sort by
+ :arg v: Verbose mode. Display column headers
+ """
+ return self.transport.perform_request(
+ "GET",
+ _make_path("_cat", "fielddata", fields),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params("format", "h", "help", "local", "master_timeout", "s", "v")
+ def plugins(self, params=None, headers=None):
+ """
+ Returns information about installed plugins across nodes.
+ ``_
+
+ :arg format: a short version of the Accept header, e.g. json,
+ yaml
+ :arg h: Comma-separated list of column names to display
+ :arg help: Return help information
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ :arg s: Comma-separated list of column names or column aliases
+ to sort by
+ :arg v: Verbose mode. Display column headers
+ """
+ return self.transport.perform_request(
+ "GET", "/_cat/plugins", params=params, headers=headers
+ )
+
+ @query_params("format", "h", "help", "local", "master_timeout", "s", "v")
+ def nodeattrs(self, params=None, headers=None):
+ """
+ Returns information about custom node attributes.
+ ``_
+
+ :arg format: a short version of the Accept header, e.g. json,
+ yaml
+ :arg h: Comma-separated list of column names to display
+ :arg help: Return help information
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ :arg s: Comma-separated list of column names or column aliases
+ to sort by
+ :arg v: Verbose mode. Display column headers
+ """
+ return self.transport.perform_request(
+ "GET", "/_cat/nodeattrs", params=params, headers=headers
+ )
+
+ @query_params("format", "h", "help", "local", "master_timeout", "s", "v")
+ def repositories(self, params=None, headers=None):
+ """
+ Returns information about snapshot repositories registered in the cluster.
+ ``_
+
+ :arg format: a short version of the Accept header, e.g. json,
+ yaml
+ :arg h: Comma-separated list of column names to display
+ :arg help: Return help information
+ :arg local: Return local information, do not retrieve the state
+ from master node
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ :arg s: Comma-separated list of column names or column aliases
+ to sort by
+ :arg v: Verbose mode. Display column headers
+ """
+ return self.transport.perform_request(
+ "GET", "/_cat/repositories", params=params, headers=headers
+ )
+
+ @query_params(
+ "format", "h", "help", "ignore_unavailable", "master_timeout", "s", "time", "v"
+ )
+ def snapshots(self, repository=None, params=None, headers=None):
+ """
+ Returns all snapshots in a specific repository.
+ ``_
+
+ :arg repository: Name of repository from which to fetch the
+ snapshot information
+ :arg format: a short version of the Accept header, e.g.
json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg ignore_unavailable: Set to true to ignore unavailable + snapshots + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", + _make_path("_cat", "snapshots", repository), + params=params, + headers=headers, + ) + + @query_params( + "actions", + "detailed", + "format", + "h", + "help", + "node_id", + "parent_task", + "s", + "time", + "v", + ) + def tasks(self, params=None, headers=None): + """ + Returns information about the tasks currently executing on one or more nodes in + the cluster. + ``_ + + :arg actions: A comma-separated list of actions that should be + returned. Leave empty to return all. + :arg detailed: Return detailed task information (default: false) + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg node_id: A comma-separated list of node IDs or names to + limit the returned information; use `_local` to return information from + the node you're connecting to, leave empty to get information from all + nodes + :arg parent_task: Return tasks with specified parent task id. + Set to -1 to return all. + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", "/_cat/tasks", params=params, headers=headers + ) + + @query_params("format", "h", "help", "local", "master_timeout", "s", "v") + def templates(self, name=None, params=None, headers=None): + """ + Returns information about existing templates. + ``_ + + :arg name: A pattern that returned template names must match + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", _make_path("_cat", "templates", name), params=params, headers=headers + ) + + @query_params("allow_no_match", "bytes", "format", "h", "help", "s", "time", "v") + def ml_data_frame_analytics(self, id=None, params=None, headers=None): + """ + Gets configuration and usage information about data frame analytics jobs. + ``_ + + :arg id: The ID of the data frame analytics to fetch + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no configs. (This includes `_all` string or when no configs have + been specified) + :arg bytes: The unit in which to display byte values Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg format: a short version of the Accept header, e.g. 
json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", + _make_path("_cat", "ml", "data_frame", "analytics", id), + params=params, + headers=headers, + ) + + @query_params("allow_no_datafeeds", "format", "h", "help", "s", "time", "v") + def ml_datafeeds(self, datafeed_id=None, params=None, headers=None): + """ + Gets configuration and usage information about datafeeds. + ``_ + + :arg datafeed_id: The ID of the datafeeds stats to fetch + :arg allow_no_datafeeds: Whether to ignore if a wildcard + expression matches no datafeeds. (This includes `_all` string or when no + datafeeds have been specified) + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", + _make_path("_cat", "ml", "datafeeds", datafeed_id), + params=params, + headers=headers, + ) + + @query_params("allow_no_jobs", "bytes", "format", "h", "help", "s", "time", "v") + def ml_jobs(self, job_id=None, params=None, headers=None): + """ + Gets configuration and usage information about anomaly detection jobs. + ``_ + + :arg job_id: The ID of the jobs stats to fetch + :arg allow_no_jobs: Whether to ignore if a wildcard expression + matches no jobs. (This includes `_all` string or when no jobs have been + specified) + :arg bytes: The unit in which to display byte values Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + return self.transport.perform_request( + "GET", + _make_path("_cat", "ml", "anomaly_detectors", job_id), + params=params, + headers=headers, + ) + + @query_params( + "allow_no_match", + "bytes", + "format", + "from_", + "h", + "help", + "s", + "size", + "time", + "v", + ) + def ml_trained_models(self, model_id=None, params=None, headers=None): + """ + Gets configuration and usage information about inference trained models. + ``_ + + :arg model_id: The ID of the trained models stats to fetch + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no trained models. (This includes `_all` string or when no + trained models have been specified) Default: True + :arg bytes: The unit in which to display byte values Valid + choices: b, k, kb, m, mb, g, gb, t, tb, p, pb + :arg format: a short version of the Accept header, e.g. 
json, + yaml + :arg from\\_: skips a number of trained models + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg size: specifies a max number of trained models to get + Default: 100 + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "GET", + _make_path("_cat", "ml", "trained_models", model_id), + params=params, + headers=headers, + ) + + @query_params( + "allow_no_match", "format", "from_", "h", "help", "s", "size", "time", "v" + ) + def transforms(self, transform_id=None, params=None, headers=None): + """ + Gets configuration and usage information about transforms. + ``_ + + :arg transform_id: The id of the transform for which to get + stats. '_all' or '*' implies all transforms + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no transforms. (This includes `_all` string or when no + transforms have been specified) + :arg format: a short version of the Accept header, e.g. json, + yaml + :arg from\\_: skips a number of transform configs, defaults to 0 + :arg h: Comma-separated list of column names to display + :arg help: Return help information + :arg s: Comma-separated list of column names or column aliases + to sort by + :arg size: specifies a max number of transforms to get, defaults + to 100 + :arg time: The unit in which to display time values Valid + choices: d, h, m, s, ms, micros, nanos + :arg v: Verbose mode. Display column headers + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "GET", + _make_path("_cat", "transforms", transform_id), + params=params, + headers=headers, + ) diff --git a/elasticsearch/_async/client/ccr.py b/elasticsearch/_async/client/ccr.py new file mode 100644 index 000000000..fa0568fc1 --- /dev/null +++ b/elasticsearch/_async/client/ccr.py @@ -0,0 +1,259 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class CcrClient(NamespacedClient): + @query_params() + def delete_auto_follow_pattern(self, name, params=None, headers=None): + """ + Deletes auto-follow patterns. + ``_ + + :arg name: The name of the auto follow pattern. + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_ccr", "auto_follow", name), + params=params, + headers=headers, + ) + + @query_params("wait_for_active_shards") + def follow(self, index, body, params=None, headers=None): + """ + Creates a new follower index configured to follow the referenced leader index. + ``_ + + :arg index: The name of the follower index + :arg body: The name of the leader index and other optional ccr + related parameters + :arg wait_for_active_shards: Sets the number of shard copies + that must be active before returning. Defaults to 0. 
Set to `all` for + all shard copies, otherwise set to any non-negative value less than or + equal to the total number of copies for the shard (number of replicas + + 1) Default: 0 + """ + for param in (index, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path(index, "_ccr", "follow"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def follow_info(self, index, params=None, headers=None): + """ + Retrieves information about all follower indices, including parameters and + status for each follower index + ``_ + + :arg index: A comma-separated list of index patterns; use `_all` + to perform the operation on all indices + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "GET", _make_path(index, "_ccr", "info"), params=params, headers=headers + ) + + @query_params() + def follow_stats(self, index, params=None, headers=None): + """ + Retrieves follower stats. return shard-level stats about the following tasks + associated with each shard for the specified indices. + ``_ + + :arg index: A comma-separated list of index patterns; use `_all` + to perform the operation on all indices + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "GET", _make_path(index, "_ccr", "stats"), params=params, headers=headers + ) + + @query_params() + def forget_follower(self, index, body, params=None, headers=None): + """ + Removes the follower retention leases from the leader. + ``_ + + :arg index: the name of the leader index for which specified + follower retention leases should be removed + :arg body: the name and UUID of the follower index, the name of + the cluster containing the follower index, and the alias from the + perspective of that cluster for the remote cluster containing the leader + index + """ + for param in (index, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_ccr", "forget_follower"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def get_auto_follow_pattern(self, name=None, params=None, headers=None): + """ + Gets configured auto-follow patterns. Returns the specified auto-follow pattern + collection. + ``_ + + :arg name: The name of the auto follow pattern. + """ + return self.transport.perform_request( + "GET", + _make_path("_ccr", "auto_follow", name), + params=params, + headers=headers, + ) + + @query_params() + def pause_follow(self, index, params=None, headers=None): + """ + Pauses a follower index. The follower index will not fetch any additional + operations from the leader index. + ``_ + + :arg index: The name of the follower index that should pause + following its leader index. + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_ccr", "pause_follow"), + params=params, + headers=headers, + ) + + @query_params() + def put_auto_follow_pattern(self, name, body, params=None, headers=None): + """ + Creates a new named collection of auto-follow patterns against a specified + remote cluster. 
Newly created indices on the remote cluster matching any of the + specified patterns will be automatically configured as follower indices. + ``_ + + :arg name: The name of the auto follow pattern. + :arg body: The specification of the auto follow pattern + """ + for param in (name, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_ccr", "auto_follow", name), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def resume_follow(self, index, body=None, params=None, headers=None): + """ + Resumes a follower index that has been paused + ``_ + + :arg index: The name of the follow index to resume following. + :arg body: The name of the leader index and other optional ccr + related parameters + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_ccr", "resume_follow"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def stats(self, params=None, headers=None): + """ + Gets all stats related to cross-cluster replication. + ``_ + """ + return self.transport.perform_request( + "GET", "/_ccr/stats", params=params, headers=headers + ) + + @query_params() + def unfollow(self, index, params=None, headers=None): + """ + Stops the following task associated with a follower index and removes index + metadata and settings associated with cross-cluster replication. + ``_ + + :arg index: The name of the follower index that should be turned + into a regular index. + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_ccr", "unfollow"), + params=params, + headers=headers, + ) + + @query_params() + def pause_auto_follow_pattern(self, name, params=None, headers=None): + """ + Pauses an auto-follow pattern + ``_ + + :arg name: The name of the auto follow pattern that should pause + discovering new indices to follow. + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "POST", + _make_path("_ccr", "auto_follow", name, "pause"), + params=params, + headers=headers, + ) + + @query_params() + def resume_auto_follow_pattern(self, name, params=None, headers=None): + """ + Resumes an auto-follow pattern that has been paused + ``_ + + :arg name: The name of the auto follow pattern to resume + discovering new indices to follow. + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "POST", + _make_path("_ccr", "auto_follow", name, "resume"), + params=params, + headers=headers, + ) diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py new file mode 100644 index 000000000..600b89e70 --- /dev/null +++ b/elasticsearch/_async/client/cluster.py @@ -0,0 +1,361 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class ClusterClient(NamespacedClient):
+    @query_params(
+        "expand_wildcards",
+        "level",
+        "local",
+        "master_timeout",
+        "timeout",
+        "wait_for_active_shards",
+        "wait_for_events",
+        "wait_for_no_initializing_shards",
+        "wait_for_no_relocating_shards",
+        "wait_for_nodes",
+        "wait_for_status",
+    )
+    def health(self, index=None, params=None, headers=None):
+        """
+        Returns basic information about the health of the cluster.
+        ``_
+
+        :arg index: Limit the information returned to a specific index
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both. Valid choices: open,
+            closed, hidden, none, all Default: all
+        :arg level: Specify the level of detail for returned information
+            Valid choices: cluster, indices, shards Default: cluster
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Wait until the specified number of
+            shards is active
+        :arg wait_for_events: Wait until all currently queued events
+            with the given priority are processed Valid choices: immediate, urgent,
+            high, normal, low, languid
+        :arg wait_for_no_initializing_shards: Whether to wait until
+            there are no initializing shards in the cluster
+        :arg wait_for_no_relocating_shards: Whether to wait until there
+            are no relocating shards in the cluster
+        :arg wait_for_nodes: Wait until the specified number of nodes is
+            available
+        :arg wait_for_status: Wait until cluster is in a specific state
+            Valid choices: green, yellow, red
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_cluster", "health", index),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("local", "master_timeout")
+    def pending_tasks(self, params=None, headers=None):
+        """
+        Returns a list of any cluster-level changes (e.g. create index, update mapping,
+        allocate or fail shard) which have not yet been executed.
+        ``_
+
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        return self.transport.perform_request(
+            "GET", "/_cluster/pending_tasks", params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "flat_settings",
+        "ignore_unavailable",
+        "local",
+        "master_timeout",
+        "wait_for_metadata_version",
+        "wait_for_timeout",
+    )
+    def state(self, metric=None, index=None, params=None, headers=None):
+        """
+        Returns comprehensive information about the state of the cluster.
+        ``_
+
+        :arg metric: Limit the information returned to the specified
+            metrics Valid choices: _all, blocks, metadata, nodes, routing_table,
+            routing_nodes, master_node, version
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both. Valid choices: open,
+            closed, hidden, none, all Default: open
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg wait_for_metadata_version: Wait for the metadata version to
+            be equal or greater than the specified metadata version
+        :arg wait_for_timeout: The maximum time to wait for
+            wait_for_metadata_version before timing out
+        """
+        if index and metric in SKIP_IN_PATH:
+            metric = "_all"
+
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_cluster", "state", metric, index),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("flat_settings", "timeout")
+    def stats(self, node_id=None, params=None, headers=None):
+        """
+        Returns a high-level overview of cluster statistics.
+        ``_
+
+        :arg node_id: A comma-separated list of node IDs or names to
+            limit the returned information; use `_local` to return information from
+            the node you're connecting to, leave empty to get information from all
+            nodes
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg timeout: Explicit operation timeout
+        """
+        return self.transport.perform_request(
+            "GET",
+            "/_cluster/stats"
+            if node_id in SKIP_IN_PATH
+            else _make_path("_cluster", "stats", "nodes", node_id),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params(
+        "dry_run", "explain", "master_timeout", "metric", "retry_failed", "timeout"
+    )
+    def reroute(self, body=None, params=None, headers=None):
+        """
+        Allows manually changing the allocation of individual shards in the cluster.
+        ``_
+
+        :arg body: The definition of `commands` to perform (`move`,
+            `cancel`, `allocate`)
+        :arg dry_run: Simulate the operation only and return the
+            resulting state
+        :arg explain: Return an explanation of why the commands can or
+            cannot be executed
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg metric: Limit the information returned to the specified
+            metrics. Defaults to all but metadata Valid choices: _all, blocks,
+            metadata, nodes, routing_table, master_node, version
+        :arg retry_failed: Retries allocation of shards that are blocked
+            due to too many subsequent allocation failures
+        :arg timeout: Explicit operation timeout
+        """
+        return self.transport.perform_request(
+            "POST", "/_cluster/reroute", params=params, headers=headers, body=body
+        )
+
+    @query_params("flat_settings", "include_defaults", "master_timeout", "timeout")
+    def get_settings(self, params=None, headers=None):
+        """
+        Returns cluster settings.
+        ``_
+
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg include_defaults: Whether to return all default cluster
+            settings.
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        """
+        return self.transport.perform_request(
+            "GET", "/_cluster/settings", params=params, headers=headers
+        )
+
+    @query_params("flat_settings", "master_timeout", "timeout")
+    def put_settings(self, body, params=None, headers=None):
+        """
+        Updates the cluster settings.
+        ``_
+
+        :arg body: The settings to be updated. Can be either `transient`
+            or `persistent` (survives cluster restart).
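+
+            A minimal sketch of a transient update, assuming ``es`` is an
+            async client instance (the setting shown is illustrative)::
+
+                await es.cluster.put_settings(
+                    body={"transient": {"cluster.routing.allocation.enable": "all"}}
+                )
+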
+        :arg flat_settings: Return settings in flat format (default:
+            false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        :arg timeout: Explicit operation timeout
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "PUT", "/_cluster/settings", params=params, headers=headers, body=body
+        )
+
+    @query_params()
+    def remote_info(self, params=None, headers=None):
+        """
+        Returns the information about configured remote clusters.
+        ``_
+        """
+        return self.transport.perform_request(
+            "GET", "/_remote/info", params=params, headers=headers
+        )
+
+    @query_params("include_disk_info", "include_yes_decisions")
+    def allocation_explain(self, body=None, params=None, headers=None):
+        """
+        Provides explanations for shard allocations in the cluster.
+        ``_
+
+        :arg body: The index, shard, and primary flag to explain. Empty
+            means 'explain the first unassigned shard'
+        :arg include_disk_info: Return information about disk usage and
+            shard sizes (default: false)
+        :arg include_yes_decisions: Return 'YES' decisions in
+            explanation (default: false)
+        """
+        return self.transport.perform_request(
+            "POST",
+            "/_cluster/allocation/explain",
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("master_timeout", "timeout")
+    def delete_component_template(self, name, params=None, headers=None):
+        """
+        Deletes a component template
+        ``_
+
+        :arg name: The name of the template
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_component_template", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("local", "master_timeout")
+    def get_component_template(self, name=None, params=None, headers=None):
+        """
+        Returns one or more component templates
+        ``_
+
+        :arg name: The comma separated names of the component templates
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_component_template", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("create", "master_timeout", "timeout")
+    def put_component_template(self, name, body, params=None, headers=None):
+        """
+        Creates or updates a component template
+        ``_
+
+        :arg name: The name of the template
+        :arg body: The template definition
+        :arg create: Whether the index template should only be added if
+            new or can also replace an existing one
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        """
+        for param in (name, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_component_template", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("local", "master_timeout")
+    def exists_component_template(self, name, params=None, headers=None):
+        """
+        Returns information about whether a particular component template exists
+        ``_
+
+        :arg name: The name of the template
+        :arg local: Return local information, do not retrieve the state
+            from master node (default: false)
+        :arg master_timeout: Explicit operation timeout for connection
+            to master node
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "HEAD",
+            _make_path("_component_template", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("wait_for_removal")
+    def delete_voting_config_exclusions(self, params=None, headers=None):
+        """
+        Clears cluster voting config exclusions.
+        ``_
+
+        :arg wait_for_removal: Specifies whether to wait for all
+            excluded nodes to be removed from the cluster before clearing the voting
+            configuration exclusions list. Default: True
+        """
+        return self.transport.perform_request(
+            "DELETE",
+            "/_cluster/voting_config_exclusions",
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("node_ids", "node_names", "timeout")
+    def post_voting_config_exclusions(self, params=None, headers=None):
+        """
+        Updates the cluster voting config exclusions by node ids or node names.
+        ``_
+
+        :arg node_ids: A comma-separated list of the persistent ids of
+            the nodes to exclude from the voting configuration. If specified, you
+            may not also specify ?node_names.
+        :arg node_names: A comma-separated list of the names of the
+            nodes to exclude from the voting configuration. If specified, you may
+            not also specify ?node_ids.
+        :arg timeout: Explicit operation timeout Default: 30s
+        """
+        return self.transport.perform_request(
+            "POST", "/_cluster/voting_config_exclusions", params=params, headers=headers
+        )
diff --git a/elasticsearch/_async/client/enrich.py b/elasticsearch/_async/client/enrich.py
new file mode 100644
index 000000000..ba51d0e19
--- /dev/null
+++ b/elasticsearch/_async/client/enrich.py
@@ -0,0 +1,89 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class EnrichClient(NamespacedClient):
+    @query_params()
+    def delete_policy(self, name, params=None, headers=None):
+        """
+        Deletes an existing enrich policy and its enrich index.
+        ``_
+
+        :arg name: The name of the enrich policy
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "DELETE",
+            _make_path("_enrich", "policy", name),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("wait_for_completion")
+    def execute_policy(self, name, params=None, headers=None):
+        """
+        Creates the enrich index for an existing enrich policy.
+        ``_
+
+        :arg name: The name of the enrich policy
+        :arg wait_for_completion: Whether the request should block until
+            the execution is complete. Default: True
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_enrich", "policy", name, "_execute"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def get_policy(self, name=None, params=None, headers=None):
+        """
+        Gets information about an enrich policy.
+        ``_
+
+        :arg name: A comma-separated list of enrich policy names
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_enrich", "policy", name), params=params, headers=headers
+        )
+
+    @query_params()
+    def put_policy(self, name, body, params=None, headers=None):
+        """
+        Creates a new enrich policy.
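+
+        A hypothetical ``match`` policy sketch, assuming ``es`` is an async
+        client instance (index and field names are illustrative)::
+
+            await es.enrich.put_policy(
+                name="users-policy",
+                body={
+                    "match": {
+                        "indices": "users",
+                        "match_field": "email",
+                        "enrich_fields": ["first_name", "last_name"],
+                    }
+                },
+            )
+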
+ ``_ + + :arg name: The name of the enrich policy + :arg body: The enrich policy to register + """ + for param in (name, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_enrich", "policy", name), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def stats(self, params=None, headers=None): + """ + Gets enrich coordinator statistics and information about enrich policies that + are currently executing. + ``_ + """ + return self.transport.perform_request( + "GET", "/_enrich/_stats", params=params, headers=headers + ) diff --git a/elasticsearch/_async/client/eql.py b/elasticsearch/_async/client/eql.py new file mode 100644 index 000000000..01bb4aaab --- /dev/null +++ b/elasticsearch/_async/client/eql.py @@ -0,0 +1,29 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, SKIP_IN_PATH, query_params, _make_path + + +class EqlClient(NamespacedClient): + @query_params() + def search(self, index, body, params=None, headers=None): + """ + Returns results matching a query expressed in Event Query Language (EQL) + ``_ + + :arg index: The name of the index to scope the operation + :arg body: Eql request body. Use the `query` to limit the query + scope. + """ + for param in (index, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_eql", "search"), + params=params, + headers=headers, + body=body, + ) diff --git a/elasticsearch/_async/client/graph.py b/elasticsearch/_async/client/graph.py new file mode 100644 index 000000000..3c5606155 --- /dev/null +++ b/elasticsearch/_async/client/graph.py @@ -0,0 +1,31 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class GraphClient(NamespacedClient): + @query_params("routing", "timeout") + def explore(self, index, body=None, params=None, headers=None): + """ + Explore extracted and summarized information about the documents and terms in + an index. + ``_ + + :arg index: A comma-separated list of index names to search; use + `_all` or empty string to perform the operation on all indices + :arg body: Graph Query DSL + :arg routing: Specific routing value + :arg timeout: Explicit operation timeout + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", + _make_path(index, "_graph", "explore"), + params=params, + headers=headers, + body=body, + ) diff --git a/elasticsearch/_async/client/ilm.py b/elasticsearch/_async/client/ilm.py new file mode 100644 index 000000000..4e09316d6 --- /dev/null +++ b/elasticsearch/_async/client/ilm.py @@ -0,0 +1,162 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
+# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class IlmClient(NamespacedClient): + @query_params() + def delete_lifecycle(self, policy, params=None, headers=None): + """ + Deletes the specified lifecycle policy definition. A currently used policy + cannot be deleted. + ``_ + + :arg policy: The name of the index lifecycle policy + """ + if policy in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'policy'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_ilm", "policy", policy), + params=params, + headers=headers, + ) + + @query_params("only_errors", "only_managed") + def explain_lifecycle(self, index, params=None, headers=None): + """ + Retrieves information about the index's current lifecycle state, such as the + currently executing phase, action, and step. + ``_ + + :arg index: The name of the index to explain + :arg only_errors: filters the indices included in the response + to ones in an ILM error state, implies only_managed + :arg only_managed: filters the indices included in the response + to ones managed by ILM + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "GET", _make_path(index, "_ilm", "explain"), params=params, headers=headers + ) + + @query_params() + def get_lifecycle(self, policy=None, params=None, headers=None): + """ + Returns the specified policy definition. Includes the policy version and last + modified date. + ``_ + + :arg policy: The name of the index lifecycle policy + """ + return self.transport.perform_request( + "GET", _make_path("_ilm", "policy", policy), params=params, headers=headers + ) + + @query_params() + def get_status(self, params=None, headers=None): + """ + Retrieves the current index lifecycle management (ILM) status. + ``_ + """ + return self.transport.perform_request( + "GET", "/_ilm/status", params=params, headers=headers + ) + + @query_params() + def move_to_step(self, index, body=None, params=None, headers=None): + """ + Manually moves an index into the specified step and executes that step. 
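+
+        A minimal sketch, assuming ``es`` is an async client instance (the
+        phase, action, and step names are illustrative)::
+
+            await es.ilm.move_to_step(
+                index="my-index",
+                body={
+                    "current_step": {"phase": "new", "action": "complete", "name": "complete"},
+                    "next_step": {"phase": "warm", "action": "forcemerge", "name": "forcemerge"},
+                },
+            )
+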
+        ``_
+
+        :arg index: The name of the index whose lifecycle step is to
+            change
+        :arg body: The new lifecycle step to move to
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_ilm", "move", index),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def put_lifecycle(self, policy, body=None, params=None, headers=None):
+        """
+        Creates a lifecycle policy
+        ``_
+
+        :arg policy: The name of the index lifecycle policy
+        :arg body: The lifecycle policy definition to register
+        """
+        if policy in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'policy'.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_ilm", "policy", policy),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def remove_policy(self, index, params=None, headers=None):
+        """
+        Removes the assigned lifecycle policy and stops managing the specified index
+        ``_
+
+        :arg index: The name of the index to remove the policy from
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "POST", _make_path(index, "_ilm", "remove"), params=params, headers=headers
+        )
+
+    @query_params()
+    def retry(self, index, params=None, headers=None):
+        """
+        Retries executing the policy for an index that is in the ERROR step.
+        ``_
+
+        :arg index: The name of the indices (comma-separated) whose
+            failed lifecycle step is to be retried
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "POST", _make_path(index, "_ilm", "retry"), params=params, headers=headers
+        )
+
+    @query_params()
+    def start(self, params=None, headers=None):
+        """
+        Starts the index lifecycle management (ILM) plugin.
+        ``_
+        """
+        return self.transport.perform_request(
+            "POST", "/_ilm/start", params=params, headers=headers
+        )
+
+    @query_params()
+    def stop(self, params=None, headers=None):
+        """
+        Halts all lifecycle management operations and stops the index lifecycle
+        management (ILM) plugin
+        ``_
+        """
+        return self.transport.perform_request(
+            "POST", "/_ilm/stop", params=params, headers=headers
+        )
diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py
new file mode 100644
index 000000000..68efaf626
--- /dev/null
+++ b/elasticsearch/_async/client/indices.py
@@ -0,0 +1,1364 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class IndicesClient(NamespacedClient):
+    @query_params()
+    def analyze(self, body=None, index=None, params=None, headers=None):
+        """
+        Performs the analysis process on a text and returns the token breakdown of the
+        text.
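+
+        A minimal sketch, assuming ``es`` is an async client instance (the
+        analyzer and text are illustrative)::
+
+            await es.indices.analyze(
+                body={"analyzer": "standard", "text": "Quick Brown Foxes!"}
+            )
+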
+        ``_
+
+        :arg body: Define analyzer/tokenizer parameters and the text on
+            which the analysis should be performed
+        :arg index: The name of the index to scope the operation
+        """
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, "_analyze"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable")
+    def refresh(self, index=None, params=None, headers=None):
+        """
+        Performs the refresh operation on one or more indices.
+        ``_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both. Valid choices: open,
+            closed, hidden, none, all Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        """
+        return self.transport.perform_request(
+            "POST", _make_path(index, "_refresh"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "force",
+        "ignore_unavailable",
+        "wait_if_ongoing",
+    )
+    def flush(self, index=None, params=None, headers=None):
+        """
+        Performs the flush operation on one or more indices.
+        ``_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string for all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both. Valid choices: open,
+            closed, hidden, none, all Default: open
+        :arg force: Whether a flush should be forced even if it is not
+            necessarily needed, i.e. if no changes will be committed to the index.
+            This is useful if transaction log IDs should be incremented even if no
+            uncommitted changes are present. (This setting can be considered as
+            internal)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg wait_if_ongoing: If set to true the flush operation will
+            block until the flush can be executed if another flush operation is
+            already executing. The default is true. If set to false the flush will
+            be skipped if another flush operation is already running.
+        """
+        return self.transport.perform_request(
+            "POST", _make_path(index, "_flush"), params=params, headers=headers
+        )
+
+    @query_params("master_timeout", "timeout", "wait_for_active_shards")
+    def create(self, index, body=None, params=None, headers=None):
+        """
+        Creates an index with optional settings and mappings.
+        ``_
+
+        :arg index: The name of the index
+        :arg body: The configuration for the index (`settings` and
+            `mappings`)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Set the number of active shards to
+            wait for before the operation returns.
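+
+        A minimal sketch, assuming ``es`` is an async client instance (the
+        index name and settings are illustrative)::
+
+            await es.indices.create(
+                index="my-index",
+                body={"settings": {"number_of_shards": 1}},
+            )
+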
+ """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "PUT", _make_path(index), params=params, headers=headers, body=body + ) + + @query_params("master_timeout", "timeout", "wait_for_active_shards") + def clone(self, index, target, body=None, params=None, headers=None): + """ + Clones an index + ``_ + + :arg index: The name of the source index to clone + :arg target: The name of the target index to clone into + :arg body: The configuration for the target index (`settings` + and `aliases`) + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + :arg wait_for_active_shards: Set the number of active shards to + wait for on the cloned index before the operation returns. + """ + for param in (index, target): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path(index, "_clone", target), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "flat_settings", + "ignore_unavailable", + "include_defaults", + "local", + "master_timeout", + ) + def get(self, index, params=None, headers=None): + """ + Returns information about one or more indices. + ``_ + + :arg index: A comma-separated list of index names + :arg allow_no_indices: Ignore if a wildcard expression resolves + to no concrete indices (default: false) + :arg expand_wildcards: Whether wildcard expressions should get + expanded to open or closed indices (default: open) Valid choices: open, + closed, hidden, none, all Default: open + :arg flat_settings: Return settings in flat format (default: + false) + :arg ignore_unavailable: Ignore unavailable indexes (default: + false) + :arg include_defaults: Whether to return all default setting for + each of the indices. + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Specify timeout for connection to master + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "GET", _make_path(index), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "ignore_unavailable", + "master_timeout", + "timeout", + "wait_for_active_shards", + ) + def open(self, index, params=None, headers=None): + """ + Opens an index. + ``_ + + :arg index: A comma separated list of indices to open + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: closed + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + :arg wait_for_active_shards: Sets the number of active shards to + wait for before the operation returns. 
+ """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", _make_path(index, "_open"), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "ignore_unavailable", + "master_timeout", + "timeout", + "wait_for_active_shards", + ) + def close(self, index, params=None, headers=None): + """ + Closes an index. + ``_ + + :arg index: A comma separated list of indices to close + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + :arg wait_for_active_shards: Sets the number of active shards to + wait for before the operation returns. + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", _make_path(index, "_close"), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "ignore_unavailable", + "master_timeout", + "timeout", + ) + def delete(self, index, params=None, headers=None): + """ + Deletes an index. + ``_ + + :arg index: A comma-separated list of indices to delete; use + `_all` or `*` string to delete all indices + :arg allow_no_indices: Ignore if a wildcard expression resolves + to no concrete indices (default: false) + :arg expand_wildcards: Whether wildcard expressions should get + expanded to open or closed indices (default: open) Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Ignore unavailable indexes (default: + false) + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "DELETE", _make_path(index), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "flat_settings", + "ignore_unavailable", + "include_defaults", + "local", + ) + def exists(self, index, params=None, headers=None): + """ + Returns information about whether a particular index exists. + ``_ + + :arg index: A comma-separated list of index names + :arg allow_no_indices: Ignore if a wildcard expression resolves + to no concrete indices (default: false) + :arg expand_wildcards: Whether wildcard expressions should get + expanded to open or closed indices (default: open) Valid choices: open, + closed, hidden, none, all Default: open + :arg flat_settings: Return settings in flat format (default: + false) + :arg ignore_unavailable: Ignore unavailable indexes (default: + false) + :arg include_defaults: Whether to return all default setting for + each of the indices. 
+ :arg local: Return local information, do not retrieve the state + from master node (default: false) + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "HEAD", _make_path(index), params=params, headers=headers + ) + + @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local") + def exists_type(self, index, doc_type, params=None, headers=None): + """ + Returns information about whether a particular document type exists. + (DEPRECATED) + ``_ + + :arg index: A comma-separated list of index names; use `_all` to + check the types across all indices + :arg doc_type: A comma-separated list of document types to check + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg local: Return local information, do not retrieve the state + from master node (default: false) + """ + for param in (index, doc_type): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "HEAD", + _make_path(index, "_mapping", doc_type), + params=params, + headers=headers, + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "ignore_unavailable", + "master_timeout", + "timeout", + ) + def put_mapping(self, index, body, params=None, headers=None): + """ + Updates the index mappings. + ``_ + + :arg index: A comma-separated list of index names the mapping + should be added to (supports wildcards); use `_all` or omit to add the + mapping on all indices. + :arg body: The mapping definition + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + """ + for param in (index, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path(index, "_mapping"), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "ignore_unavailable", + "local", + "master_timeout", + ) + def get_mapping(self, index=None, params=None, headers=None): + """ + Returns mappings for one or more indices. + ``_ + + :arg index: A comma-separated list of index names + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. 
Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Specify timeout for connection to master + """ + return self.transport.perform_request( + "GET", _make_path(index, "_mapping"), params=params, headers=headers + ) + + @query_params("master_timeout", "timeout") + def put_alias(self, index, name, body=None, params=None, headers=None): + """ + Creates or updates an alias. + ``_ + + :arg index: A comma-separated list of index names the alias + should point to (supports wildcards); use `_all` to perform the + operation on all indices. + :arg name: The name of the alias to be created or updated + :arg body: The settings for the alias, such as `routing` or + `filter` + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit timestamp for the document + """ + for param in (index, name): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path(index, "_alias", name), + params=params, + headers=headers, + body=body, + ) + + @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local") + def exists_alias(self, name, index=None, params=None, headers=None): + """ + Returns information about whether a particular alias exists. + ``_ + + :arg name: A comma-separated list of alias names to return + :arg index: A comma-separated list of index names to filter + aliases + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: all + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg local: Return local information, do not retrieve the state + from master node (default: false) + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "HEAD", _make_path(index, "_alias", name), params=params, headers=headers + ) + + @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local") + def get_alias(self, index=None, name=None, params=None, headers=None): + """ + Returns an alias. + ``_ + + :arg index: A comma-separated list of index names to filter + aliases + :arg name: A comma-separated list of alias names to return + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. 
Valid choices: open, + closed, hidden, none, all Default: all + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg local: Return local information, do not retrieve the state + from master node (default: false) + """ + return self.transport.perform_request( + "GET", _make_path(index, "_alias", name), params=params, headers=headers + ) + + @query_params("master_timeout", "timeout") + def update_aliases(self, body, params=None, headers=None): + """ + Updates index aliases. + ``_ + + :arg body: The definition of `actions` to perform + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Request timeout + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", "/_aliases", params=params, headers=headers, body=body + ) + + @query_params("master_timeout", "timeout") + def delete_alias(self, index, name, params=None, headers=None): + """ + Deletes an alias. + ``_ + + :arg index: A comma-separated list of index names (supports + wildcards); use `_all` for all indices + :arg name: A comma-separated list of aliases to delete (supports + wildcards); use `_all` to delete all aliases for the specified indices. + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit timestamp for the document + """ + for param in (index, name): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "DELETE", _make_path(index, "_alias", name), params=params, headers=headers + ) + + @query_params("create", "master_timeout", "order") + def put_template(self, name, body, params=None, headers=None): + """ + Creates or updates an index template. + ``_ + + :arg name: The name of the template + :arg body: The template definition + :arg create: Whether the index template should only be added if + new or can also replace an existing one + :arg master_timeout: Specify timeout for connection to master + :arg order: The order for this template when merging multiple + matching ones (higher numbers are merged later, overriding the lower + numbers) + """ + for param in (name, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_template", name), + params=params, + headers=headers, + body=body, + ) + + @query_params("flat_settings", "local", "master_timeout") + def exists_template(self, name, params=None, headers=None): + """ + Returns information about whether a particular index template exists. + ``_ + + :arg name: The comma separated names of the index templates + :arg flat_settings: Return settings in flat format (default: + false) + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "HEAD", _make_path("_template", name), params=params, headers=headers + ) + + @query_params("flat_settings", "local", "master_timeout") + def get_template(self, name=None, params=None, headers=None): + """ + Returns an index template. 
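+
+        For example, fetching a template by name, assuming ``es`` is an
+        async client instance (the template name is illustrative)::
+
+            await es.indices.get_template(name="template-1")
+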
+ ``_ + + :arg name: The comma separated names of the index templates + :arg flat_settings: Return settings in flat format (default: + false) + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + """ + return self.transport.perform_request( + "GET", _make_path("_template", name), params=params, headers=headers + ) + + @query_params("master_timeout", "timeout") + def delete_template(self, name, params=None, headers=None): + """ + Deletes an index template. + ``_ + + :arg name: The name of the template + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "DELETE", _make_path("_template", name), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "flat_settings", + "ignore_unavailable", + "include_defaults", + "local", + "master_timeout", + ) + def get_settings(self, index=None, name=None, params=None, headers=None): + """ + Returns settings for one or more indices. + ``_ + + :arg index: A comma-separated list of index names; use `_all` or + empty string to perform the operation on all indices + :arg name: The name of the settings that should be included + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: all + :arg flat_settings: Return settings in flat format (default: + false) + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg include_defaults: Whether to return all default setting for + each of the indices. + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Specify timeout for connection to master + """ + return self.transport.perform_request( + "GET", _make_path(index, "_settings", name), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "flat_settings", + "ignore_unavailable", + "master_timeout", + "preserve_existing", + "timeout", + ) + def put_settings(self, body, index=None, params=None, headers=None): + """ + Updates the index settings. + ``_ + + :arg body: The index settings to be updated + :arg index: A comma-separated list of index names; use `_all` or + empty string to perform the operation on all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg flat_settings: Return settings in flat format (default: + false) + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg master_timeout: Specify timeout for connection to master + :arg preserve_existing: Whether to update existing settings. 
If
+            set to `true` existing settings on an index remain unchanged, the
+            default is `false`
+        :arg timeout: Explicit operation timeout
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path(index, "_settings"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "completion_fields",
+        "expand_wildcards",
+        "fielddata_fields",
+        "fields",
+        "forbid_closed_indices",
+        "groups",
+        "include_segment_file_sizes",
+        "include_unloaded_segments",
+        "level",
+        "types",
+    )
+    def stats(self, index=None, metric=None, params=None, headers=None):
+        """
+        Provides statistics on operations happening in an index.
+        ``_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg metric: Limit the information returned to the specific
+            metrics. Valid choices: _all, completion, docs, fielddata, query_cache,
+            flush, get, indexing, merge, request_cache, refresh, search, segments,
+            store, warmer, suggest, bulk
+        :arg completion_fields: A comma-separated list of fields for
+            `fielddata` and `suggest` index metric (supports wildcards)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both. Valid choices: open,
+            closed, hidden, none, all Default: open
+        :arg fielddata_fields: A comma-separated list of fields for
+            `fielddata` index metric (supports wildcards)
+        :arg fields: A comma-separated list of fields for `fielddata`
+            and `completion` index metric (supports wildcards)
+        :arg forbid_closed_indices: If set to false stats will also be
+            collected from closed indices if explicitly specified or if
+            expand_wildcards expands to closed indices Default: True
+        :arg groups: A comma-separated list of search groups for
+            `search` index metric
+        :arg include_segment_file_sizes: Whether to report the
+            aggregated disk usage of each one of the Lucene index files (only
+            applies if segment stats are requested)
+        :arg include_unloaded_segments: If set to true segment stats
+            will include stats for segments that are not currently loaded into
+            memory
+        :arg level: Return stats aggregated at cluster, index or shard
+            level Valid choices: cluster, indices, shards Default: indices
+        :arg types: A comma-separated list of document types for the
+            `indexing` index metric
+        """
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_stats", metric), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices", "expand_wildcards", "ignore_unavailable", "verbose"
+    )
+    def segments(self, index=None, params=None, headers=None):
+        """
+        Provides low-level information about segments in a Lucene index.
+        ``_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both. Valid choices: open,
+            closed, hidden, none, all Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg verbose: Includes detailed memory usage by Lucene.
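+
+        For example, assuming ``es`` is an async client instance (the index
+        name is illustrative)::
+
+            await es.indices.segments(index="my-index", verbose=True)
+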
+ """ + return self.transport.perform_request( + "GET", _make_path(index, "_segments"), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "fielddata", + "fields", + "ignore_unavailable", + "query", + "request", + ) + def clear_cache(self, index=None, params=None, headers=None): + """ + Clears all or specific caches for one or more indices. + ``_ + + :arg index: A comma-separated list of index name to limit the + operation + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg fielddata: Clear field data + :arg fields: A comma-separated list of fields to clear when + using the `fielddata` parameter (default: all) + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg query: Clear query caches + :arg request: Clear request cache + """ + return self.transport.perform_request( + "POST", _make_path(index, "_cache", "clear"), params=params, headers=headers + ) + + @query_params("active_only", "detailed") + def recovery(self, index=None, params=None, headers=None): + """ + Returns information about ongoing index shard recoveries. + ``_ + + :arg index: A comma-separated list of index names; use `_all` or + empty string to perform the operation on all indices + :arg active_only: Display only those recoveries that are + currently on-going + :arg detailed: Whether to display detailed information about + shard recovery + """ + return self.transport.perform_request( + "GET", _make_path(index, "_recovery"), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "ignore_unavailable", + "only_ancient_segments", + "wait_for_completion", + ) + def upgrade(self, index=None, params=None, headers=None): + """ + DEPRECATED Upgrades to the current version of Lucene. + ``_ + + :arg index: A comma-separated list of index names; use `_all` or + empty string to perform the operation on all indices + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg only_ancient_segments: If true, only ancient (an older + Lucene major release) segments will be upgraded + :arg wait_for_completion: Specify whether the request should + block until the all segments are upgraded (default: false) + """ + return self.transport.perform_request( + "POST", _make_path(index, "_upgrade"), params=params, headers=headers + ) + + @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable") + def get_upgrade(self, index=None, params=None, headers=None): + """ + DEPRECATED Returns a progress status of current upgrade. 
+        ``_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both. Valid choices: open,
+            closed, hidden, none, all Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        """
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_upgrade"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices", "expand_wildcards", "ignore_unavailable", "status"
+    )
+    def shard_stores(self, index=None, params=None, headers=None):
+        """
+        Provides store information for shard copies of indices.
+        ``_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both. Valid choices: open,
+            closed, hidden, none, all Default: open
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg status: A comma-separated list of statuses used to filter
+            on shards to get store information for Valid choices: green, yellow,
+            red, all
+        """
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_shard_stores"), params=params, headers=headers
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "flush",
+        "ignore_unavailable",
+        "max_num_segments",
+        "only_expunge_deletes",
+    )
+    def forcemerge(self, index=None, params=None, headers=None):
+        """
+        Performs the force merge operation on one or more indices.
+        ``_
+
+        :arg index: A comma-separated list of index names; use `_all` or
+            empty string to perform the operation on all indices
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both. Valid choices: open,
+            closed, hidden, none, all Default: open
+        :arg flush: Specify whether the index should be flushed after
+            performing the operation (default: true)
+        :arg ignore_unavailable: Whether specified concrete indices
+            should be ignored when unavailable (missing or closed)
+        :arg max_num_segments: The number of segments the index should
+            be merged into (default: dynamic)
+        :arg only_expunge_deletes: Specify whether the operation should
+            only expunge deleted documents
+        """
+        return self.transport.perform_request(
+            "POST", _make_path(index, "_forcemerge"), params=params, headers=headers
+        )
+
+    @query_params("master_timeout", "timeout", "wait_for_active_shards")
+    def shrink(self, index, target, body=None, params=None, headers=None):
+        """
+        Allows shrinking an existing index into a new index with fewer primary shards.
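+
+        A sketch of shrinking to a single primary shard, assuming ``es`` is
+        an async client instance (names and settings are illustrative)::
+
+            await es.indices.shrink(
+                index="my-index",
+                target="my-index-shrunk",
+                body={"settings": {"index.number_of_shards": 1}},
+            )
+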
+        ``_
+
+        :arg index: The name of the source index to shrink
+        :arg target: The name of the target index to shrink into
+        :arg body: The configuration for the target index (`settings`
+            and `aliases`)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Set the number of active shards to
+            wait for on the shrunken index before the operation returns.
+        """
+        for param in (index, target):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path(index, "_shrink", target),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("master_timeout", "timeout", "wait_for_active_shards")
+    def split(self, index, target, body=None, params=None, headers=None):
+        """
+        Allows you to split an existing index into a new index with more primary
+        shards.
+        ``_
+
+        :arg index: The name of the source index to split
+        :arg target: The name of the target index to split into
+        :arg body: The configuration for the target index (`settings`
+            and `aliases`)
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Set the number of active shards to
+            wait for on the split index before the operation returns.
+        """
+        for param in (index, target):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path(index, "_split", target),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("dry_run", "master_timeout", "timeout", "wait_for_active_shards")
+    def rollover(self, alias, body=None, new_index=None, params=None, headers=None):
+        """
+        Updates an alias to point to a new index when the existing index is considered
+        to be too large or too old.
+        ``_
+
+        :arg alias: The name of the alias to rollover
+        :arg body: The conditions that need to be met for executing
+            rollover
+        :arg new_index: The name of the rollover index
+        :arg dry_run: If set to true the rollover action will only be
+            validated but not actually performed even if a condition matches. The
+            default is false
+        :arg master_timeout: Specify timeout for connection to master
+        :arg timeout: Explicit operation timeout
+        :arg wait_for_active_shards: Set the number of active shards to
+            wait for on the newly created rollover index before the operation
+            returns.
+        """
+        if alias in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'alias'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(alias, "_rollover", new_index),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params(
+        "allow_no_indices",
+        "expand_wildcards",
+        "ignore_unavailable",
+        "master_timeout",
+        "timeout",
+        "wait_for_active_shards",
+    )
+    def freeze(self, index, params=None, headers=None):
+        """
+        Freezes an index. A frozen index has almost no overhead on the cluster (except
+        for maintaining its metadata in memory) and is read-only.
+        ``_
+
+        :arg index: The name of the index to freeze
+        :arg allow_no_indices: Whether to ignore if a wildcard indices
+            expression resolves into no concrete indices. (This includes `_all`
+            string or when no indices have been specified)
+        :arg expand_wildcards: Whether to expand wildcard expression to
+            concrete indices that are open, closed or both.
Valid choices: open, + closed, hidden, none, all Default: closed + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + :arg wait_for_active_shards: Sets the number of active shards to + wait for before the operation returns. + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", _make_path(index, "_freeze"), params=params, headers=headers + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "ignore_unavailable", + "master_timeout", + "timeout", + "wait_for_active_shards", + ) + def unfreeze(self, index, params=None, headers=None): + """ + Unfreezes an index. When a frozen index is unfrozen, the index goes through the + normal recovery process and becomes writeable again. + ``_ + + :arg index: The name of the index to unfreeze + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: closed + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + :arg wait_for_active_shards: Sets the number of active shards to + wait for before the operation returns. + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "POST", _make_path(index, "_unfreeze"), params=params, headers=headers + ) + + @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable") + def reload_search_analyzers(self, index, params=None, headers=None): + """ + Reloads an index's search analyzers and their resources. + ``_ + + :arg index: A comma-separated list of index names to reload + analyzers for + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'index'.") + + return self.transport.perform_request( + "GET", + _make_path(index, "_reload_search_analyzers"), + params=params, + headers=headers, + ) + + @query_params( + "allow_no_indices", + "expand_wildcards", + "ignore_unavailable", + "include_defaults", + "local", + ) + def get_field_mapping(self, fields, index=None, params=None, headers=None): + """ + Returns mapping for one or more fields. + ``_ + + :arg fields: A comma-separated list of fields + :arg index: A comma-separated list of index names + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. 
(This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg include_defaults: Whether the default mapping values should + be returned as well + :arg local: Return local information, do not retrieve the state + from master node (default: false) + """ + if fields in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'fields'.") + + return self.transport.perform_request( + "GET", + _make_path(index, "_mapping", "field", fields), + params=params, + headers=headers, + ) + + @query_params( + "all_shards", + "allow_no_indices", + "analyze_wildcard", + "analyzer", + "default_operator", + "df", + "expand_wildcards", + "explain", + "ignore_unavailable", + "lenient", + "q", + "rewrite", + ) + def validate_query( + self, body=None, index=None, doc_type=None, params=None, headers=None + ): + """ + Allows a user to validate a potentially expensive query without executing it. + ``_ + + :arg body: The query definition specified with the Query DSL + :arg index: A comma-separated list of index names to restrict + the operation; use `_all` or empty string to perform the operation on + all indices + :arg doc_type: A comma-separated list of document types to + restrict the operation; leave empty to perform the operation on all + types + :arg all_shards: Execute validation on all shards instead of one + random shard per index + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg analyze_wildcard: Specify whether wildcard and prefix + queries should be analyzed (default: false) + :arg analyzer: The analyzer to use for the query string + :arg default_operator: The default operator for query string + query (AND or OR) Valid choices: AND, OR Default: OR + :arg df: The field to use as default where no field prefix is + given in the query string + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, hidden, none, all Default: open + :arg explain: Return detailed information about the error + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + :arg lenient: Specify whether format-based query failures (such + as providing text to a numeric field) should be ignored + :arg q: Query in the Lucene query string syntax + :arg rewrite: Provide a more detailed explanation showing the + actual Lucene query that will be executed. 
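+
+ A minimal usage sketch (illustrative only; "my-index" and the query
+ are placeholders)::
+
+ from elasticsearch import Elasticsearch
+
+ es = Elasticsearch() # defaults to localhost:9200
+ resp = es.indices.validate_query(
+ index="my-index",
+ body={"query": {"match": {"user": "kimchy"}}},
+ explain=True,
+ )
+ print(resp["valid"])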
+ """ + return self.transport.perform_request( + "POST", + _make_path(index, doc_type, "_validate", "query"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def create_data_stream(self, name, body, params=None, headers=None): + """ + Creates or updates a data stream + ``_ + + :arg name: The name of the data stream + :arg body: The data stream definition + """ + for param in (name, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_data_stream", name), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def delete_data_stream(self, name, params=None, headers=None): + """ + Deletes a data stream. + ``_ + + :arg name: The name of the data stream + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "DELETE", _make_path("_data_stream", name), params=params, headers=headers + ) + + @query_params() + def get_data_streams(self, name=None, params=None, headers=None): + """ + Returns data streams. + ``_ + + :arg name: The name or wildcard expression of the requested data + streams + """ + return self.transport.perform_request( + "GET", _make_path("_data_streams", name), params=params, headers=headers + ) + + @query_params("master_timeout", "timeout") + def delete_index_template(self, name, params=None, headers=None): + """ + Deletes an index template. + ``_ + + :arg name: The name of the template + :arg master_timeout: Specify timeout for connection to master + :arg timeout: Explicit operation timeout + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_index_template", name), + params=params, + headers=headers, + ) + + @query_params("flat_settings", "local", "master_timeout") + def get_index_template(self, name=None, params=None, headers=None): + """ + Returns an index template. + ``_ + + :arg name: The comma separated names of the index templates + :arg flat_settings: Return settings in flat format (default: + false) + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + """ + return self.transport.perform_request( + "GET", _make_path("_index_template", name), params=params, headers=headers + ) + + @query_params("cause", "create", "master_timeout") + def put_index_template(self, name, body, params=None, headers=None): + """ + Creates or updates an index template. + ``_ + + :arg name: The name of the template + :arg body: The template definition + :arg cause: User defined reason for creating/updating the index + template + :arg create: Whether the index template should only be added if + new or can also replace an existing one + :arg master_timeout: Specify timeout for connection to master + """ + for param in (name, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_index_template", name), + params=params, + headers=headers, + body=body, + ) + + @query_params("flat_settings", "local", "master_timeout") + def exists_index_template(self, name, params=None, headers=None): + """ + Returns information about whether a particular index template exists. 
+ ``_ + + :arg name: The name of the template + :arg flat_settings: Return settings in flat format (default: + false) + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "HEAD", _make_path("_index_template", name), params=params, headers=headers + ) + + @query_params("cause", "create", "master_timeout") + def simulate_index_template(self, name, body=None, params=None, headers=None): + """ + Simulate matching the given index name against the index templates in the + system + ``_ + + :arg name: The name of the index (it must be a concrete index + name) + :arg body: New index template definition, which will be included + in the simulation, as if it already exists in the system + :arg cause: User defined reason for dry-run creating the new + template for simulation purposes + :arg create: Whether the index template we optionally defined in + the body should only be dry-run added if new or can also replace an + existing one + :arg master_timeout: Specify timeout for connection to master + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "POST", + _make_path("_index_template", "_simulate_index", name), + params=params, + headers=headers, + body=body, + ) diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py new file mode 100644 index 000000000..c30c41dfb --- /dev/null +++ b/elasticsearch/_async/client/ingest.py @@ -0,0 +1,99 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class IngestClient(NamespacedClient): + @query_params("master_timeout") + def get_pipeline(self, id=None, params=None, headers=None): + """ + Returns a pipeline. + ``_ + + :arg id: Comma separated list of pipeline ids. Wildcards + supported + :arg master_timeout: Explicit operation timeout for connection + to master node + """ + return self.transport.perform_request( + "GET", _make_path("_ingest", "pipeline", id), params=params, headers=headers + ) + + @query_params("master_timeout", "timeout") + def put_pipeline(self, id, body, params=None, headers=None): + """ + Creates or updates a pipeline. + ``_ + + :arg id: Pipeline ID + :arg body: The ingest definition + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg timeout: Explicit operation timeout + """ + for param in (id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_ingest", "pipeline", id), + params=params, + headers=headers, + body=body, + ) + + @query_params("master_timeout", "timeout") + def delete_pipeline(self, id, params=None, headers=None): + """ + Deletes a pipeline. 
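+
+ A minimal usage sketch (illustrative only; "my-pipeline" is a
+ placeholder id and is assumed to exist)::
+
+ from elasticsearch import Elasticsearch
+
+ es = Elasticsearch() # defaults to localhost:9200
+ es.ingest.delete_pipeline(id="my-pipeline")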
+ ``_
+
+ :arg id: Pipeline ID
+ :arg master_timeout: Explicit operation timeout for connection
+ to master node
+ :arg timeout: Explicit operation timeout
+ """
+ if id in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'id'.")
+
+ return self.transport.perform_request(
+ "DELETE",
+ _make_path("_ingest", "pipeline", id),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params("verbose")
+ def simulate(self, body, id=None, params=None, headers=None):
+ """
+ Allows one to simulate a pipeline with example documents.
+ ``_
+
+ :arg body: The simulate definition
+ :arg id: Pipeline ID
+ :arg verbose: Verbose mode. Display data output for each
+ processor in the executed pipeline
+ """
+ if body in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'body'.")
+
+ return self.transport.perform_request(
+ "POST",
+ _make_path("_ingest", "pipeline", id, "_simulate"),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params()
+ def processor_grok(self, params=None, headers=None):
+ """
+ Returns a list of the built-in patterns.
+ ``_
+ """
+ return self.transport.perform_request(
+ "GET", "/_ingest/processor/grok", params=params, headers=headers
+ )
diff --git a/elasticsearch/_async/client/license.py b/elasticsearch/_async/client/license.py
new file mode 100644
index 000000000..e455725af
--- /dev/null
+++ b/elasticsearch/_async/client/license.py
@@ -0,0 +1,98 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params
+
+
+class LicenseClient(NamespacedClient):
+ @query_params()
+ def delete(self, params=None, headers=None):
+ """
+ Deletes licensing information for the cluster
+ ``_
+ """
+ return self.transport.perform_request(
+ "DELETE", "/_license", params=params, headers=headers
+ )
+
+ @query_params("accept_enterprise", "local")
+ def get(self, params=None, headers=None):
+ """
+ Retrieves licensing information for the cluster
+ ``_
+
+ :arg accept_enterprise: Supported for backwards compatibility
+ with 7.x. If this param is used it must be set to true
+ :arg local: Return local information, do not retrieve the state
+ from master node (default: false)
+ """
+ return self.transport.perform_request(
+ "GET", "/_license", params=params, headers=headers
+ )
+
+ @query_params()
+ def get_basic_status(self, params=None, headers=None):
+ """
+ Retrieves information about the status of the basic license.
+ ``_
+ """
+ return self.transport.perform_request(
+ "GET", "/_license/basic_status", params=params, headers=headers
+ )
+
+ @query_params()
+ def get_trial_status(self, params=None, headers=None):
+ """
+ Retrieves information about the status of the trial license.
+ ``_
+ """
+ return self.transport.perform_request(
+ "GET", "/_license/trial_status", params=params, headers=headers
+ )
+
+ @query_params("acknowledge")
+ def post(self, body=None, params=None, headers=None):
+ """
+ Updates the license for the cluster.
+ ``_
+
+ :arg body: licenses to be installed
+ :arg acknowledge: whether the user has acknowledged the
+ license-change messages (default: false)
+ """
+ return self.transport.perform_request(
+ "PUT", "/_license", params=params, headers=headers, body=body
+ )
+
+ @query_params("acknowledge")
+ def post_start_basic(self, params=None, headers=None):
+ """
+ Starts an indefinite basic license.
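+
+ A minimal usage sketch (illustrative only; assumes a cluster on
+ localhost:9200 whose current license allows starting basic)::
+
+ from elasticsearch import Elasticsearch
+
+ es = Elasticsearch() # defaults to localhost:9200
+ resp = es.license.post_start_basic(acknowledge=True)
+ print(resp["basic_was_started"])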
+ ``_
+
+ :arg acknowledge: whether the user has acknowledged the
+ license-change messages (default: false)
+ """
+ return self.transport.perform_request(
+ "POST", "/_license/start_basic", params=params, headers=headers
+ )
+
+ @query_params("acknowledge", "doc_type")
+ def post_start_trial(self, params=None, headers=None):
+ """
+ Starts a limited-time trial license.
+ ``_
+
+ :arg acknowledge: whether the user has acknowledged the
+ license-change messages (default: false)
+ :arg doc_type: The type of trial license to generate (default:
+ "trial")
+ """
+ # type is a reserved word so it cannot be used, use doc_type instead
+ if "doc_type" in params:
+ params["type"] = params.pop("doc_type")
+
+ return self.transport.perform_request(
+ "POST", "/_license/start_trial", params=params, headers=headers
+ )
diff --git a/elasticsearch/_async/client/migration.py b/elasticsearch/_async/client/migration.py
new file mode 100644
index 000000000..c58b987d7
--- /dev/null
+++ b/elasticsearch/_async/client/migration.py
@@ -0,0 +1,24 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path
+
+
+class MigrationClient(NamespacedClient):
+ @query_params()
+ def deprecations(self, index=None, params=None, headers=None):
+ """
+ Retrieves information about different cluster, node, and index level settings
+ that use deprecated features that will be removed or changed in the next major
+ version.
+ ``_
+
+ :arg index: Index pattern
+ """
+ return self.transport.perform_request(
+ "GET",
+ _make_path(index, "_migration", "deprecations"),
+ params=params,
+ headers=headers,
+ )
diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py
new file mode 100644
index 000000000..ce594396a
--- /dev/null
+++ b/elasticsearch/_async/client/ml.py
@@ -0,0 +1,1480 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH, _bulk_body
+
+
+class MlClient(NamespacedClient):
+ @query_params("allow_no_jobs", "force", "timeout")
+ def close_job(self, job_id, body=None, params=None, headers=None):
+ """
+ Closes one or more anomaly detection jobs. A job can be opened and closed
+ multiple times throughout its lifecycle.
+ ``_
+
+ :arg job_id: The name of the job to close
+ :arg body: The URL params optionally sent in the body
+ :arg allow_no_jobs: Whether to ignore if a wildcard expression
+ matches no jobs. (This includes `_all` string or when no jobs have been
+ specified)
+ :arg force: True if the job should be forcefully closed
+ :arg timeout: Controls the time to wait until a job has closed.
+ Default to 30 minutes
+ """
+ if job_id in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+ return self.transport.perform_request(
+ "POST",
+ _make_path("_ml", "anomaly_detectors", job_id, "_close"),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params()
+ def delete_calendar(self, calendar_id, params=None, headers=None):
+ """
+ Deletes a calendar.
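+
+ A minimal usage sketch (illustrative only; "planned-outages" is a
+ placeholder calendar id)::
+
+ from elasticsearch import Elasticsearch
+
+ es = Elasticsearch() # defaults to localhost:9200
+ es.ml.delete_calendar(calendar_id="planned-outages")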
+ ``_ + + :arg calendar_id: The ID of the calendar to delete + """ + if calendar_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'calendar_id'." + ) + + return self.transport.perform_request( + "DELETE", + _make_path("_ml", "calendars", calendar_id), + params=params, + headers=headers, + ) + + @query_params() + def delete_calendar_event(self, calendar_id, event_id, params=None, headers=None): + """ + Deletes scheduled events from a calendar. + ``_ + + :arg calendar_id: The ID of the calendar to modify + :arg event_id: The ID of the event to remove from the calendar + """ + for param in (calendar_id, event_id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "DELETE", + _make_path("_ml", "calendars", calendar_id, "events", event_id), + params=params, + headers=headers, + ) + + @query_params() + def delete_calendar_job(self, calendar_id, job_id, params=None, headers=None): + """ + Deletes anomaly detection jobs from a calendar. + ``_ + + :arg calendar_id: The ID of the calendar to modify + :arg job_id: The ID of the job to remove from the calendar + """ + for param in (calendar_id, job_id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "DELETE", + _make_path("_ml", "calendars", calendar_id, "jobs", job_id), + params=params, + headers=headers, + ) + + @query_params("force") + def delete_datafeed(self, datafeed_id, params=None, headers=None): + """ + Deletes an existing datafeed. + ``_ + + :arg datafeed_id: The ID of the datafeed to delete + :arg force: True if the datafeed should be forcefully deleted + """ + if datafeed_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'datafeed_id'." + ) + + return self.transport.perform_request( + "DELETE", + _make_path("_ml", "datafeeds", datafeed_id), + params=params, + headers=headers, + ) + + @query_params() + def delete_expired_data(self, params=None, headers=None): + """ + Deletes expired and unused machine learning data. + ``_ + """ + return self.transport.perform_request( + "DELETE", "/_ml/_delete_expired_data", params=params, headers=headers + ) + + @query_params() + def delete_filter(self, filter_id, params=None, headers=None): + """ + Deletes a filter. + ``_ + + :arg filter_id: The ID of the filter to delete + """ + if filter_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'filter_id'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_ml", "filters", filter_id), + params=params, + headers=headers, + ) + + @query_params("allow_no_forecasts", "timeout") + def delete_forecast(self, job_id, forecast_id=None, params=None, headers=None): + """ + Deletes forecasts from a machine learning job. + ``_ + + :arg job_id: The ID of the job from which to delete forecasts + :arg forecast_id: The ID of the forecast to delete, can be comma + delimited list. Leaving blank implies `_all` + :arg allow_no_forecasts: Whether to ignore if `_all` matches no + forecasts + :arg timeout: Controls the time to wait until the forecast(s) + are deleted. 
Default to 30 seconds + """ + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_ml", "anomaly_detectors", job_id, "_forecast", forecast_id), + params=params, + headers=headers, + ) + + @query_params("force", "wait_for_completion") + def delete_job(self, job_id, params=None, headers=None): + """ + Deletes an existing anomaly detection job. + ``_ + + :arg job_id: The ID of the job to delete + :arg force: True if the job should be forcefully deleted + :arg wait_for_completion: Should this request wait until the + operation has completed before returning Default: True + """ + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_ml", "anomaly_detectors", job_id), + params=params, + headers=headers, + ) + + @query_params() + def delete_model_snapshot(self, job_id, snapshot_id, params=None, headers=None): + """ + Deletes an existing model snapshot. + ``_ + + :arg job_id: The ID of the job to fetch + :arg snapshot_id: The ID of the snapshot to delete + """ + for param in (job_id, snapshot_id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "DELETE", + _make_path( + "_ml", "anomaly_detectors", job_id, "model_snapshots", snapshot_id + ), + params=params, + headers=headers, + ) + + @query_params( + "charset", + "column_names", + "delimiter", + "explain", + "format", + "grok_pattern", + "has_header_row", + "line_merge_size_limit", + "lines_to_sample", + "quote", + "should_trim_fields", + "timeout", + "timestamp_field", + "timestamp_format", + ) + def find_file_structure(self, body, params=None, headers=None): + """ + Finds the structure of a text file. The text file must contain data that is + suitable to be ingested into Elasticsearch. + ``_ + + :arg body: The contents of the file to be analyzed + :arg charset: Optional parameter to specify the character set of + the file + :arg column_names: Optional parameter containing a comma + separated list of the column names for a delimited file + :arg delimiter: Optional parameter to specify the delimiter + character for a delimited file - must be a single character + :arg explain: Whether to include a commentary on how the + structure was derived + :arg format: Optional parameter to specify the high level file + format Valid choices: ndjson, xml, delimited, semi_structured_text + :arg grok_pattern: Optional parameter to specify the Grok + pattern that should be used to extract fields from messages in a semi- + structured text file + :arg has_header_row: Optional parameter to specify whether a + delimited file includes the column names in its first row + :arg line_merge_size_limit: Maximum number of characters + permitted in a single message when lines are merged to create messages. 
+ Default: 10000 + :arg lines_to_sample: How many lines of the file should be + included in the analysis Default: 1000 + :arg quote: Optional parameter to specify the quote character + for a delimited file - must be a single character + :arg should_trim_fields: Optional parameter to specify whether + the values between delimiters in a delimited file should have whitespace + trimmed from them + :arg timeout: Timeout after which the analysis will be aborted + Default: 25s + :arg timestamp_field: Optional parameter to specify the + timestamp field in the file + :arg timestamp_format: Optional parameter to specify the + timestamp format in the file - may be either a Joda or Java time format + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + body = _bulk_body(self.transport.serializer, body) + return self.transport.perform_request( + "POST", + "/_ml/find_file_structure", + params=params, + headers=headers, + body=body, + ) + + @query_params("advance_time", "calc_interim", "end", "skip_time", "start") + def flush_job(self, job_id, body=None, params=None, headers=None): + """ + Forces any buffered data to be processed by the job. + ``_ + + :arg job_id: The name of the job to flush + :arg body: Flush parameters + :arg advance_time: Advances time to the given value generating + results and updating the model for the advanced interval + :arg calc_interim: Calculates interim results for the most + recent bucket or all buckets within the latency period + :arg end: When used in conjunction with calc_interim, specifies + the range of buckets on which to calculate interim results + :arg skip_time: Skips time to the given value without generating + results or updating the model for the skipped interval + :arg start: When used in conjunction with calc_interim, + specifies the range of buckets on which to calculate interim results + """ + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "POST", + _make_path("_ml", "anomaly_detectors", job_id, "_flush"), + params=params, + headers=headers, + body=body, + ) + + @query_params("duration", "expires_in") + def forecast(self, job_id, params=None, headers=None): + """ + Predicts the future behavior of a time series by using its historical behavior. + ``_ + + :arg job_id: The ID of the job to forecast for + :arg duration: The duration of the forecast + :arg expires_in: The time interval after which the forecast + expires. Expired forecasts will be deleted at the first opportunity. + """ + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "POST", + _make_path("_ml", "anomaly_detectors", job_id, "_forecast"), + params=params, + headers=headers, + ) + + @query_params( + "anomaly_score", + "desc", + "end", + "exclude_interim", + "expand", + "from_", + "size", + "sort", + "start", + ) + def get_buckets(self, job_id, body=None, timestamp=None, params=None, headers=None): + """ + Retrieves anomaly detection job results for one or more buckets. 
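+
+ A minimal usage sketch (illustrative only; "my-job" is a placeholder
+ job id; query parameters are passed as keyword arguments)::
+
+ from elasticsearch import Elasticsearch
+
+ es = Elasticsearch() # defaults to localhost:9200
+ resp = es.ml.get_buckets(job_id="my-job", size=10, sort="anomaly_score", desc=True)
+ for bucket in resp["buckets"]:
+ print(bucket["timestamp"], bucket["anomaly_score"])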
+ ``_ + + :arg job_id: ID of the job to get bucket results from + :arg body: Bucket selection details if not provided in URI + :arg timestamp: The timestamp of the desired single bucket + result + :arg anomaly_score: Filter for the most anomalous buckets + :arg desc: Set the sort direction + :arg end: End time filter for buckets + :arg exclude_interim: Exclude interim results + :arg expand: Include anomaly records + :arg from\\_: skips a number of buckets + :arg size: specifies a max number of buckets to get + :arg sort: Sort buckets by a particular field + :arg start: Start time filter for buckets + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "POST", + _make_path( + "_ml", "anomaly_detectors", job_id, "results", "buckets", timestamp + ), + params=params, + headers=headers, + body=body, + ) + + @query_params("end", "from_", "job_id", "size", "start") + def get_calendar_events(self, calendar_id, params=None, headers=None): + """ + Retrieves information about the scheduled events in calendars. + ``_ + + :arg calendar_id: The ID of the calendar containing the events + :arg end: Get events before this time + :arg from\\_: Skips a number of events + :arg job_id: Get events for the job. When this option is used + calendar_id must be '_all' + :arg size: Specifies a max number of events to get + :arg start: Get events after this time + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + if calendar_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'calendar_id'." + ) + + return self.transport.perform_request( + "GET", + _make_path("_ml", "calendars", calendar_id, "events"), + params=params, + headers=headers, + ) + + @query_params("from_", "size") + def get_calendars(self, body=None, calendar_id=None, params=None, headers=None): + """ + Retrieves configuration information for calendars. + ``_ + + :arg body: The from and size parameters optionally sent in the + body + :arg calendar_id: The ID of the calendar to fetch + :arg from\\_: skips a number of calendars + :arg size: specifies a max number of calendars to get + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "POST", + _make_path("_ml", "calendars", calendar_id), + params=params, + headers=headers, + body=body, + ) + + @query_params("allow_no_datafeeds") + def get_datafeed_stats(self, datafeed_id=None, params=None, headers=None): + """ + Retrieves usage information for datafeeds. + ``_ + + :arg datafeed_id: The ID of the datafeeds stats to fetch + :arg allow_no_datafeeds: Whether to ignore if a wildcard + expression matches no datafeeds. (This includes `_all` string or when no + datafeeds have been specified) + """ + return self.transport.perform_request( + "GET", + _make_path("_ml", "datafeeds", datafeed_id, "_stats"), + params=params, + headers=headers, + ) + + @query_params("allow_no_datafeeds") + def get_datafeeds(self, datafeed_id=None, params=None, headers=None): + """ + Retrieves configuration information for datafeeds. 
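+
+ A minimal usage sketch (illustrative only)::
+
+ from elasticsearch import Elasticsearch
+
+ es = Elasticsearch() # defaults to localhost:9200
+ resp = es.ml.get_datafeeds(datafeed_id="_all", allow_no_datafeeds=True)
+ print(resp["count"], "datafeeds configured")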
+ ``_
+
+ :arg datafeed_id: The ID of the datafeeds to fetch
+ :arg allow_no_datafeeds: Whether to ignore if a wildcard
+ expression matches no datafeeds. (This includes `_all` string or when no
+ datafeeds have been specified)
+ """
+ return self.transport.perform_request(
+ "GET",
+ _make_path("_ml", "datafeeds", datafeed_id),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params("from_", "size")
+ def get_filters(self, filter_id=None, params=None, headers=None):
+ """
+ Retrieves filters.
+ ``_
+
+ :arg filter_id: The ID of the filter to fetch
+ :arg from\\_: skips a number of filters
+ :arg size: specifies a max number of filters to get
+ """
+ # from is a reserved word so it cannot be used, use from_ instead
+ if "from_" in params:
+ params["from"] = params.pop("from_")
+
+ return self.transport.perform_request(
+ "GET",
+ _make_path("_ml", "filters", filter_id),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params(
+ "desc",
+ "end",
+ "exclude_interim",
+ "from_",
+ "influencer_score",
+ "size",
+ "sort",
+ "start",
+ )
+ def get_influencers(self, job_id, body=None, params=None, headers=None):
+ """
+ Retrieves anomaly detection job results for one or more influencers.
+ ``_
+
+ :arg job_id: Identifier for the anomaly detection job
+ :arg body: Influencer selection criteria
+ :arg desc: whether the results should be sorted in descending
+ order
+ :arg end: end timestamp for the requested influencers
+ :arg exclude_interim: Exclude interim results
+ :arg from\\_: skips a number of influencers
+ :arg influencer_score: influencer score threshold for the
+ requested influencers
+ :arg size: specifies a max number of influencers to get
+ :arg sort: sort field for the requested influencers
+ :arg start: start timestamp for the requested influencers
+ """
+ # from is a reserved word so it cannot be used, use from_ instead
+ if "from_" in params:
+ params["from"] = params.pop("from_")
+
+ if job_id in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'job_id'.")
+
+ return self.transport.perform_request(
+ "POST",
+ _make_path("_ml", "anomaly_detectors", job_id, "results", "influencers"),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params("allow_no_jobs")
+ def get_job_stats(self, job_id=None, params=None, headers=None):
+ """
+ Retrieves usage information for anomaly detection jobs.
+ ``_
+
+ :arg job_id: The ID of the jobs stats to fetch
+ :arg allow_no_jobs: Whether to ignore if a wildcard expression
+ matches no jobs. (This includes `_all` string or when no jobs have been
+ specified)
+ """
+ return self.transport.perform_request(
+ "GET",
+ _make_path("_ml", "anomaly_detectors", job_id, "_stats"),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params("allow_no_jobs")
+ def get_jobs(self, job_id=None, params=None, headers=None):
+ """
+ Retrieves configuration information for anomaly detection jobs.
+ ``_
+
+ :arg job_id: The ID of the jobs to fetch
+ :arg allow_no_jobs: Whether to ignore if a wildcard expression
+ matches no jobs.
(This includes `_all` string or when no jobs have been + specified) + """ + return self.transport.perform_request( + "GET", + _make_path("_ml", "anomaly_detectors", job_id), + params=params, + headers=headers, + ) + + @query_params( + "allow_no_jobs", + "bucket_span", + "end", + "exclude_interim", + "overall_score", + "start", + "top_n", + ) + def get_overall_buckets(self, job_id, body=None, params=None, headers=None): + """ + Retrieves overall bucket results that summarize the bucket results of multiple + anomaly detection jobs. + ``_ + + :arg job_id: The job IDs for which to calculate overall bucket + results + :arg body: Overall bucket selection details if not provided in + URI + :arg allow_no_jobs: Whether to ignore if a wildcard expression + matches no jobs. (This includes `_all` string or when no jobs have been + specified) + :arg bucket_span: The span of the overall buckets. Defaults to + the longest job bucket_span + :arg end: Returns overall buckets with timestamps earlier than + this time + :arg exclude_interim: If true overall buckets that include + interim buckets will be excluded + :arg overall_score: Returns overall buckets with overall scores + higher than this value + :arg start: Returns overall buckets with timestamps after this + time + :arg top_n: The number of top job bucket scores to be used in + the overall_score calculation + """ + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "POST", + _make_path( + "_ml", "anomaly_detectors", job_id, "results", "overall_buckets" + ), + params=params, + headers=headers, + body=body, + ) + + @query_params( + "desc", + "end", + "exclude_interim", + "from_", + "record_score", + "size", + "sort", + "start", + ) + def get_records(self, job_id, body=None, params=None, headers=None): + """ + Retrieves anomaly records for an anomaly detection job. + ``_ + + :arg job_id: The ID of the job + :arg body: Record selection criteria + :arg desc: Set the sort direction + :arg end: End time filter for records + :arg exclude_interim: Exclude interim results + :arg from\\_: skips a number of records + :arg record_score: Returns records with anomaly scores greater + or equal than this value + :arg size: specifies a max number of records to get + :arg sort: Sort records by a particular field + :arg start: Start time filter for records + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "POST", + _make_path("_ml", "anomaly_detectors", job_id, "results", "records"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def info(self, params=None, headers=None): + """ + Returns defaults and limits used by machine learning. + ``_ + """ + return self.transport.perform_request( + "GET", "/_ml/info", params=params, headers=headers + ) + + @query_params() + def open_job(self, job_id, params=None, headers=None): + """ + Opens one or more anomaly detection jobs. 
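+
+ A minimal usage sketch (illustrative only; "my-job" is a placeholder
+ id of an existing, closed job)::
+
+ from elasticsearch import Elasticsearch
+
+ es = Elasticsearch() # defaults to localhost:9200
+ es.ml.open_job(job_id="my-job")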
+ ``_ + + :arg job_id: The ID of the job to open + """ + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "POST", + _make_path("_ml", "anomaly_detectors", job_id, "_open"), + params=params, + headers=headers, + ) + + @query_params() + def post_calendar_events(self, calendar_id, body, params=None, headers=None): + """ + Posts scheduled events in a calendar. + ``_ + + :arg calendar_id: The ID of the calendar to modify + :arg body: A list of events + """ + for param in (calendar_id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path("_ml", "calendars", calendar_id, "events"), + params=params, + headers=headers, + body=body, + ) + + @query_params("reset_end", "reset_start") + def post_data(self, job_id, body, params=None, headers=None): + """ + Sends data to an anomaly detection job for analysis. + ``_ + + :arg job_id: The name of the job receiving the data + :arg body: The data to process + :arg reset_end: Optional parameter to specify the end of the + bucket resetting range + :arg reset_start: Optional parameter to specify the start of the + bucket resetting range + """ + for param in (job_id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + body = _bulk_body(self.transport.serializer, body) + return self.transport.perform_request( + "POST", + _make_path("_ml", "anomaly_detectors", job_id, "_data"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def preview_datafeed(self, datafeed_id, params=None, headers=None): + """ + Previews a datafeed. + ``_ + + :arg datafeed_id: The ID of the datafeed to preview + """ + if datafeed_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'datafeed_id'." + ) + + return self.transport.perform_request( + "GET", + _make_path("_ml", "datafeeds", datafeed_id, "_preview"), + params=params, + headers=headers, + ) + + @query_params() + def put_calendar(self, calendar_id, body=None, params=None, headers=None): + """ + Instantiates a calendar. + ``_ + + :arg calendar_id: The ID of the calendar to create + :arg body: The calendar details + """ + if calendar_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'calendar_id'." + ) + + return self.transport.perform_request( + "PUT", + _make_path("_ml", "calendars", calendar_id), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def put_calendar_job(self, calendar_id, job_id, params=None, headers=None): + """ + Adds an anomaly detection job to a calendar. + ``_ + + :arg calendar_id: The ID of the calendar to modify + :arg job_id: The ID of the job to add to the calendar + """ + for param in (calendar_id, job_id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_ml", "calendars", calendar_id, "jobs", job_id), + params=params, + headers=headers, + ) + + @query_params( + "allow_no_indices", "expand_wildcards", "ignore_throttled", "ignore_unavailable" + ) + def put_datafeed(self, datafeed_id, body, params=None, headers=None): + """ + Instantiates a datafeed. 
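+
+ A minimal usage sketch (illustrative only; the datafeed id, job id
+ and index pattern are placeholders and the job is assumed to exist)::
+
+ from elasticsearch import Elasticsearch
+
+ es = Elasticsearch() # defaults to localhost:9200
+ es.ml.put_datafeed(
+ datafeed_id="datafeed-my-job",
+ body={"job_id": "my-job", "indices": ["metrics-*"]},
+ )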
+ ``_ + + :arg datafeed_id: The ID of the datafeed to create + :arg body: The datafeed config + :arg allow_no_indices: Ignore if the source indices expressions + resolves to no concrete indices (default: true) + :arg expand_wildcards: Whether source index expressions should + get expanded to open or closed indices (default: open) Valid choices: + open, closed, hidden, none, all + :arg ignore_throttled: Ignore indices that are marked as + throttled (default: true) + :arg ignore_unavailable: Ignore unavailable indexes (default: + false) + """ + for param in (datafeed_id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_ml", "datafeeds", datafeed_id), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def put_filter(self, filter_id, body, params=None, headers=None): + """ + Instantiates a filter. + ``_ + + :arg filter_id: The ID of the filter to create + :arg body: The filter details + """ + for param in (filter_id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_ml", "filters", filter_id), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def put_job(self, job_id, body, params=None, headers=None): + """ + Instantiates an anomaly detection job. + ``_ + + :arg job_id: The ID of the job to create + :arg body: The job + """ + for param in (job_id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_ml", "anomaly_detectors", job_id), + params=params, + headers=headers, + body=body, + ) + + @query_params("enabled", "timeout") + def set_upgrade_mode(self, params=None, headers=None): + """ + Sets a cluster wide upgrade_mode setting that prepares machine learning indices + for an upgrade. + ``_ + + :arg enabled: Whether to enable upgrade_mode ML setting or not. + Defaults to false. + :arg timeout: Controls the time to wait before action times out. + Defaults to 30 seconds + """ + return self.transport.perform_request( + "POST", "/_ml/set_upgrade_mode", params=params, headers=headers + ) + + @query_params("end", "start", "timeout") + def start_datafeed(self, datafeed_id, body=None, params=None, headers=None): + """ + Starts one or more datafeeds. + ``_ + + :arg datafeed_id: The ID of the datafeed to start + :arg body: The start datafeed parameters + :arg end: The end time when the datafeed should stop. When not + set, the datafeed continues in real time + :arg start: The start time from where the datafeed should begin + :arg timeout: Controls the time to wait until a datafeed has + started. Default to 20 seconds + """ + if datafeed_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'datafeed_id'." + ) + + return self.transport.perform_request( + "POST", + _make_path("_ml", "datafeeds", datafeed_id, "_start"), + params=params, + headers=headers, + body=body, + ) + + @query_params("allow_no_datafeeds", "force", "timeout") + def stop_datafeed(self, datafeed_id, params=None, headers=None): + """ + Stops one or more datafeeds. + ``_ + + :arg datafeed_id: The ID of the datafeed to stop + :arg allow_no_datafeeds: Whether to ignore if a wildcard + expression matches no datafeeds. 
(This includes `_all` string or when no
+ datafeeds have been specified)
+ :arg force: True if the datafeed should be forcefully stopped.
+ :arg timeout: Controls the time to wait until a datafeed has
+ stopped. Default to 20 seconds
+ """
+ if datafeed_id in SKIP_IN_PATH:
+ raise ValueError(
+ "Empty value passed for a required argument 'datafeed_id'."
+ )
+
+ return self.transport.perform_request(
+ "POST",
+ _make_path("_ml", "datafeeds", datafeed_id, "_stop"),
+ params=params,
+ headers=headers,
+ )
+
+ @query_params(
+ "allow_no_indices", "expand_wildcards", "ignore_throttled", "ignore_unavailable"
+ )
+ def update_datafeed(self, datafeed_id, body, params=None, headers=None):
+ """
+ Updates certain properties of a datafeed.
+ ``_
+
+ :arg datafeed_id: The ID of the datafeed to update
+ :arg body: The datafeed update settings
+ :arg allow_no_indices: Ignore if the source indices expressions
+ resolves to no concrete indices (default: true)
+ :arg expand_wildcards: Whether source index expressions should
+ get expanded to open or closed indices (default: open) Valid choices:
+ open, closed, hidden, none, all
+ :arg ignore_throttled: Ignore indices that are marked as
+ throttled (default: true)
+ :arg ignore_unavailable: Ignore unavailable indexes (default:
+ false)
+ """
+ for param in (datafeed_id, body):
+ if param in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument.")
+
+ return self.transport.perform_request(
+ "POST",
+ _make_path("_ml", "datafeeds", datafeed_id, "_update"),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params()
+ def update_filter(self, filter_id, body, params=None, headers=None):
+ """
+ Updates the description of a filter, adds items, or removes items.
+ ``_
+
+ :arg filter_id: The ID of the filter to update
+ :arg body: The filter update
+ """
+ for param in (filter_id, body):
+ if param in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument.")
+
+ return self.transport.perform_request(
+ "POST",
+ _make_path("_ml", "filters", filter_id, "_update"),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params()
+ def update_job(self, job_id, body, params=None, headers=None):
+ """
+ Updates certain properties of an anomaly detection job.
+ ``_
+
+ :arg job_id: The ID of the job to update
+ :arg body: The job update settings
+ """
+ for param in (job_id, body):
+ if param in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument.")
+
+ return self.transport.perform_request(
+ "POST",
+ _make_path("_ml", "anomaly_detectors", job_id, "_update"),
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params()
+ def validate(self, body, params=None, headers=None):
+ """
+ Validates an anomaly detection job.
+
+ :arg body: The job config
+ """
+ if body in SKIP_IN_PATH:
+ raise ValueError("Empty value passed for a required argument 'body'.")
+
+ return self.transport.perform_request(
+ "POST",
+ "/_ml/anomaly_detectors/_validate",
+ params=params,
+ headers=headers,
+ body=body,
+ )
+
+ @query_params()
+ def validate_detector(self, body, params=None, headers=None):
+ """
+ Validates an anomaly detection detector.
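+
+ A minimal usage sketch (illustrative only; the detector config is a
+ placeholder)::
+
+ from elasticsearch import Elasticsearch
+
+ es = Elasticsearch() # defaults to localhost:9200
+ es.ml.validate_detector(
+ body={"function": "mean", "field_name": "responsetime"}
+ )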
+ + :arg body: The detector + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", + "/_ml/anomaly_detectors/_validate/detector", + params=params, + headers=headers, + body=body, + ) + + @query_params("force") + def delete_data_frame_analytics(self, id, params=None, headers=None): + """ + Deletes an existing data frame analytics job. + ``_ + + :arg id: The ID of the data frame analytics to delete + :arg force: True if the job should be forcefully deleted + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_ml", "data_frame", "analytics", id), + params=params, + headers=headers, + ) + + @query_params() + def evaluate_data_frame(self, body, params=None, headers=None): + """ + Evaluates the data frame analytics for an annotated index. + ``_ + + :arg body: The evaluation definition + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", + "/_ml/data_frame/_evaluate", + params=params, + headers=headers, + body=body, + ) + + @query_params("allow_no_match", "from_", "size") + def get_data_frame_analytics(self, id=None, params=None, headers=None): + """ + Retrieves configuration information for data frame analytics jobs. + ``_ + + :arg id: The ID of the data frame analytics to fetch + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no data frame analytics. (This includes `_all` string or when no + data frame analytics have been specified) Default: True + :arg from\\_: skips a number of analytics + :arg size: specifies a max number of analytics to get Default: + 100 + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "GET", + _make_path("_ml", "data_frame", "analytics", id), + params=params, + headers=headers, + ) + + @query_params("allow_no_match", "from_", "size") + def get_data_frame_analytics_stats(self, id=None, params=None, headers=None): + """ + Retrieves usage information for data frame analytics jobs. + ``_ + + :arg id: The ID of the data frame analytics stats to fetch + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no data frame analytics. (This includes `_all` string or when no + data frame analytics have been specified) Default: True + :arg from\\_: skips a number of analytics + :arg size: specifies a max number of analytics to get Default: + 100 + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "GET", + _make_path("_ml", "data_frame", "analytics", id, "_stats"), + params=params, + headers=headers, + ) + + @query_params() + def put_data_frame_analytics(self, id, body, params=None, headers=None): + """ + Instantiates a data frame analytics job. 
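+
+ A minimal usage sketch (illustrative only; the ids, index names and
+ the analysis type are placeholders)::
+
+ from elasticsearch import Elasticsearch
+
+ es = Elasticsearch() # defaults to localhost:9200
+ es.ml.put_data_frame_analytics(
+ id="my-analytics",
+ body={
+ "source": {"index": "my-data"},
+ "dest": {"index": "my-data-outliers"},
+ "analysis": {"outlier_detection": {}},
+ },
+ )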
+ ``_ + + :arg id: The ID of the data frame analytics to create + :arg body: The data frame analytics configuration + """ + for param in (id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_ml", "data_frame", "analytics", id), + params=params, + headers=headers, + body=body, + ) + + @query_params("timeout") + def start_data_frame_analytics(self, id, body=None, params=None, headers=None): + """ + Starts a data frame analytics job. + ``_ + + :arg id: The ID of the data frame analytics to start + :arg body: The start data frame analytics parameters + :arg timeout: Controls the time to wait until the task has + started. Defaults to 20 seconds + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "POST", + _make_path("_ml", "data_frame", "analytics", id, "_start"), + params=params, + headers=headers, + body=body, + ) + + @query_params("allow_no_match", "force", "timeout") + def stop_data_frame_analytics(self, id, body=None, params=None, headers=None): + """ + Stops one or more data frame analytics jobs. + ``_ + + :arg id: The ID of the data frame analytics to stop + :arg body: The stop data frame analytics parameters + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no data frame analytics. (This includes `_all` string or when no + data frame analytics have been specified) + :arg force: True if the data frame analytics should be + forcefully stopped + :arg timeout: Controls the time to wait until the task has + stopped. Defaults to 20 seconds + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "POST", + _make_path("_ml", "data_frame", "analytics", id, "_stop"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def delete_trained_model(self, model_id, params=None, headers=None): + """ + Deletes an existing trained inference model that is currently not referenced by + an ingest pipeline. + ``_ + + :arg model_id: The ID of the trained model to delete + """ + if model_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'model_id'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_ml", "inference", model_id), + params=params, + headers=headers, + ) + + @query_params( + "allow_no_match", + "decompress_definition", + "from_", + "include_model_definition", + "size", + "tags", + ) + def get_trained_models(self, model_id=None, params=None, headers=None): + """ + Retrieves configuration information for a trained inference model. + ``_ + + :arg model_id: The ID of the trained models to fetch + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no trained models. (This includes `_all` string or when no + trained models have been specified) Default: True + :arg decompress_definition: Should the model definition be + decompressed into valid JSON or returned in a custom compressed format. + Defaults to true. Default: True + :arg from\\_: skips a number of trained models + :arg include_model_definition: Should the full model definition + be included in the results. These definitions can be large. So be + cautious when including them. Defaults to false. 
+ :arg size: specifies a max number of trained models to get + Default: 100 + :arg tags: A comma-separated list of tags that the model must + have. + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "GET", + _make_path("_ml", "inference", model_id), + params=params, + headers=headers, + ) + + @query_params("allow_no_match", "from_", "size") + def get_trained_models_stats(self, model_id=None, params=None, headers=None): + """ + Retrieves usage information for trained inference models. + ``_ + + :arg model_id: The ID of the trained models stats to fetch + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no trained models. (This includes `_all` string or when no + trained models have been specified) Default: True + :arg from\\_: skips a number of trained models + :arg size: specifies a max number of trained models to get + Default: 100 + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "GET", + _make_path("_ml", "inference", model_id, "_stats"), + params=params, + headers=headers, + ) + + @query_params() + def put_trained_model(self, model_id, body, params=None, headers=None): + """ + Creates an inference trained model. + ``_ + + :arg model_id: The ID of the trained models to store + :arg body: The trained model configuration + """ + for param in (model_id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_ml", "inference", model_id), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def estimate_model_memory(self, body, params=None, headers=None): + """ + Estimates the model memory + ``_ + + :arg body: The analysis config, plus cardinality estimates for + fields it references + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", + "/_ml/anomaly_detectors/_estimate_model_memory", + params=params, + headers=headers, + body=body, + ) + + @query_params() + def explain_data_frame_analytics( + self, body=None, id=None, params=None, headers=None + ): + """ + Explains a data frame analytics config. + ``_ + + :arg body: The data frame analytics config to explain + :arg id: The ID of the data frame analytics to explain + """ + return self.transport.perform_request( + "POST", + _make_path("_ml", "data_frame", "analytics", id, "_explain"), + params=params, + headers=headers, + body=body, + ) + + @query_params("from_", "size") + def get_categories( + self, job_id, body=None, category_id=None, params=None, headers=None + ): + """ + Retrieves anomaly detection job results for one or more categories. 
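+
+ A minimal usage sketch (illustrative only; "my-job" is a placeholder
+ id of a job that performs categorization)::
+
+ from elasticsearch import Elasticsearch
+
+ es = Elasticsearch() # defaults to localhost:9200
+ resp = es.ml.get_categories(job_id="my-job", size=20)
+ for category in resp["categories"]:
+ print(category["category_id"])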
+ ``_ + + :arg job_id: The name of the job + :arg body: Category selection details if not provided in URI + :arg category_id: The identifier of the category definition of + interest + :arg from\\_: skips a number of categories + :arg size: specifies a max number of categories to get + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "POST", + _make_path( + "_ml", "anomaly_detectors", job_id, "results", "categories", category_id + ), + params=params, + headers=headers, + body=body, + ) + + @query_params("desc", "end", "from_", "size", "sort", "start") + def get_model_snapshots( + self, job_id, body=None, snapshot_id=None, params=None, headers=None + ): + """ + Retrieves information about model snapshots. + ``_ + + :arg job_id: The ID of the job to fetch + :arg body: Model snapshot selection criteria + :arg snapshot_id: The ID of the snapshot to fetch + :arg desc: True if the results should be sorted in descending + order + :arg end: The filter 'end' query parameter + :arg from\\_: Skips a number of documents + :arg size: The default number of documents returned in queries + as a string. + :arg sort: Name of the field to sort on + :arg start: The filter 'start' query parameter + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + if job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'job_id'.") + + return self.transport.perform_request( + "POST", + _make_path( + "_ml", "anomaly_detectors", job_id, "model_snapshots", snapshot_id + ), + params=params, + headers=headers, + body=body, + ) + + @query_params("delete_intervening_results") + def revert_model_snapshot( + self, job_id, snapshot_id, body=None, params=None, headers=None + ): + """ + Reverts to a specific snapshot. + ``_ + + :arg job_id: The ID of the job to fetch + :arg snapshot_id: The ID of the snapshot to revert to + :arg body: Reversion options + :arg delete_intervening_results: Should we reset the results + back to the time of the snapshot? + """ + for param in (job_id, snapshot_id): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path( + "_ml", + "anomaly_detectors", + job_id, + "model_snapshots", + snapshot_id, + "_revert", + ), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def update_model_snapshot( + self, job_id, snapshot_id, body, params=None, headers=None + ): + """ + Updates certain properties of a snapshot. 
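+
+        A minimal sketch (job id, snapshot id, and description are
+        placeholders)::
+
+            client.ml.update_model_snapshot(
+                job_id="my_job",
+                snapshot_id="my_snapshot",
+                body={"description": "Nightly snapshot"},
+            )
+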
+ ``_ + + :arg job_id: The ID of the job to fetch + :arg snapshot_id: The ID of the snapshot to update + :arg body: The model snapshot properties to update + """ + for param in (job_id, snapshot_id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path( + "_ml", + "anomaly_detectors", + job_id, + "model_snapshots", + snapshot_id, + "_update", + ), + params=params, + headers=headers, + body=body, + ) diff --git a/elasticsearch/_async/client/monitoring.py b/elasticsearch/_async/client/monitoring.py new file mode 100644 index 000000000..cf5677cd3 --- /dev/null +++ b/elasticsearch/_async/client/monitoring.py @@ -0,0 +1,34 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH, _bulk_body + + +class MonitoringClient(NamespacedClient): + @query_params("interval", "system_api_version", "system_id") + def bulk(self, body, doc_type=None, params=None, headers=None): + """ + Used by the monitoring features to send monitoring data. + ``_ + + :arg body: The operation definition and data (action-data + pairs), separated by newlines + :arg doc_type: Default document type for items which don't + provide one + :arg interval: Collection interval (e.g., '10s' or '10000ms') of + the payload + :arg system_api_version: API Version of the monitored system + :arg system_id: Identifier of the monitored system + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + body = _bulk_body(self.transport.serializer, body) + return self.transport.perform_request( + "POST", + _make_path("_monitoring", doc_type, "bulk"), + params=params, + headers=headers, + body=body, + ) diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py new file mode 100644 index 000000000..c3aaec3af --- /dev/null +++ b/elasticsearch/_async/client/nodes.py @@ -0,0 +1,160 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params, _make_path + + +class NodesClient(NamespacedClient): + @query_params("timeout") + def reload_secure_settings( + self, body=None, node_id=None, params=None, headers=None + ): + """ + Reloads secure settings. + ``_ + + :arg body: An object containing the password for the + elasticsearch keystore + :arg node_id: A comma-separated list of node IDs to span the + reload/reinit call. Should stay empty because reloading usually involves + all cluster nodes. + :arg timeout: Explicit operation timeout + """ + return self.transport.perform_request( + "POST", + _make_path("_nodes", node_id, "reload_secure_settings"), + params=params, + headers=headers, + body=body, + ) + + @query_params("flat_settings", "timeout") + def info(self, node_id=None, metric=None, params=None, headers=None): + """ + Returns information about nodes in the cluster. 
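+
+        Usage sketch (illustrative; the metrics shown are examples)::
+
+            client.nodes.info(node_id="_local", metric="jvm,os")
+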
+ ``_ + + :arg node_id: A comma-separated list of node IDs or names to + limit the returned information; use `_local` to return information from + the node you're connecting to, leave empty to get information from all + nodes + :arg metric: A comma-separated list of metrics you wish + returned. Leave empty to return all. Valid choices: settings, os, + process, jvm, thread_pool, transport, http, plugins, ingest + :arg flat_settings: Return settings in flat format (default: + false) + :arg timeout: Explicit operation timeout + """ + return self.transport.perform_request( + "GET", _make_path("_nodes", node_id, metric), params=params, headers=headers + ) + + @query_params( + "doc_type", "ignore_idle_threads", "interval", "snapshots", "threads", "timeout" + ) + def hot_threads(self, node_id=None, params=None, headers=None): + """ + Returns information about hot threads on each node in the cluster. + ``_ + + :arg node_id: A comma-separated list of node IDs or names to + limit the returned information; use `_local` to return information from + the node you're connecting to, leave empty to get information from all + nodes + :arg doc_type: The type to sample (default: cpu) Valid choices: + cpu, wait, block + :arg ignore_idle_threads: Don't show threads that are in known- + idle places, such as waiting on a socket select or pulling from an empty + task queue (default: true) + :arg interval: The interval for the second sampling of threads + :arg snapshots: Number of samples of thread stacktrace (default: + 10) + :arg threads: Specify the number of threads to provide + information for (default: 3) + :arg timeout: Explicit operation timeout + """ + # type is a reserved word so it cannot be used, use doc_type instead + if "doc_type" in params: + params["type"] = params.pop("doc_type") + + return self.transport.perform_request( + "GET", + _make_path("_nodes", node_id, "hot_threads"), + params=params, + headers=headers, + ) + + @query_params("timeout") + def usage(self, node_id=None, metric=None, params=None, headers=None): + """ + Returns low-level information about REST actions usage on nodes. + ``_ + + :arg node_id: A comma-separated list of node IDs or names to + limit the returned information; use `_local` to return information from + the node you're connecting to, leave empty to get information from all + nodes + :arg metric: Limit the information returned to the specified + metrics Valid choices: _all, rest_actions + :arg timeout: Explicit operation timeout + """ + return self.transport.perform_request( + "GET", + _make_path("_nodes", node_id, "usage", metric), + params=params, + headers=headers, + ) + + @query_params( + "completion_fields", + "fielddata_fields", + "fields", + "groups", + "include_segment_file_sizes", + "level", + "timeout", + "types", + ) + def stats( + self, node_id=None, metric=None, index_metric=None, params=None, headers=None + ): + """ + Returns statistical information about nodes in the cluster. + ``_ + + :arg node_id: A comma-separated list of node IDs or names to + limit the returned information; use `_local` to return information from + the node you're connecting to, leave empty to get information from all + nodes + :arg metric: Limit the information returned to the specified + metrics Valid choices: _all, breaker, fs, http, indices, jvm, os, + process, thread_pool, transport, discovery + :arg index_metric: Limit the information returned for `indices` + metric to the specific index metrics. Isn't used if `indices` (or `all`) + metric isn't specified. 
Valid choices: _all, completion, docs, + fielddata, query_cache, flush, get, indexing, merge, request_cache, + refresh, search, segments, store, warmer, suggest, bulk + :arg completion_fields: A comma-separated list of fields for + `fielddata` and `suggest` index metric (supports wildcards) + :arg fielddata_fields: A comma-separated list of fields for + `fielddata` index metric (supports wildcards) + :arg fields: A comma-separated list of fields for `fielddata` + and `completion` index metric (supports wildcards) + :arg groups: A comma-separated list of search groups for + `search` index metric + :arg include_segment_file_sizes: Whether to report the + aggregated disk usage of each one of the Lucene index files (only + applies if segment stats are requested) + :arg level: Return indices stats aggregated at index, node or + shard level Valid choices: indices, node, shards Default: node + :arg timeout: Explicit operation timeout + :arg types: A comma-separated list of document types for the + `indexing` index metric + """ + return self.transport.perform_request( + "GET", + _make_path("_nodes", node_id, "stats", metric, index_metric), + params=params, + headers=headers, + ) diff --git a/elasticsearch/_async/client/remote.py b/elasticsearch/_async/client/remote.py new file mode 100644 index 000000000..2c2767b1d --- /dev/null +++ b/elasticsearch/_async/client/remote.py @@ -0,0 +1,16 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params + + +class RemoteClient(NamespacedClient): + @query_params() + def info(self, params=None, headers=None): + """ + ``_ + """ + return self.transport.perform_request( + "GET", "/_remote/info", params=params, headers=headers + ) diff --git a/elasticsearch/_async/client/rollup.py b/elasticsearch/_async/client/rollup.py new file mode 100644 index 000000000..0ee11270b --- /dev/null +++ b/elasticsearch/_async/client/rollup.py @@ -0,0 +1,155 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class RollupClient(NamespacedClient): + @query_params() + def delete_job(self, id, params=None, headers=None): + """ + Deletes an existing rollup job. + ``_ + + :arg id: The ID of the job to delete + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "DELETE", _make_path("_rollup", "job", id), params=params, headers=headers + ) + + @query_params() + def get_jobs(self, id=None, params=None, headers=None): + """ + Retrieves the configuration, stats, and status of rollup jobs. + ``_ + + :arg id: The ID of the job(s) to fetch. Accepts glob patterns, + or left blank for all jobs + """ + return self.transport.perform_request( + "GET", _make_path("_rollup", "job", id), params=params, headers=headers + ) + + @query_params() + def get_rollup_caps(self, id=None, params=None, headers=None): + """ + Returns the capabilities of any rollup jobs that have been configured for a + specific index or index pattern. 
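+
+        For example (a sketch; ``sensor-*`` is a placeholder index
+        pattern)::
+
+            client.rollup.get_rollup_caps(id="sensor-*")
+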
+        ``_
+
+        :arg id: The ID of the index to check rollup capabilities on, or
+            left blank for all jobs
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_rollup", "data", id), params=params, headers=headers
+        )
+
+    @query_params()
+    def get_rollup_index_caps(self, index, params=None, headers=None):
+        """
+        Returns the rollup capabilities of all jobs inside of a rollup index (i.e. the
+        index where rollup data is stored).
+        ``_
+
+        :arg index: The rollup index or index pattern to obtain rollup
+            capabilities from.
+        """
+        if index in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'index'.")
+
+        return self.transport.perform_request(
+            "GET", _make_path(index, "_rollup", "data"), params=params, headers=headers
+        )
+
+    @query_params()
+    def put_job(self, id, body, params=None, headers=None):
+        """
+        Creates a rollup job.
+        ``_
+
+        :arg id: The ID of the job to create
+        :arg body: The job configuration
+        """
+        for param in (id, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_rollup", "job", id),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("rest_total_hits_as_int", "typed_keys")
+    def rollup_search(self, index, body, doc_type=None, params=None, headers=None):
+        """
+        Enables searching rolled-up data using the standard query DSL.
+        ``_
+
+        :arg index: The indices or index-pattern(s) (containing rollup
+            or regular data) that should be searched
+        :arg body: The search request body
+        :arg doc_type: The doc type inside the index
+        :arg rest_total_hits_as_int: Indicates whether hits.total should
+            be rendered as an integer or an object in the rest search response
+        :arg typed_keys: Specify whether aggregation and suggester names
+            should be prefixed by their respective types in the response
+        """
+        for param in (index, body):
+            if param in SKIP_IN_PATH:
+                raise ValueError("Empty value passed for a required argument.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path(index, doc_type, "_rollup_search"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params()
+    def start_job(self, id, params=None, headers=None):
+        """
+        Starts an existing, stopped rollup job.
+        ``_
+
+        :arg id: The ID of the job to start
+        """
+        if id in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'id'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_rollup", "job", id, "_start"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("timeout", "wait_for_completion")
+    def stop_job(self, id, params=None, headers=None):
+        """
+        Stops an existing, started rollup job.
+        ``_
+
+        :arg id: The ID of the job to stop
+        :arg timeout: Block for (at maximum) the specified duration
+            while waiting for the job to stop. Defaults to 30s.
+        :arg wait_for_completion: True if the API should block until the
+            job has fully stopped, false if it should be executed asynchronously.
+            Defaults to false.
+ """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "POST", + _make_path("_rollup", "job", id, "_stop"), + params=params, + headers=headers, + ) diff --git a/elasticsearch/_async/client/searchable_snapshots.py b/elasticsearch/_async/client/searchable_snapshots.py new file mode 100644 index 000000000..1616493de --- /dev/null +++ b/elasticsearch/_async/client/searchable_snapshots.py @@ -0,0 +1,92 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class SearchableSnapshotsClient(NamespacedClient): + @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable") + def clear_cache(self, index=None, params=None, headers=None): + """ + Clear the cache of searchable snapshots. + ``_ + + :arg index: A comma-separated list of index name to limit the + operation + :arg allow_no_indices: Whether to ignore if a wildcard indices + expression resolves into no concrete indices. (This includes `_all` + string or when no indices have been specified) + :arg expand_wildcards: Whether to expand wildcard expression to + concrete indices that are open, closed or both. Valid choices: open, + closed, none, all Default: open + :arg ignore_unavailable: Whether specified concrete indices + should be ignored when unavailable (missing or closed) + """ + return self.transport.perform_request( + "POST", + _make_path(index, "_searchable_snapshots", "cache", "clear"), + params=params, + headers=headers, + ) + + @query_params("master_timeout", "wait_for_completion") + def mount(self, repository, snapshot, body, params=None, headers=None): + """ + Mount a snapshot as a searchable index. + ``_ + + :arg repository: The name of the repository containing the + snapshot of the index to mount + :arg snapshot: The name of the snapshot of the index to mount + :arg body: The restore configuration for mounting the snapshot + as searchable + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg wait_for_completion: Should this request wait until the + operation has completed before returning + """ + for param in (repository, snapshot, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path("_snapshot", repository, snapshot, "_mount"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def repository_stats(self, repository, params=None, headers=None): + """ + Retrieve usage statistics about a snapshot repository. + ``_ + + :arg repository: The repository for which to get the stats for + """ + if repository in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'repository'.") + + return self.transport.perform_request( + "GET", + _make_path("_snapshot", repository, "_stats"), + params=params, + headers=headers, + ) + + @query_params() + def stats(self, index=None, params=None, headers=None): + """ + Retrieve various statistics about searchable snapshots. 
+        ``_
+
+        :arg index: A comma-separated list of index names
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path(index, "_searchable_snapshots", "stats"),
+            params=params,
+            headers=headers,
+        )
diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py
new file mode 100644
index 000000000..90e2ce7cb
--- /dev/null
+++ b/elasticsearch/_async/client/security.py
@@ -0,0 +1,497 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
+
+
+class SecurityClient(NamespacedClient):
+    @query_params()
+    def authenticate(self, params=None, headers=None):
+        """
+        Enables authentication as a user and retrieves information about the
+        authenticated user.
+        ``_
+        """
+        return self.transport.perform_request(
+            "GET", "/_security/_authenticate", params=params, headers=headers
+        )
+
+    @query_params("refresh")
+    def change_password(self, body, username=None, params=None, headers=None):
+        """
+        Changes the passwords of users in the native realm and built-in users.
+        ``_
+
+        :arg body: the new password for the user
+        :arg username: The username of the user to change the password
+            for
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes. Valid choices: true, false, wait_for
+        """
+        if body in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'body'.")
+
+        return self.transport.perform_request(
+            "PUT",
+            _make_path("_security", "user", username, "_password"),
+            params=params,
+            headers=headers,
+            body=body,
+        )
+
+    @query_params("usernames")
+    def clear_cached_realms(self, realms, params=None, headers=None):
+        """
+        Evicts users from the user cache. Can completely clear the cache or evict
+        specific users.
+        ``_
+
+        :arg realms: Comma-separated list of realms to clear
+        :arg usernames: Comma-separated list of usernames to clear from
+            the cache
+        """
+        if realms in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'realms'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_security", "realm", realms, "_clear_cache"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def clear_cached_roles(self, name, params=None, headers=None):
+        """
+        Evicts roles from the native role cache.
+        ``_
+
+        :arg name: Role name
+        """
+        if name in SKIP_IN_PATH:
+            raise ValueError("Empty value passed for a required argument 'name'.")
+
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_security", "role", name, "_clear_cache"),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params("refresh")
+    def create_api_key(self, body, params=None, headers=None):
+        """
+        Creates an API key for access without requiring basic authentication.
+        ``_
+
+        :arg body: The api key request to create an API key
+        :arg refresh: If `true` (the default) then refresh the affected
+            shards to make this operation visible to search, if `wait_for` then wait
+            for a refresh to make this operation visible to search, if `false` then
+            do nothing with refreshes.
Valid choices: true, false, wait_for + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "PUT", "/_security/api_key", params=params, headers=headers, body=body + ) + + @query_params("refresh") + def delete_privileges(self, application, name, params=None, headers=None): + """ + Removes application privileges. + ``_ + + :arg application: Application name + :arg name: Privilege name + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + for param in (application, name): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "DELETE", + _make_path("_security", "privilege", application, name), + params=params, + headers=headers, + ) + + @query_params("refresh") + def delete_role(self, name, params=None, headers=None): + """ + Removes roles in the native realm. + ``_ + + :arg name: Role name + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_security", "role", name), + params=params, + headers=headers, + ) + + @query_params("refresh") + def delete_role_mapping(self, name, params=None, headers=None): + """ + Removes role mappings. + ``_ + + :arg name: Role-mapping name + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'name'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_security", "role_mapping", name), + params=params, + headers=headers, + ) + + @query_params("refresh") + def delete_user(self, username, params=None, headers=None): + """ + Deletes users from the native realm. + ``_ + + :arg username: username + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + if username in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'username'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_security", "user", username), + params=params, + headers=headers, + ) + + @query_params("refresh") + def disable_user(self, username, params=None, headers=None): + """ + Disables users in the native realm. 
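+
+        Usage sketch (the username is a placeholder)::
+
+            client.security.disable_user(username="jacknich", refresh="wait_for")
+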
+ ``_ + + :arg username: The username of the user to disable + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + if username in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'username'.") + + return self.transport.perform_request( + "PUT", + _make_path("_security", "user", username, "_disable"), + params=params, + headers=headers, + ) + + @query_params("refresh") + def enable_user(self, username, params=None, headers=None): + """ + Enables users in the native realm. + ``_ + + :arg username: The username of the user to enable + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + if username in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'username'.") + + return self.transport.perform_request( + "PUT", + _make_path("_security", "user", username, "_enable"), + params=params, + headers=headers, + ) + + @query_params("id", "name", "owner", "realm_name", "username") + def get_api_key(self, params=None, headers=None): + """ + Retrieves information for one or more API keys. + ``_ + + :arg id: API key id of the API key to be retrieved + :arg name: API key name of the API key to be retrieved + :arg owner: flag to query API keys owned by the currently + authenticated user + :arg realm_name: realm name of the user who created this API key + to be retrieved + :arg username: user name of the user who created this API key to + be retrieved + """ + return self.transport.perform_request( + "GET", "/_security/api_key", params=params, headers=headers + ) + + @query_params() + def get_privileges(self, application=None, name=None, params=None, headers=None): + """ + Retrieves application privileges. + ``_ + + :arg application: Application name + :arg name: Privilege name + """ + return self.transport.perform_request( + "GET", + _make_path("_security", "privilege", application, name), + params=params, + headers=headers, + ) + + @query_params() + def get_role(self, name=None, params=None, headers=None): + """ + Retrieves roles in the native realm. + ``_ + + :arg name: Role name + """ + return self.transport.perform_request( + "GET", _make_path("_security", "role", name), params=params, headers=headers + ) + + @query_params() + def get_role_mapping(self, name=None, params=None, headers=None): + """ + Retrieves role mappings. + ``_ + + :arg name: Role-Mapping name + """ + return self.transport.perform_request( + "GET", + _make_path("_security", "role_mapping", name), + params=params, + headers=headers, + ) + + @query_params() + def get_token(self, body, params=None, headers=None): + """ + Creates a bearer token for access without requiring basic authentication. 
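+
+        A sketch of a password-grant token request (credentials are
+        placeholders)::
+
+            client.security.get_token(
+                body={
+                    "grant_type": "password",
+                    "username": "elastic",
+                    "password": "changeme",
+                }
+            )
+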
+ ``_ + + :arg body: The token request to get + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", "/_security/oauth2/token", params=params, headers=headers, body=body + ) + + @query_params() + def get_user(self, username=None, params=None, headers=None): + """ + Retrieves information about users in the native realm and built-in users. + ``_ + + :arg username: A comma-separated list of usernames + """ + return self.transport.perform_request( + "GET", + _make_path("_security", "user", username), + params=params, + headers=headers, + ) + + @query_params() + def get_user_privileges(self, params=None, headers=None): + """ + Retrieves application privileges. + ``_ + """ + return self.transport.perform_request( + "GET", "/_security/user/_privileges", params=params, headers=headers + ) + + @query_params() + def has_privileges(self, body, user=None, params=None, headers=None): + """ + Determines whether the specified user has a specified list of privileges. + ``_ + + :arg body: The privileges to test + :arg user: Username + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", + _make_path("_security", "user", user, "_has_privileges"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def invalidate_api_key(self, body, params=None, headers=None): + """ + Invalidates one or more API keys. + ``_ + + :arg body: The api key request to invalidate API key(s) + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "DELETE", "/_security/api_key", params=params, headers=headers, body=body + ) + + @query_params() + def invalidate_token(self, body, params=None, headers=None): + """ + Invalidates one or more access tokens or refresh tokens. + ``_ + + :arg body: The token to invalidate + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "DELETE", + "/_security/oauth2/token", + params=params, + headers=headers, + body=body, + ) + + @query_params("refresh") + def put_privileges(self, body, params=None, headers=None): + """ + Adds or updates application privileges. + ``_ + + :arg body: The privilege(s) to add + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "PUT", "/_security/privilege/", params=params, headers=headers, body=body + ) + + @query_params("refresh") + def put_role(self, name, body, params=None, headers=None): + """ + Adds and updates roles in the native realm. + ``_ + + :arg name: Role name + :arg body: The role to add + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. 
Valid choices: true, false, wait_for + """ + for param in (name, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_security", "role", name), + params=params, + headers=headers, + body=body, + ) + + @query_params("refresh") + def put_role_mapping(self, name, body, params=None, headers=None): + """ + Creates and updates role mappings. + ``_ + + :arg name: Role-mapping name + :arg body: The role mapping to add + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + for param in (name, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_security", "role_mapping", name), + params=params, + headers=headers, + body=body, + ) + + @query_params("refresh") + def put_user(self, username, body, params=None, headers=None): + """ + Adds and updates users in the native realm. These users are commonly referred + to as native users. + ``_ + + :arg username: The username of the User + :arg body: The user to add + :arg refresh: If `true` (the default) then refresh the affected + shards to make this operation visible to search, if `wait_for` then wait + for a refresh to make this operation visible to search, if `false` then + do nothing with refreshes. Valid choices: true, false, wait_for + """ + for param in (username, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_security", "user", username), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def get_builtin_privileges(self, params=None, headers=None): + """ + Retrieves the list of cluster privileges and index privileges that are + available in this version of Elasticsearch. + ``_ + """ + return self.transport.perform_request( + "GET", "/_security/privilege/_builtin", params=params, headers=headers + ) diff --git a/elasticsearch/_async/client/slm.py b/elasticsearch/_async/client/slm.py new file mode 100644 index 000000000..915650d8e --- /dev/null +++ b/elasticsearch/_async/client/slm.py @@ -0,0 +1,135 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class SlmClient(NamespacedClient): + @query_params() + def delete_lifecycle(self, policy_id, params=None, headers=None): + """ + Deletes an existing snapshot lifecycle policy. + ``_ + + :arg policy_id: The id of the snapshot lifecycle policy to + remove + """ + if policy_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'policy_id'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_slm", "policy", policy_id), + params=params, + headers=headers, + ) + + @query_params() + def execute_lifecycle(self, policy_id, params=None, headers=None): + """ + Immediately creates a snapshot according to the lifecycle policy, without + waiting for the scheduled time. 
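+
+        Usage sketch (``daily-snapshots`` is a placeholder policy id)::
+
+            client.slm.execute_lifecycle(policy_id="daily-snapshots")
+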
+ ``_ + + :arg policy_id: The id of the snapshot lifecycle policy to be + executed + """ + if policy_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'policy_id'.") + + return self.transport.perform_request( + "PUT", + _make_path("_slm", "policy", policy_id, "_execute"), + params=params, + headers=headers, + ) + + @query_params() + def execute_retention(self, params=None, headers=None): + """ + Deletes any snapshots that are expired according to the policy's retention + rules. + ``_ + """ + return self.transport.perform_request( + "POST", "/_slm/_execute_retention", params=params, headers=headers + ) + + @query_params() + def get_lifecycle(self, policy_id=None, params=None, headers=None): + """ + Retrieves one or more snapshot lifecycle policy definitions and information + about the latest snapshot attempts. + ``_ + + :arg policy_id: Comma-separated list of snapshot lifecycle + policies to retrieve + """ + return self.transport.perform_request( + "GET", + _make_path("_slm", "policy", policy_id), + params=params, + headers=headers, + ) + + @query_params() + def get_stats(self, params=None, headers=None): + """ + Returns global and policy-level statistics about actions taken by snapshot + lifecycle management. + ``_ + """ + return self.transport.perform_request( + "GET", "/_slm/stats", params=params, headers=headers + ) + + @query_params() + def put_lifecycle(self, policy_id, body=None, params=None, headers=None): + """ + Creates or updates a snapshot lifecycle policy. + ``_ + + :arg policy_id: The id of the snapshot lifecycle policy + :arg body: The snapshot lifecycle policy definition to register + """ + if policy_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'policy_id'.") + + return self.transport.perform_request( + "PUT", + _make_path("_slm", "policy", policy_id), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def get_status(self, params=None, headers=None): + """ + Retrieves the status of snapshot lifecycle management (SLM). + ``_ + """ + return self.transport.perform_request( + "GET", "/_slm/status", params=params, headers=headers + ) + + @query_params() + def start(self, params=None, headers=None): + """ + Turns on snapshot lifecycle management (SLM). + ``_ + """ + return self.transport.perform_request( + "POST", "/_slm/start", params=params, headers=headers + ) + + @query_params() + def stop(self, params=None, headers=None): + """ + Turns off snapshot lifecycle management (SLM). + ``_ + """ + return self.transport.perform_request( + "POST", "/_slm/stop", params=params, headers=headers + ) diff --git a/elasticsearch/_async/client/snapshot.py b/elasticsearch/_async/client/snapshot.py new file mode 100644 index 000000000..55b4b759e --- /dev/null +++ b/elasticsearch/_async/client/snapshot.py @@ -0,0 +1,233 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class SnapshotClient(NamespacedClient): + @query_params("master_timeout", "wait_for_completion") + def create(self, repository, snapshot, body=None, params=None, headers=None): + """ + Creates a snapshot in a repository. 
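+
+        Usage sketch (repository and snapshot names are placeholders)::
+
+            client.snapshot.create(
+                repository="my_repo",
+                snapshot="snapshot_1",
+                wait_for_completion=True,
+            )
+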
+ ``_ + + :arg repository: A repository name + :arg snapshot: A snapshot name + :arg body: The snapshot definition + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg wait_for_completion: Should this request wait until the + operation has completed before returning + """ + for param in (repository, snapshot): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_snapshot", repository, snapshot), + params=params, + headers=headers, + body=body, + ) + + @query_params("master_timeout") + def delete(self, repository, snapshot, params=None, headers=None): + """ + Deletes a snapshot. + ``_ + + :arg repository: A repository name + :arg snapshot: A snapshot name + :arg master_timeout: Explicit operation timeout for connection + to master node + """ + for param in (repository, snapshot): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "DELETE", + _make_path("_snapshot", repository, snapshot), + params=params, + headers=headers, + ) + + @query_params("ignore_unavailable", "master_timeout", "verbose") + def get(self, repository, snapshot, params=None, headers=None): + """ + Returns information about a snapshot. + ``_ + + :arg repository: A repository name + :arg snapshot: A comma-separated list of snapshot names + :arg ignore_unavailable: Whether to ignore unavailable + snapshots, defaults to false which means a SnapshotMissingException is + thrown + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg verbose: Whether to show verbose snapshot info or only show + the basic info found in the repository index blob + """ + for param in (repository, snapshot): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "GET", + _make_path("_snapshot", repository, snapshot), + params=params, + headers=headers, + ) + + @query_params("master_timeout", "timeout") + def delete_repository(self, repository, params=None, headers=None): + """ + Deletes a repository. + ``_ + + :arg repository: A comma-separated list of repository names + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg timeout: Explicit operation timeout + """ + if repository in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'repository'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_snapshot", repository), + params=params, + headers=headers, + ) + + @query_params("local", "master_timeout") + def get_repository(self, repository=None, params=None, headers=None): + """ + Returns information about a repository. + ``_ + + :arg repository: A comma-separated list of repository names + :arg local: Return local information, do not retrieve the state + from master node (default: false) + :arg master_timeout: Explicit operation timeout for connection + to master node + """ + return self.transport.perform_request( + "GET", _make_path("_snapshot", repository), params=params, headers=headers + ) + + @query_params("master_timeout", "timeout", "verify") + def create_repository(self, repository, body, params=None, headers=None): + """ + Creates a repository. 
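+
+        A sketch registering a shared filesystem repository (the location is
+        a placeholder and must be listed in the node's `path.repo` setting)::
+
+            client.snapshot.create_repository(
+                repository="my_repo",
+                body={"type": "fs", "settings": {"location": "/mnt/backups"}},
+            )
+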
+ ``_ + + :arg repository: A repository name + :arg body: The repository definition + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg timeout: Explicit operation timeout + :arg verify: Whether to verify the repository after creation + """ + for param in (repository, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_snapshot", repository), + params=params, + headers=headers, + body=body, + ) + + @query_params("master_timeout", "wait_for_completion") + def restore(self, repository, snapshot, body=None, params=None, headers=None): + """ + Restores a snapshot. + ``_ + + :arg repository: A repository name + :arg snapshot: A snapshot name + :arg body: Details of what to restore + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg wait_for_completion: Should this request wait until the + operation has completed before returning + """ + for param in (repository, snapshot): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path("_snapshot", repository, snapshot, "_restore"), + params=params, + headers=headers, + body=body, + ) + + @query_params("ignore_unavailable", "master_timeout") + def status(self, repository=None, snapshot=None, params=None, headers=None): + """ + Returns information about the status of a snapshot. + ``_ + + :arg repository: A repository name + :arg snapshot: A comma-separated list of snapshot names + :arg ignore_unavailable: Whether to ignore unavailable + snapshots, defaults to false which means a SnapshotMissingException is + thrown + :arg master_timeout: Explicit operation timeout for connection + to master node + """ + return self.transport.perform_request( + "GET", + _make_path("_snapshot", repository, snapshot, "_status"), + params=params, + headers=headers, + ) + + @query_params("master_timeout", "timeout") + def verify_repository(self, repository, params=None, headers=None): + """ + Verifies a repository. + ``_ + + :arg repository: A repository name + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg timeout: Explicit operation timeout + """ + if repository in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'repository'.") + + return self.transport.perform_request( + "POST", + _make_path("_snapshot", repository, "_verify"), + params=params, + headers=headers, + ) + + @query_params("master_timeout", "timeout") + def cleanup_repository(self, repository, params=None, headers=None): + """ + Removes stale data from repository. + ``_ + + :arg repository: A repository name + :arg master_timeout: Explicit operation timeout for connection + to master node + :arg timeout: Explicit operation timeout + """ + if repository in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'repository'.") + + return self.transport.perform_request( + "POST", + _make_path("_snapshot", repository, "_cleanup"), + params=params, + headers=headers, + ) diff --git a/elasticsearch/_async/client/sql.py b/elasticsearch/_async/client/sql.py new file mode 100644 index 000000000..e043fee3d --- /dev/null +++ b/elasticsearch/_async/client/sql.py @@ -0,0 +1,56 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. 
+# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params, SKIP_IN_PATH + + +class SqlClient(NamespacedClient): + @query_params() + def clear_cursor(self, body, params=None, headers=None): + """ + Clears the SQL cursor + ``_ + + :arg body: Specify the cursor value in the `cursor` element to + clean the cursor. + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", "/_sql/close", params=params, headers=headers, body=body + ) + + @query_params("format") + def query(self, body, params=None, headers=None): + """ + Executes a SQL request + ``_ + + :arg body: Use the `query` element to start a query. Use the + `cursor` element to continue a query. + :arg format: a short version of the Accept header, e.g. json, + yaml + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", "/_sql", params=params, headers=headers, body=body + ) + + @query_params() + def translate(self, body, params=None, headers=None): + """ + Translates SQL into Elasticsearch queries + ``_ + + :arg body: Specify the query in the `query` element. + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", "/_sql/translate", params=params, headers=headers, body=body + ) diff --git a/elasticsearch/_async/client/ssl.py b/elasticsearch/_async/client/ssl.py new file mode 100644 index 000000000..e23d47375 --- /dev/null +++ b/elasticsearch/_async/client/ssl.py @@ -0,0 +1,18 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params + + +class SslClient(NamespacedClient): + @query_params() + def certificates(self, params=None, headers=None): + """ + Retrieves information about the X.509 certificates used to encrypt + communications in the cluster. + ``_ + """ + return self.transport.perform_request( + "GET", "/_ssl/certificates", params=params, headers=headers + ) diff --git a/elasticsearch/_async/client/tasks.py b/elasticsearch/_async/client/tasks.py new file mode 100644 index 000000000..426bc8c8c --- /dev/null +++ b/elasticsearch/_async/client/tasks.py @@ -0,0 +1,84 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class TasksClient(NamespacedClient): + @query_params( + "actions", + "detailed", + "group_by", + "nodes", + "parent_task_id", + "timeout", + "wait_for_completion", + ) + def list(self, params=None, headers=None): + """ + Returns a list of tasks. + ``_ + + :arg actions: A comma-separated list of actions that should be + returned. Leave empty to return all. 
+ :arg detailed: Return detailed task information (default: false) + :arg group_by: Group tasks by nodes or parent/child + relationships Valid choices: nodes, parents, none Default: nodes + :arg nodes: A comma-separated list of node IDs or names to limit + the returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all nodes + :arg parent_task_id: Return tasks with specified parent task id + (node_id:task_number). Set to -1 to return all. + :arg timeout: Explicit operation timeout + :arg wait_for_completion: Wait for the matching tasks to + complete (default: false) + """ + return self.transport.perform_request( + "GET", "/_tasks", params=params, headers=headers + ) + + @query_params("actions", "nodes", "parent_task_id", "wait_for_completion") + def cancel(self, task_id=None, params=None, headers=None): + """ + Cancels a task, if it can be cancelled through an API. + ``_ + + :arg task_id: Cancel the task with specified task id + (node_id:task_number) + :arg actions: A comma-separated list of actions that should be + cancelled. Leave empty to cancel all. + :arg nodes: A comma-separated list of node IDs or names to limit + the returned information; use `_local` to return information from the + node you're connecting to, leave empty to get information from all nodes + :arg parent_task_id: Cancel tasks with specified parent task id + (node_id:task_number). Set to -1 to cancel all. + :arg wait_for_completion: Should the request block until the + cancellation of the task and its descendant tasks is completed. Defaults + to false + """ + return self.transport.perform_request( + "POST", + _make_path("_tasks", task_id, "_cancel"), + params=params, + headers=headers, + ) + + @query_params("timeout", "wait_for_completion") + def get(self, task_id, params=None, headers=None): + """ + Returns information about a task. + ``_ + + :arg task_id: Return the task with specified id + (node_id:task_number) + :arg timeout: Explicit operation timeout + :arg wait_for_completion: Wait for the matching tasks to + complete (default: false) + """ + if task_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'task_id'.") + + return self.transport.perform_request( + "GET", _make_path("_tasks", task_id), params=params, headers=headers + ) diff --git a/elasticsearch/_async/client/transform.py b/elasticsearch/_async/client/transform.py new file mode 100644 index 000000000..23159bbb3 --- /dev/null +++ b/elasticsearch/_async/client/transform.py @@ -0,0 +1,208 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class TransformClient(NamespacedClient): + @query_params("force") + def delete_transform(self, transform_id, params=None, headers=None): + """ + Deletes an existing transform. + ``_ + + :arg transform_id: The id of the transform to delete + :arg force: When `true`, the transform is deleted regardless of + its current state. The default value is `false`, meaning that the + transform must be `stopped` before it can be deleted. + """ + if transform_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'transform_id'." 
+ ) + + return self.transport.perform_request( + "DELETE", + _make_path("_transform", transform_id), + params=params, + headers=headers, + ) + + @query_params("allow_no_match", "from_", "size") + def get_transform(self, transform_id=None, params=None, headers=None): + """ + Retrieves configuration information for transforms. + ``_ + + :arg transform_id: The id or comma delimited list of id + expressions of the transforms to get, '_all' or '*' implies get all + transforms + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no transforms. (This includes `_all` string or when no + transforms have been specified) + :arg from\\_: skips a number of transform configs, defaults to 0 + :arg size: specifies a max number of transforms to get, defaults + to 100 + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + return self.transport.perform_request( + "GET", + _make_path("_transform", transform_id), + params=params, + headers=headers, + ) + + @query_params("allow_no_match", "from_", "size") + def get_transform_stats(self, transform_id, params=None, headers=None): + """ + Retrieves usage information for transforms. + ``_ + + :arg transform_id: The id of the transform for which to get + stats. '_all' or '*' implies all transforms + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no transforms. (This includes `_all` string or when no + transforms have been specified) + :arg from\\_: skips a number of transform stats, defaults to 0 + :arg size: specifies a max number of transform stats to get, + defaults to 100 + """ + # from is a reserved word so it cannot be used, use from_ instead + if "from_" in params: + params["from"] = params.pop("from_") + + if transform_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'transform_id'." + ) + + return self.transport.perform_request( + "GET", + _make_path("_transform", transform_id, "_stats"), + params=params, + headers=headers, + ) + + @query_params() + def preview_transform(self, body, params=None, headers=None): + """ + Previews a transform. + ``_ + + :arg body: The definition for the transform to preview + """ + if body in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'body'.") + + return self.transport.perform_request( + "POST", "/_transform/_preview", params=params, headers=headers, body=body + ) + + @query_params("defer_validation") + def put_transform(self, transform_id, body, params=None, headers=None): + """ + Instantiates a transform. + ``_ + + :arg transform_id: The id of the new transform. + :arg body: The transform definition + :arg defer_validation: If validations should be deferred until + transform starts, defaults to false. + """ + for param in (transform_id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "PUT", + _make_path("_transform", transform_id), + params=params, + headers=headers, + body=body, + ) + + @query_params("timeout") + def start_transform(self, transform_id, params=None, headers=None): + """ + Starts one or more transforms. + ``_ + + :arg transform_id: The id of the transform to start + :arg timeout: Controls the time to wait for the transform to + start + """ + if transform_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'transform_id'." 
+ ) + + return self.transport.perform_request( + "POST", + _make_path("_transform", transform_id, "_start"), + params=params, + headers=headers, + ) + + @query_params( + "allow_no_match", + "force", + "timeout", + "wait_for_checkpoint", + "wait_for_completion", + ) + def stop_transform(self, transform_id, params=None, headers=None): + """ + Stops one or more transforms. + ``_ + + :arg transform_id: The id of the transform to stop + :arg allow_no_match: Whether to ignore if a wildcard expression + matches no transforms. (This includes `_all` string or when no + transforms have been specified) + :arg force: Whether to force stop a failed transform or not. + Default to false + :arg timeout: Controls the time to wait until the transform has + stopped. Default to 30 seconds + :arg wait_for_checkpoint: Whether to wait for the transform to + reach a checkpoint before stopping. Default to false + :arg wait_for_completion: Whether to wait for the transform to + fully stop before returning or not. Default to false + """ + if transform_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for a required argument 'transform_id'." + ) + + return self.transport.perform_request( + "POST", + _make_path("_transform", transform_id, "_stop"), + params=params, + headers=headers, + ) + + @query_params("defer_validation") + def update_transform(self, transform_id, body, params=None, headers=None): + """ + Updates certain properties of a transform. + ``_ + + :arg transform_id: The id of the transform. + :arg body: The update transform definition + :arg defer_validation: If validations should be deferred until + transform starts, defaults to false. + """ + for param in (transform_id, body): + if param in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument.") + + return self.transport.perform_request( + "POST", + _make_path("_transform", transform_id, "_update"), + params=params, + headers=headers, + body=body, + ) diff --git a/elasticsearch/_async/client/utils.py b/elasticsearch/_async/client/utils.py new file mode 100644 index 000000000..11082db47 --- /dev/null +++ b/elasticsearch/_async/client/utils.py @@ -0,0 +1,130 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from __future__ import unicode_literals + +import weakref +from datetime import date, datetime +from functools import wraps +from ..compat import string_types, quote, PY2 + +# parts of URL to be omitted +SKIP_IN_PATH = (None, "", b"", [], ()) + + +def _escape(value): + """ + Escape a single value of a URL string or a query parameter. If it is a list + or tuple, turn it into a comma-separated string first. 
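+
+    For example (illustrative; text values come back UTF-8 encoded)::
+
+        _escape(["a", "b"])  # -> b"a,b"
+        _escape(True)        # -> b"true"
+        _escape(b"raw")      # -> b"raw"
+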
+ """ + + # make sequences into comma-separated stings + if isinstance(value, (list, tuple)): + value = ",".join(value) + + # dates and datetimes into isoformat + elif isinstance(value, (date, datetime)): + value = value.isoformat() + + # make bools into true/false strings + elif isinstance(value, bool): + value = str(value).lower() + + # don't decode bytestrings + elif isinstance(value, bytes): + return value + + # encode strings to utf-8 + if isinstance(value, string_types): + if PY2 and isinstance(value, unicode): # noqa: F821 + return value.encode("utf-8") + if not PY2 and isinstance(value, str): + return value.encode("utf-8") + + return str(value) + + +def _make_path(*parts): + """ + Create a URL string from parts, omit all `None` values and empty strings. + Convert lists and tuples to comma separated values. + """ + # TODO: maybe only allow some parts to be lists/tuples ? + return "/" + "/".join( + # preserve ',' and '*' in url for nicer URLs in logs + quote(_escape(p), b",*") + for p in parts + if p not in SKIP_IN_PATH + ) + + +# parameters that apply to all methods +GLOBAL_PARAMS = ("pretty", "human", "error_trace", "format", "filter_path") + + +def query_params(*es_query_params): + """ + Decorator that pops all accepted parameters from method's kwargs and puts + them in the params argument. + """ + + def _wrapper(func): + @wraps(func) + def _wrapped(*args, **kwargs): + params = (kwargs.pop("params", None) or {}).copy() + headers = { + k.lower(): v + for k, v in (kwargs.pop("headers", None) or {}).copy().items() + } + + if "opaque_id" in kwargs: + headers["x-opaque-id"] = kwargs.pop("opaque_id") + + for p in es_query_params + GLOBAL_PARAMS: + if p in kwargs: + v = kwargs.pop(p) + if v is not None: + params[p] = _escape(v) + + # don't treat ignore, request_timeout, and opaque_id as other params to avoid escaping + for p in ("ignore", "request_timeout"): + if p in kwargs: + params[p] = kwargs.pop(p) + return func(*args, params=params, headers=headers, **kwargs) + + return _wrapped + + return _wrapper + + +def _bulk_body(serializer, body): + # if not passed in a string, serialize items and join by newline + if not isinstance(body, string_types): + body = "\n".join(map(serializer.dumps, body)) + + # bulk body must end with a newline + if isinstance(body, bytes): + if not body.endswith(b"\n"): + body += b"\n" + elif isinstance(body, string_types) and not body.endswith("\n"): + body += "\n" + + return body + + +class NamespacedClient(object): + def __init__(self, client): + self.client = client + + @property + def transport(self): + return self.client.transport + + +class AddonClient(NamespacedClient): + @classmethod + def infect_client(cls, client): + addon = cls(weakref.proxy(client)) + setattr(client, cls.namespace, addon) + return client diff --git a/elasticsearch/_async/client/watcher.py b/elasticsearch/_async/client/watcher.py new file mode 100644 index 000000000..654c4b489 --- /dev/null +++ b/elasticsearch/_async/client/watcher.py @@ -0,0 +1,180 @@ +# Licensed to Elasticsearch B.V under one or more agreements. +# Elasticsearch B.V licenses this file to you under the Apache 2.0 License. +# See the LICENSE file in the project root for more information + +from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH + + +class WatcherClient(NamespacedClient): + @query_params() + def ack_watch(self, watch_id, action_id=None, params=None, headers=None): + """ + Acknowledges a watch, manually throttling the execution of the watch's actions. 
+ ``_ + + :arg watch_id: Watch ID + :arg action_id: A comma-separated list of the action ids to be + acked + """ + if watch_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'watch_id'.") + + return self.transport.perform_request( + "PUT", + _make_path("_watcher", "watch", watch_id, "_ack", action_id), + params=params, + headers=headers, + ) + + @query_params() + def activate_watch(self, watch_id, params=None, headers=None): + """ + Activates a currently inactive watch. + ``_ + + :arg watch_id: Watch ID + """ + if watch_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'watch_id'.") + + return self.transport.perform_request( + "PUT", + _make_path("_watcher", "watch", watch_id, "_activate"), + params=params, + headers=headers, + ) + + @query_params() + def deactivate_watch(self, watch_id, params=None, headers=None): + """ + Deactivates a currently active watch. + ``_ + + :arg watch_id: Watch ID + """ + if watch_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'watch_id'.") + + return self.transport.perform_request( + "PUT", + _make_path("_watcher", "watch", watch_id, "_deactivate"), + params=params, + headers=headers, + ) + + @query_params() + def delete_watch(self, id, params=None, headers=None): + """ + Removes a watch from Watcher. + ``_ + + :arg id: Watch ID + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "DELETE", + _make_path("_watcher", "watch", id), + params=params, + headers=headers, + ) + + @query_params("debug") + def execute_watch(self, body=None, id=None, params=None, headers=None): + """ + Forces the execution of a stored watch. + ``_ + + :arg body: Execution control + :arg id: Watch ID + :arg debug: indicates whether the watch should execute in debug + mode + """ + return self.transport.perform_request( + "PUT", + _make_path("_watcher", "watch", id, "_execute"), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def get_watch(self, id, params=None, headers=None): + """ + Retrieves a watch by its ID. + ``_ + + :arg id: Watch ID + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "GET", _make_path("_watcher", "watch", id), params=params, headers=headers + ) + + @query_params("active", "if_primary_term", "if_seq_no", "version") + def put_watch(self, id, body=None, params=None, headers=None): + """ + Creates a new watch, or updates an existing one. + ``_ + + :arg id: Watch ID + :arg body: The watch + :arg active: Specify whether the watch is in/active by default + :arg if_primary_term: only update the watch if the last + operation that has changed the watch has the specified primary term + :arg if_seq_no: only update the watch if the last operation that + has changed the watch has the specified sequence number + :arg version: Explicit version number for concurrency control + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for a required argument 'id'.") + + return self.transport.perform_request( + "PUT", + _make_path("_watcher", "watch", id), + params=params, + headers=headers, + body=body, + ) + + @query_params() + def start(self, params=None, headers=None): + """ + Starts Watcher if it is not already running. 
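
Put together, the watch lifecycle these methods expose looks roughly like the
following; a minimal sketch assuming the namespace is attached to the client
as `es.watcher` like the other namespaced clients, with a hypothetical no-op
watch body:

    from elasticsearch import Elasticsearch

    es = Elasticsearch()

    # hypothetical minimal watch: scheduled trigger, no actions
    watch = {
        "trigger": {"schedule": {"interval": "10m"}},
        "input": {"simple": {}},
        "condition": {"always": {}},
        "actions": {},
    }

    es.watcher.put_watch(id="my-watch", body=watch, active=True)
    es.watcher.execute_watch(id="my-watch", debug=True)  # force one run
    es.watcher.ack_watch(watch_id="my-watch")            # throttle its actions
    es.watcher.deactivate_watch(watch_id="my-watch")
    es.watcher.delete_watch(id="my-watch")
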
+        ``_
+        """
+        return self.transport.perform_request(
+            "POST", "/_watcher/_start", params=params, headers=headers
+        )
+
+    @query_params("emit_stacktraces")
+    def stats(self, metric=None, params=None, headers=None):
+        """
+        Retrieves the current Watcher metrics.
+        ``_
+
+        :arg metric: Controls what additional stat metrics should be
+            included in the response. Valid choices: _all, queued_watches,
+            current_watches, pending_watches
+        :arg emit_stacktraces: Emits stack traces of currently running
+            watches
+        """
+        return self.transport.perform_request(
+            "GET",
+            _make_path("_watcher", "stats", metric),
+            params=params,
+            headers=headers,
+        )
+
+    @query_params()
+    def stop(self, params=None, headers=None):
+        """
+        Stops Watcher if it is running.
+        ``_
+        """
+        return self.transport.perform_request(
+            "POST", "/_watcher/_stop", params=params, headers=headers
+        )
diff --git a/elasticsearch/_async/client/xpack.py b/elasticsearch/_async/client/xpack.py
new file mode 100644
index 000000000..61560fd4a
--- /dev/null
+++ b/elasticsearch/_async/client/xpack.py
@@ -0,0 +1,36 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from .utils import NamespacedClient, query_params
+
+
+class XPackClient(NamespacedClient):
+    def __getattr__(self, attr_name):
+        return getattr(self.client, attr_name)
+
+    # AUTO-GENERATED-API-DEFINITIONS #
+    @query_params("categories")
+    def info(self, params=None, headers=None):
+        """
+        Retrieves information about the installed X-Pack features.
+        ``_
+
+        :arg categories: Comma-separated list of info categories. Can be
+            any of: build, license, features
+        """
+        return self.transport.perform_request(
+            "GET", "/_xpack", params=params, headers=headers
+        )
+
+    @query_params("master_timeout")
+    def usage(self, params=None, headers=None):
+        """
+        Retrieves usage information about the installed X-Pack features.
+        ``_
+
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        return self.transport.perform_request(
+            "GET", "/_xpack/usage", params=params, headers=headers
+        )
diff --git a/elasticsearch/_async/compat.py b/elasticsearch/_async/compat.py
index 3c188c236..9de896073 100644
--- a/elasticsearch/_async/compat.py
+++ b/elasticsearch/_async/compat.py
@@ -3,6 +3,7 @@
 # See the LICENSE file in the project root for more information
 
 import asyncio
+from ..compat import *  # noqa
 
 # Hack supporting Python 3.6 asyncio which didn't have 'get_running_loop()'.
 # Essentially we want to get away from having users pass in a loop to us.
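
The effect of the shim referenced here is that connection classes can resolve
the running loop lazily instead of requiring a `loop=` argument; a minimal
sketch of the semantics (using `asyncio.run()`, which itself needs Python
3.7+; on 3.6 a manual `run_until_complete()` plays the same role):

    import asyncio
    from elasticsearch._async.compat import get_running_loop

    async def main():
        loop = get_running_loop()  # native on 3.7+, emulated on 3.6
        assert loop.is_running()

    asyncio.run(main())

    # outside of a coroutine there is no running loop, so calling
    # get_running_loop() here raises RuntimeError on both code paths
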
diff --git a/elasticsearch/client/__init__.py b/elasticsearch/client/__init__.py index f93281a71..ba56f75cf 100644 --- a/elasticsearch/client/__init__.py +++ b/elasticsearch/client/__init__.py @@ -6,9 +6,7 @@ from __future__ import unicode_literals import logging -from ..transport import Transport -from ..exceptions import TransportError -from ..compat import string_types, urlparse, unquote +from ..transport import Transport, TransportError from .async_search import AsyncSearchClient from .autoscaling import AutoscalingClient from .indices import IndicesClient @@ -20,7 +18,7 @@ from .snapshot import SnapshotClient from .tasks import TasksClient from .xpack import XPackClient -from .utils import query_params, _make_path, SKIP_IN_PATH, _bulk_body +from .utils import query_params, _make_path, SKIP_IN_PATH, _bulk_body, _normalize_hosts # xpack APIs from .ccr import CcrClient @@ -45,51 +43,6 @@ logger = logging.getLogger("elasticsearch") -def _normalize_hosts(hosts): - """ - Helper function to transform hosts argument to - :class:`~elasticsearch.Elasticsearch` to a list of dicts. - """ - # if hosts are empty, just defer to defaults down the line - if hosts is None: - return [{}] - - # passed in just one string - if isinstance(hosts, string_types): - hosts = [hosts] - - out = [] - # normalize hosts to dicts - for host in hosts: - if isinstance(host, string_types): - if "://" not in host: - host = "//%s" % host - - parsed_url = urlparse(host) - h = {"host": parsed_url.hostname} - - if parsed_url.port: - h["port"] = parsed_url.port - - if parsed_url.scheme == "https": - h["port"] = parsed_url.port or 443 - h["use_ssl"] = True - - if parsed_url.username or parsed_url.password: - h["http_auth"] = "%s:%s" % ( - unquote(parsed_url.username), - unquote(parsed_url.password), - ) - - if parsed_url.path and parsed_url.path != "/": - h["url_prefix"] = parsed_url.path - - out.append(h) - else: - out.append(host) - return out - - class Elasticsearch(object): """ Elasticsearch low-level client. Provides a straightforward mapping from @@ -276,6 +229,17 @@ def __repr__(self): # probably operating on custom transport and connection_pool, ignore return super(Elasticsearch, self).__repr__() + def __enter__(self): + if hasattr(self.transport, "_async_call"): + self.transport._async_call() + return self + + def __exit__(self, *_): + self.close() + + def close(self): + self.transport.close() + # AUTO-GENERATED-API-DEFINITIONS # @query_params() def ping(self, params=None, headers=None): diff --git a/elasticsearch/client/utils.py b/elasticsearch/client/utils.py index 11082db47..e8241fc23 100644 --- a/elasticsearch/client/utils.py +++ b/elasticsearch/client/utils.py @@ -7,12 +7,57 @@ import weakref from datetime import date, datetime from functools import wraps -from ..compat import string_types, quote, PY2 +from ..compat import string_types, quote, PY2, unquote, urlparse # parts of URL to be omitted SKIP_IN_PATH = (None, "", b"", [], ()) +def _normalize_hosts(hosts): + """ + Helper function to transform hosts argument to + :class:`~elasticsearch.Elasticsearch` to a list of dicts. 
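
With `__enter__`/`__exit__` on the synchronous client, the transport is now
closed deterministically; a small usage sketch (host list is a placeholder):

    from elasticsearch import Elasticsearch

    with Elasticsearch(["localhost:9200"]) as es:
        if es.ping():
            print(es.info()["version"]["number"])
    # es.close() has been called by __exit__ at this point
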
+ """ + # if hosts are empty, just defer to defaults down the line + if hosts is None: + return [{}] + + # passed in just one string + if isinstance(hosts, string_types): + hosts = [hosts] + + out = [] + # normalize hosts to dicts + for host in hosts: + if isinstance(host, string_types): + if "://" not in host: + host = "//%s" % host + + parsed_url = urlparse(host) + h = {"host": parsed_url.hostname} + + if parsed_url.port: + h["port"] = parsed_url.port + + if parsed_url.scheme == "https": + h["port"] = parsed_url.port or 443 + h["use_ssl"] = True + + if parsed_url.username or parsed_url.password: + h["http_auth"] = "%s:%s" % ( + unquote(parsed_url.username), + unquote(parsed_url.password), + ) + + if parsed_url.path and parsed_url.path != "/": + h["url_prefix"] = parsed_url.path + + out.append(h) + else: + out.append(host) + return out + + def _escape(value): """ Escape a single value of a URL string or a query parameter. If it is a list diff --git a/test_elasticsearch/test_async/test_server/__init__.py b/test_elasticsearch/test_async/test_server/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_elasticsearch/test_async/test_server/conftest.py b/test_elasticsearch/test_async/test_server/conftest.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_elasticsearch/test_async/test_server/test_clients.py b/test_elasticsearch/test_async/test_server/test_clients.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_elasticsearch/test_async/test_server/test_rest_api_spec.py b/test_elasticsearch/test_async/test_server/test_rest_api_spec.py new file mode 100644 index 000000000..e69de29bb diff --git a/utils/generate_api.py b/utils/generate_api.py index 99c3d1492..402f100ce 100644 --- a/utils/generate_api.py +++ b/utils/generate_api.py @@ -16,6 +16,7 @@ from pathlib import Path from jinja2 import Environment, FileSystemLoader, TemplateNotFound +import unasync http = urllib3.PoolManager() @@ -79,9 +80,8 @@ def add(self, api): def parse_orig(self): self.orders = [] self.header = "class C:" - fname = CODE_ROOT / "elasticsearch" / "client" / f"{self.namespace}.py" - if os.path.exists(fname): - with open(fname) as f: + if os.path.exists(self.filepath): + with open(self.filepath) as f: content = f.read() header_lines = [] for line in content.split("\n"): @@ -97,7 +97,7 @@ def parse_orig(self): break self.header = "\n".join(header_lines) self.orders = re.findall( - r'\n def ([a-z_]+)\(', + r'\n (?:async )?def ([a-z_]+)\(', content, re.MULTILINE ) @@ -113,12 +113,15 @@ def sort(self): def dump(self): self.sort() - fname = CODE_ROOT / "elasticsearch" / "client" / f"{self.namespace}.py" - with open(fname, "w") as f: + with open(self.filepath, "w") as f: f.write(self.header) for api in self._apis: f.write(api.to_python()) - blacken(fname) + blacken(self.filepath) + + @property + def filepath(self): + return CODE_ROOT / f"elasticsearch/_async/client/{self.namespace}.py" class API: @@ -299,6 +302,31 @@ def dump_modules(modules): for mod in modules.values(): mod.dump() + # Unasync all the generated async code + additional_replacements = { + # We want to rewrite to 'Transport' instead of 'SyncTransport', etc + "AsyncTransport": "Transport", + "AsyncElasticsearch": "Elasticsearch", + # We don't want to rewrite this class + "AsyncSearchClient": "AsyncSearchClient", + } + rules = [ + unasync.Rule( + fromdir="/elasticsearch/_async/client/", + todir="/elasticsearch/client/", + additional_replacements=additional_replacements + ), + ] + + filepaths = [] + for 
root, _, filenames in os.walk(CODE_ROOT / "elasticsearch/_async"): + for filename in filenames: + if filename.endswith(".py") and filename != "utils.py": + filepaths.append(os.path.join(root, filename)) + + unasync.unasync_files(filepaths, rules) + blacken(CODE_ROOT / "elasticsearch") + if __name__ == "__main__": dump_modules(read_modules()) diff --git a/utils/templates/base b/utils/templates/base index f00199877..e56425467 100644 --- a/utils/templates/base +++ b/utils/templates/base @@ -1,6 +1,6 @@ @query_params({{ api.query_params|map("tojson")|join(", ")}}) - def {{ api.name }}(self, {% include "func_params" %}): + async def {{ api.name }}(self, {% include "func_params" %}): """ {% if api.description %} {{ api.description|replace("\n", " ")|wordwrap(wrapstring="\n ") }} @@ -24,6 +24,6 @@ body = _bulk_body(self.transport.serializer, body) {% endif %} {% block request %} - return self.transport.perform_request("{{ api.method }}", {% include "url" %}, params=params, headers=headers{% if api.body %}, body=body{% endif %}) + return await self.transport.perform_request("{{ api.method }}", {% include "url" %}, params=params, headers=headers{% if api.body %}, body=body{% endif %}) {% endblock %} diff --git a/utils/templates/overrides/__init__/clear_scroll b/utils/templates/overrides/__init__/clear_scroll index 1176551fa..ecbecd2ac 100644 --- a/utils/templates/overrides/__init__/clear_scroll +++ b/utils/templates/overrides/__init__/clear_scroll @@ -7,6 +7,6 @@ elif scroll_id: params["scroll_id"] = scroll_id - return self.transport.perform_request("{{ api.method }}", "/_search/scroll", params=params, headers=headers, body=body) + return await self.transport.perform_request("{{ api.method }}", "/_search/scroll", params=params, headers=headers, body=body) {% endblock %} diff --git a/utils/templates/overrides/__init__/create b/utils/templates/overrides/__init__/create index f2da5ca3c..4102a202c 100644 --- a/utils/templates/overrides/__init__/create +++ b/utils/templates/overrides/__init__/create @@ -5,6 +5,6 @@ else: path = _make_path(index, doc_type, id) - return self.transport.perform_request("POST" if id in SKIP_IN_PATH else "PUT", path, params=params, headers=headers, body=body) + return await self.transport.perform_request("POST" if id in SKIP_IN_PATH else "PUT", path, params=params, headers=headers, body=body) {% endblock %} diff --git a/utils/templates/overrides/__init__/index b/utils/templates/overrides/__init__/index index 6e3a5a13d..826cdf4f7 100644 --- a/utils/templates/overrides/__init__/index +++ b/utils/templates/overrides/__init__/index @@ -1,6 +1,6 @@ {% extends "base" %} {% block request %} - return self.transport.perform_request( + return await self.transport.perform_request( "POST" if id in SKIP_IN_PATH else "PUT", _make_path(index, "_doc", id), params=params, diff --git a/utils/templates/overrides/__init__/scroll b/utils/templates/overrides/__init__/scroll index 5a6ae8335..243143e42 100644 --- a/utils/templates/overrides/__init__/scroll +++ b/utils/templates/overrides/__init__/scroll @@ -7,6 +7,6 @@ elif scroll_id: params["scroll_id"] = scroll_id - return self.transport.perform_request("{{ api.method }}", "/_search/scroll", params=params, headers=headers, body=body) + return await self.transport.perform_request("{{ api.method }}", "/_search/scroll", params=params, headers=headers, body=body) {% endblock %} diff --git a/utils/templates/overrides/__init__/update b/utils/templates/overrides/__init__/update index bd2919c99..04025f9d3 100644 --- 
a/utils/templates/overrides/__init__/update +++ b/utils/templates/overrides/__init__/update @@ -5,6 +5,6 @@ else: path = _make_path(index, doc_type, id, "_update") - return self.transport.perform_request("{{ api.method }}", path, params=params, headers=headers, body=body) + return await self.transport.perform_request("{{ api.method }}", path, params=params, headers=headers, body=body) {% endblock %} diff --git a/utils/templates/overrides/cluster/stats b/utils/templates/overrides/cluster/stats index aed2d3b10..3223013a7 100644 --- a/utils/templates/overrides/cluster/stats +++ b/utils/templates/overrides/cluster/stats @@ -1,5 +1,5 @@ {% extends "base" %} {% block request %} - return self.transport.perform_request("{{ api.method }}", "/_cluster/stats" if node_id in SKIP_IN_PATH else _make_path("_cluster", "stats", "nodes", node_id), params=params, headers=headers) + return await self.transport.perform_request("{{ api.method }}", "/_cluster/stats" if node_id in SKIP_IN_PATH else _make_path("_cluster", "stats", "nodes", node_id), params=params, headers=headers) {% endblock%} From 7f6ccbe227218b930e542c2b9f9fb4af4bc13af9 Mon Sep 17 00:00:00 2001 From: Seth Michael Larson Date: Fri, 15 May 2020 09:37:40 -0500 Subject: [PATCH 4/5] Generate async API --- elasticsearch/_async/client/__init__.py | 222 +++++++-------- elasticsearch/_async/client/async_search.py | 12 +- elasticsearch/_async/client/autoscaling.py | 16 +- elasticsearch/_async/client/cat.py | 100 +++---- elasticsearch/_async/client/ccr.py | 52 ++-- elasticsearch/_async/client/cluster.py | 68 ++--- elasticsearch/_async/client/enrich.py | 20 +- elasticsearch/_async/client/eql.py | 4 +- elasticsearch/_async/client/graph.py | 4 +- elasticsearch/_async/client/ilm.py | 40 +-- elasticsearch/_async/client/indices.py | 240 ++++++++-------- elasticsearch/_async/client/ingest.py | 20 +- elasticsearch/_async/client/license.py | 28 +- elasticsearch/_async/client/migration.py | 4 +- elasticsearch/_async/client/ml.py | 258 ++++++++++-------- elasticsearch/_async/client/monitoring.py | 4 +- elasticsearch/_async/client/nodes.py | 20 +- elasticsearch/_async/client/rollup.py | 34 +-- .../_async/client/searchable_snapshots.py | 16 +- elasticsearch/_async/client/security.py | 106 +++---- elasticsearch/_async/client/slm.py | 36 +-- elasticsearch/_async/client/snapshot.py | 42 +-- elasticsearch/_async/client/sql.py | 12 +- elasticsearch/_async/client/ssl.py | 4 +- elasticsearch/_async/client/tasks.py | 12 +- elasticsearch/_async/client/transform.py | 32 +-- elasticsearch/_async/client/utils.py | 135 +-------- elasticsearch/_async/client/watcher.py | 40 +-- elasticsearch/_async/client/xpack.py | 8 +- elasticsearch/client/cluster.py | 8 +- elasticsearch/client/indices.py | 50 +++- elasticsearch/client/ml.py | 12 +- elasticsearch/client/snapshot.py | 2 +- 33 files changed, 796 insertions(+), 865 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index f93281a71..10790e305 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -6,9 +6,7 @@ from __future__ import unicode_literals import logging -from ..transport import Transport -from ..exceptions import TransportError -from ..compat import string_types, urlparse, unquote +from ..transport import AsyncTransport, TransportError from .async_search import AsyncSearchClient from .autoscaling import AutoscalingClient from .indices import IndicesClient @@ -20,7 +18,7 @@ from .snapshot import SnapshotClient from .tasks 
import TasksClient from .xpack import XPackClient -from .utils import query_params, _make_path, SKIP_IN_PATH, _bulk_body +from .utils import query_params, _make_path, SKIP_IN_PATH, _bulk_body, _normalize_hosts # xpack APIs from .ccr import CcrClient @@ -45,52 +43,7 @@ logger = logging.getLogger("elasticsearch") -def _normalize_hosts(hosts): - """ - Helper function to transform hosts argument to - :class:`~elasticsearch.Elasticsearch` to a list of dicts. - """ - # if hosts are empty, just defer to defaults down the line - if hosts is None: - return [{}] - - # passed in just one string - if isinstance(hosts, string_types): - hosts = [hosts] - - out = [] - # normalize hosts to dicts - for host in hosts: - if isinstance(host, string_types): - if "://" not in host: - host = "//%s" % host - - parsed_url = urlparse(host) - h = {"host": parsed_url.hostname} - - if parsed_url.port: - h["port"] = parsed_url.port - - if parsed_url.scheme == "https": - h["port"] = parsed_url.port or 443 - h["use_ssl"] = True - - if parsed_url.username or parsed_url.password: - h["http_auth"] = "%s:%s" % ( - unquote(parsed_url.username), - unquote(parsed_url.password), - ) - - if parsed_url.path and parsed_url.path != "/": - h["url_prefix"] = parsed_url.path - - out.append(h) - else: - out.append(host) - return out - - -class Elasticsearch(object): +class AsyncElasticsearch(object): """ Elasticsearch low-level client. Provides a straightforward mapping from Python to ES REST endpoints. @@ -215,7 +168,7 @@ def default(self, obj): """ - def __init__(self, hosts=None, transport_class=Transport, **kwargs): + def __init__(self, hosts=None, transport_class=AsyncTransport, **kwargs): """ :arg hosts: list of nodes, or a single node, we should connect to. Node should be a dictionary ({"host": "localhost", "port": 9200}), @@ -274,29 +227,40 @@ def __repr__(self): return "<{cls}({cons})>".format(cls=self.__class__.__name__, cons=cons) except Exception: # probably operating on custom transport and connection_pool, ignore - return super(Elasticsearch, self).__repr__() + return super(AsyncElasticsearch, self).__repr__() + + async def __aenter__(self): + if hasattr(self.transport, "_async_call"): + await self.transport._async_call() + return self + + async def __aexit__(self, *_): + await self.close() + + async def close(self): + await self.transport.close() # AUTO-GENERATED-API-DEFINITIONS # @query_params() - def ping(self, params=None, headers=None): + async def ping(self, params=None, headers=None): """ Returns whether the cluster is running. ``_ """ try: - return self.transport.perform_request( + return await self.transport.perform_request( "HEAD", "/", params=params, headers=headers ) except TransportError: return False @query_params() - def info(self, params=None, headers=None): + async def info(self, params=None, headers=None): """ Returns basic information about the cluster. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/", params=params, headers=headers ) @@ -309,7 +273,7 @@ def info(self, params=None, headers=None): "version_type", "wait_for_active_shards", ) - def create(self, index, id, body, doc_type=None, params=None, headers=None): + async def create(self, index, id, body, doc_type=None, params=None, headers=None): """ Creates a new document in the index. Returns a 409 response when a document with a same ID already exists in the index. 
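
The asynchronous client mirrors that pattern with `async with`; a minimal
sketch, assuming `AsyncElasticsearch` ends up exported from the top-level
`elasticsearch` package like `AIOHttpConnection`:

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main():
        async with AsyncElasticsearch(["localhost:9200"]) as es:
            if await es.ping():
                info = await es.info()
                print(info["version"]["number"])
        # transport closed by __aexit__

    asyncio.run(main())
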
@@ -345,7 +309,7 @@ def create(self, index, id, body, doc_type=None, params=None, headers=None): else: path = _make_path(index, doc_type, id) - return self.transport.perform_request( + return await self.transport.perform_request( "POST" if id in SKIP_IN_PATH else "PUT", path, params=params, @@ -365,7 +329,7 @@ def create(self, index, id, body, doc_type=None, params=None, headers=None): "version_type", "wait_for_active_shards", ) - def index(self, index, body, id=None, params=None, headers=None): + async def index(self, index, body, id=None, params=None, headers=None): """ Creates or updates a document in an index. ``_ @@ -403,7 +367,7 @@ def index(self, index, body, id=None, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST" if id in SKIP_IN_PATH else "PUT", _make_path(index, "_doc", id), params=params, @@ -421,7 +385,7 @@ def index(self, index, body, id=None, params=None, headers=None): "timeout", "wait_for_active_shards", ) - def bulk(self, body, index=None, doc_type=None, params=None, headers=None): + async def bulk(self, body, index=None, doc_type=None, params=None, headers=None): """ Allows to perform multiple index/update/delete operations in a single request. ``_ @@ -456,7 +420,7 @@ def bulk(self, body, index=None, doc_type=None, params=None, headers=None): raise ValueError("Empty value passed for a required argument 'body'.") body = _bulk_body(self.transport.serializer, body) - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, doc_type, "_bulk"), params=params, @@ -465,7 +429,7 @@ def bulk(self, body, index=None, doc_type=None, params=None, headers=None): ) @query_params() - def clear_scroll(self, body=None, scroll_id=None, params=None, headers=None): + async def clear_scroll(self, body=None, scroll_id=None, params=None, headers=None): """ Explicitly clears the search context for a scroll. ``_ @@ -481,7 +445,7 @@ def clear_scroll(self, body=None, scroll_id=None, params=None, headers=None): elif scroll_id: params["scroll_id"] = scroll_id - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", "/_search/scroll", params=params, headers=headers, body=body ) @@ -501,7 +465,7 @@ def clear_scroll(self, body=None, scroll_id=None, params=None, headers=None): "routing", "terminate_after", ) - def count(self, body=None, index=None, params=None, headers=None): + async def count(self, body=None, index=None, params=None, headers=None): """ Returns number of documents matching a query. ``_ @@ -538,7 +502,7 @@ def count(self, body=None, index=None, params=None, headers=None): :arg terminate_after: The maximum count for each shard, upon reaching which the query execution will terminate early """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_count"), params=params, @@ -556,7 +520,7 @@ def count(self, body=None, index=None, params=None, headers=None): "version_type", "wait_for_active_shards", ) - def delete(self, index, id, doc_type=None, params=None, headers=None): + async def delete(self, index, id, doc_type=None, params=None, headers=None): """ Removes a document from the index. 
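
As the `bulk` signature above shows, the body can be a pre-rendered NDJSON
string or a sequence that `_bulk_body` serializes and newline-joins; a sketch
of both forms (index name is a placeholder, `es` is an `AsyncElasticsearch`
instance):

    async def index_two_docs(es):
        # serialized item-by-item and joined with '\n'; the helper also
        # guarantees the trailing newline the bulk API requires
        actions = [
            {"index": {"_index": "logs"}},
            {"message": "hello"},
            {"index": {"_index": "logs"}},
            {"message": "world"},
        ]
        await es.bulk(body=actions)

        # equivalent pre-rendered NDJSON
        await es.bulk(body='{"index":{"_index":"logs"}}\n{"message":"hello"}\n')
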
``_ @@ -592,7 +556,7 @@ def delete(self, index, id, doc_type=None, params=None, headers=None): if doc_type in SKIP_IN_PATH: doc_type = "_doc" - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path(index, doc_type, id), params=params, headers=headers ) @@ -630,7 +594,7 @@ def delete(self, index, id, doc_type=None, params=None, headers=None): "wait_for_active_shards", "wait_for_completion", ) - def delete_by_query(self, index, body, params=None, headers=None): + async def delete_by_query(self, index, body, params=None, headers=None): """ Deletes documents matching the provided query. ``_ @@ -713,7 +677,7 @@ def delete_by_query(self, index, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_delete_by_query"), params=params, @@ -722,7 +686,7 @@ def delete_by_query(self, index, body, params=None, headers=None): ) @query_params("requests_per_second") - def delete_by_query_rethrottle(self, task_id, params=None, headers=None): + async def delete_by_query_rethrottle(self, task_id, params=None, headers=None): """ Changes the number of requests per second for a particular Delete By Query operation. @@ -735,7 +699,7 @@ def delete_by_query_rethrottle(self, task_id, params=None, headers=None): if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'task_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_delete_by_query", task_id, "_rethrottle"), params=params, @@ -743,7 +707,7 @@ def delete_by_query_rethrottle(self, task_id, params=None, headers=None): ) @query_params("master_timeout", "timeout") - def delete_script(self, id, params=None, headers=None): + async def delete_script(self, id, params=None, headers=None): """ Deletes a script. ``_ @@ -755,7 +719,7 @@ def delete_script(self, id, params=None, headers=None): if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_scripts", id), params=params, headers=headers ) @@ -771,7 +735,7 @@ def delete_script(self, id, params=None, headers=None): "version", "version_type", ) - def exists(self, index, id, params=None, headers=None): + async def exists(self, index, id, params=None, headers=None): """ Returns information about whether a document exists in an index. ``_ @@ -801,7 +765,7 @@ def exists(self, index, id, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "HEAD", _make_path(index, "_doc", id), params=params, headers=headers ) @@ -816,7 +780,7 @@ def exists(self, index, id, params=None, headers=None): "version", "version_type", ) - def exists_source(self, index, id, doc_type=None, params=None, headers=None): + async def exists_source(self, index, id, doc_type=None, params=None, headers=None): """ Returns information about whether a document source exists in an index. 
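
For `delete_by_query` the required body is an ordinary query DSL wrapper; a
minimal sketch (index and query are placeholders, `es` is an
`AsyncElasticsearch` instance):

    async def purge_debug_logs(es):
        # run server-side as a task instead of blocking the request
        resp = await es.delete_by_query(
            index="logs",
            body={"query": {"match": {"level": "debug"}}},
            wait_for_completion=False,
        )
        # the returned task id can be rethrottled while the task runs
        await es.delete_by_query_rethrottle(
            task_id=resp["task"], requests_per_second=100
        )
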
``_ @@ -846,7 +810,7 @@ def exists_source(self, index, id, doc_type=None, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "HEAD", _make_path(index, doc_type, id, "_source"), params=params, @@ -867,7 +831,7 @@ def exists_source(self, index, id, doc_type=None, params=None, headers=None): "routing", "stored_fields", ) - def explain(self, index, id, body=None, params=None, headers=None): + async def explain(self, index, id, body=None, params=None, headers=None): """ Returns information about why a specific matches (or doesn't match) a query. ``_ @@ -901,7 +865,7 @@ def explain(self, index, id, body=None, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_explain", id), params=params, @@ -916,7 +880,7 @@ def explain(self, index, id, body=None, params=None, headers=None): "ignore_unavailable", "include_unmapped", ) - def field_caps(self, index=None, params=None, headers=None): + async def field_caps(self, index=None, params=None, headers=None): """ Returns the information about the capabilities of fields among multiple indices. @@ -936,7 +900,7 @@ def field_caps(self, index=None, params=None, headers=None): :arg include_unmapped: Indicates whether unmapped fields should be included in the response. """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_field_caps"), params=params, headers=headers ) @@ -952,7 +916,7 @@ def field_caps(self, index=None, params=None, headers=None): "version", "version_type", ) - def get(self, index, id, params=None, headers=None): + async def get(self, index, id, params=None, headers=None): """ Returns a document. ``_ @@ -982,12 +946,12 @@ def get(self, index, id, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_doc", id), params=params, headers=headers ) @query_params("master_timeout") - def get_script(self, id, params=None, headers=None): + async def get_script(self, id, params=None, headers=None): """ Returns a script. ``_ @@ -998,7 +962,7 @@ def get_script(self, id, params=None, headers=None): if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_scripts", id), params=params, headers=headers ) @@ -1013,7 +977,7 @@ def get_script(self, id, params=None, headers=None): "version", "version_type", ) - def get_source(self, index, id, params=None, headers=None): + async def get_source(self, index, id, params=None, headers=None): """ Returns the source of a document. 
``_ @@ -1041,7 +1005,7 @@ def get_source(self, index, id, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_source", id), params=params, headers=headers ) @@ -1055,7 +1019,7 @@ def get_source(self, index, id, params=None, headers=None): "routing", "stored_fields", ) - def mget(self, body, index=None, params=None, headers=None): + async def mget(self, body, index=None, params=None, headers=None): """ Allows to get multiple documents in one request. ``_ @@ -1083,7 +1047,7 @@ def mget(self, body, index=None, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_mget"), params=params, @@ -1100,7 +1064,7 @@ def mget(self, body, index=None, params=None, headers=None): "search_type", "typed_keys", ) - def msearch(self, body, index=None, params=None, headers=None): + async def msearch(self, body, index=None, params=None, headers=None): """ Allows to execute several search operations in one request. ``_ @@ -1137,7 +1101,7 @@ def msearch(self, body, index=None, params=None, headers=None): raise ValueError("Empty value passed for a required argument 'body'.") body = _bulk_body(self.transport.serializer, body) - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_msearch"), params=params, @@ -1146,7 +1110,7 @@ def msearch(self, body, index=None, params=None, headers=None): ) @query_params("master_timeout", "timeout") - def put_script(self, id, body, context=None, params=None, headers=None): + async def put_script(self, id, body, context=None, params=None, headers=None): """ Creates or updates a script. 
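
Like `bulk`, `msearch` accepts a flat list that `_bulk_body` turns into NDJSON
header/body pairs; a minimal sketch (indices are placeholders, `es` is an
`AsyncElasticsearch` instance):

    async def two_searches(es):
        searches = [
            {"index": "logs"},                        # header
            {"query": {"match_all": {}}},             # body
            {"index": "metrics"},                     # header
            {"query": {"match": {"host": "web-1"}}},  # body
        ]
        resp = await es.msearch(body=searches)
        for item in resp["responses"]:
            print(item["hits"]["total"])
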
``_ @@ -1161,7 +1125,7 @@ def put_script(self, id, body, context=None, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_scripts", id, context), params=params, @@ -1172,7 +1136,7 @@ def put_script(self, id, body, context=None, params=None, headers=None): @query_params( "allow_no_indices", "expand_wildcards", "ignore_unavailable", "search_type" ) - def rank_eval(self, body, index=None, params=None, headers=None): + async def rank_eval(self, body, index=None, params=None, headers=None): """ Allows to evaluate the quality of ranked search results over a set of typical search queries @@ -1196,7 +1160,7 @@ def rank_eval(self, body, index=None, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_rank_eval"), params=params, @@ -1214,7 +1178,7 @@ def rank_eval(self, body, index=None, params=None, headers=None): "wait_for_active_shards", "wait_for_completion", ) - def reindex(self, body, params=None, headers=None): + async def reindex(self, body, params=None, headers=None): """ Allows to copy documents from one index to another, optionally filtering the source documents by a query, changing the destination index settings, or @@ -1246,12 +1210,12 @@ def reindex(self, body, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_reindex", params=params, headers=headers, body=body ) @query_params("requests_per_second") - def reindex_rethrottle(self, task_id, params=None, headers=None): + async def reindex_rethrottle(self, task_id, params=None, headers=None): """ Changes the number of requests per second for a particular Reindex operation. ``_ @@ -1263,7 +1227,7 @@ def reindex_rethrottle(self, task_id, params=None, headers=None): if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'task_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_reindex", task_id, "_rethrottle"), params=params, @@ -1271,7 +1235,9 @@ def reindex_rethrottle(self, task_id, params=None, headers=None): ) @query_params() - def render_search_template(self, body=None, id=None, params=None, headers=None): + async def render_search_template( + self, body=None, id=None, params=None, headers=None + ): """ Allows to use the Mustache language to pre-render a search definition. 
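
The `reindex` body follows the usual source/dest shape; a minimal sketch
(index names are placeholders, `es` is an `AsyncElasticsearch` instance):

    async def copy_index(es):
        resp = await es.reindex(
            body={"source": {"index": "logs-old"}, "dest": {"index": "logs-new"}},
            wait_for_completion=False,  # returns a task id instead of blocking
        )
        await es.reindex_rethrottle(task_id=resp["task"], requests_per_second=50)
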
``_ @@ -1279,7 +1245,7 @@ def render_search_template(self, body=None, id=None, params=None, headers=None): :arg body: The search definition template and its params :arg id: The id of the stored search template """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_render", "template", id), params=params, @@ -1288,14 +1254,14 @@ def render_search_template(self, body=None, id=None, params=None, headers=None): ) @query_params() - def scripts_painless_execute(self, body=None, params=None, headers=None): + async def scripts_painless_execute(self, body=None, params=None, headers=None): """ Allows an arbitrary script to be executed and a result to be returned ``_ :arg body: The script to execute """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_scripts/painless/_execute", params=params, @@ -1304,7 +1270,7 @@ def scripts_painless_execute(self, body=None, params=None, headers=None): ) @query_params("rest_total_hits_as_int", "scroll") - def scroll(self, body=None, scroll_id=None, params=None, headers=None): + async def scroll(self, body=None, scroll_id=None, params=None, headers=None): """ Allows to retrieve a large numbers of results from a single search request. ``_ @@ -1324,7 +1290,7 @@ def scroll(self, body=None, scroll_id=None, params=None, headers=None): elif scroll_id: params["scroll_id"] = scroll_id - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_search/scroll", params=params, headers=headers, body=body ) @@ -1372,7 +1338,7 @@ def scroll(self, body=None, scroll_id=None, params=None, headers=None): "typed_keys", "version", ) - def search(self, body=None, index=None, params=None, headers=None): + async def search(self, body=None, index=None, params=None, headers=None): """ Returns results matching a query. ``_ @@ -1475,7 +1441,7 @@ def search(self, body=None, index=None, params=None, headers=None): if "from_" in params: params["from"] = params.pop("from_") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_search"), params=params, @@ -1491,7 +1457,7 @@ def search(self, body=None, index=None, params=None, headers=None): "preference", "routing", ) - def search_shards(self, index=None, params=None, headers=None): + async def search_shards(self, index=None, params=None, headers=None): """ Returns information about the indices and shards that a search request would be executed against. @@ -1513,7 +1479,7 @@ def search_shards(self, index=None, params=None, headers=None): be performed on (default: random) :arg routing: Specific routing value """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_search_shards"), params=params, headers=headers ) @@ -1530,7 +1496,7 @@ def search_shards(self, index=None, params=None, headers=None): "timeout", "wait_for_active_shards", ) - def update(self, index, id, body, doc_type=None, params=None, headers=None): + async def update(self, index, id, body, doc_type=None, params=None, headers=None): """ Updates a document with a script or partial document. 
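
`search` and `scroll` combine into the usual pagination loop; a minimal sketch
(`es` is an `AsyncElasticsearch` instance; note `from_` where the reserved
word `from` would otherwise appear):

    async def scan_all(es):
        resp = await es.search(
            index="logs",
            body={"query": {"match_all": {}}},
            scroll="2m",
            size=100,
        )
        while resp["hits"]["hits"]:
            for hit in resp["hits"]["hits"]:
                print(hit["_source"])
            resp = await es.scroll(scroll_id=resp["_scroll_id"], scroll="2m")
        await es.clear_scroll(scroll_id=resp["_scroll_id"])
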
``_ @@ -1576,12 +1542,12 @@ def update(self, index, id, body, doc_type=None, params=None, headers=None): else: path = _make_path(index, doc_type, id, "_update") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", path, params=params, headers=headers, body=body ) @query_params("requests_per_second") - def update_by_query_rethrottle(self, task_id, params=None, headers=None): + async def update_by_query_rethrottle(self, task_id, params=None, headers=None): """ Changes the number of requests per second for a particular Update By Query operation. @@ -1594,7 +1560,7 @@ def update_by_query_rethrottle(self, task_id, params=None, headers=None): if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'task_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_update_by_query", task_id, "_rethrottle"), params=params, @@ -1602,22 +1568,22 @@ def update_by_query_rethrottle(self, task_id, params=None, headers=None): ) @query_params() - def get_script_context(self, params=None, headers=None): + async def get_script_context(self, params=None, headers=None): """ Returns all script contexts. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_script_context", params=params, headers=headers ) @query_params() - def get_script_languages(self, params=None, headers=None): + async def get_script_languages(self, params=None, headers=None): """ Returns available script types, languages and contexts ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_script_language", params=params, headers=headers ) @@ -1628,7 +1594,7 @@ def get_script_languages(self, params=None, headers=None): "search_type", "typed_keys", ) - def msearch_template(self, body, index=None, params=None, headers=None): + async def msearch_template(self, body, index=None, params=None, headers=None): """ Allows to execute several search template operations in one request. ``_ @@ -1654,7 +1620,7 @@ def msearch_template(self, body, index=None, params=None, headers=None): raise ValueError("Empty value passed for a required argument 'body'.") body = _bulk_body(self.transport.serializer, body) - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_msearch", "template"), params=params, @@ -1676,7 +1642,7 @@ def msearch_template(self, body, index=None, params=None, headers=None): "version", "version_type", ) - def mtermvectors(self, body=None, index=None, params=None, headers=None): + async def mtermvectors(self, body=None, index=None, params=None, headers=None): """ Returns multiple termvectors in one request. ``_ @@ -1717,7 +1683,7 @@ def mtermvectors(self, body=None, index=None, params=None, headers=None): :arg version_type: Specific version type Valid choices: internal, external, external_gte """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_mtermvectors"), params=params, @@ -1740,7 +1706,7 @@ def mtermvectors(self, body=None, index=None, params=None, headers=None): "search_type", "typed_keys", ) - def search_template(self, body, index=None, params=None, headers=None): + async def search_template(self, body, index=None, params=None, headers=None): """ Allows to use the Mustache language to pre-render a search definition. 
``_ @@ -1780,7 +1746,7 @@ def search_template(self, body, index=None, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_search", "template"), params=params, @@ -1801,7 +1767,7 @@ def search_template(self, body, index=None, params=None, headers=None): "version", "version_type", ) - def termvectors(self, index, body=None, id=None, params=None, headers=None): + async def termvectors(self, index, body=None, id=None, params=None, headers=None): """ Returns information and statistics about terms in the fields of a particular document. @@ -1836,7 +1802,7 @@ def termvectors(self, index, body=None, id=None, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_termvectors", id), params=params, @@ -1880,7 +1846,7 @@ def termvectors(self, index, body=None, id=None, params=None, headers=None): "wait_for_active_shards", "wait_for_completion", ) - def update_by_query(self, index, body=None, params=None, headers=None): + async def update_by_query(self, index, body=None, params=None, headers=None): """ Performs an update on every document in the index without changing the source, for example to pick up a mapping change. @@ -1967,7 +1933,7 @@ def update_by_query(self, index, body=None, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_update_by_query"), params=params, diff --git a/elasticsearch/_async/client/async_search.py b/elasticsearch/_async/client/async_search.py index d6062cbee..02df0f0f6 100644 --- a/elasticsearch/_async/client/async_search.py +++ b/elasticsearch/_async/client/async_search.py @@ -7,7 +7,7 @@ class AsyncSearchClient(NamespacedClient): @query_params() - def delete(self, id, params=None, headers=None): + async def delete(self, id, params=None, headers=None): """ Deletes an async search by ID. If the search is still running, the search request will be cancelled. Otherwise, the saved search results are deleted. @@ -18,12 +18,12 @@ def delete(self, id, params=None, headers=None): if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_async_search", id), params=params, headers=headers ) @query_params("keep_alive", "typed_keys", "wait_for_completion_timeout") - def get(self, id, params=None, headers=None): + async def get(self, id, params=None, headers=None): """ Retrieves the results of a previously submitted async search request given its ID. 
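
The async-search namespace supports a submit/poll/clean-up flow, using the
`get` and `submit` methods that follow below; a minimal sketch (index and
timeout are placeholders, `es` is an `AsyncElasticsearch` instance):

    async def background_search(es):
        resp = await es.async_search.submit(
            index="logs",
            body={"query": {"match_all": {}}},
            wait_for_completion_timeout="100ms",
        )
        search_id = resp.get("id")  # only present while still running
        if search_id:
            resp = await es.async_search.get(id=search_id)
            await es.async_search.delete(id=search_id)
        return resp
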
@@ -40,7 +40,7 @@ def get(self, id, params=None, headers=None): if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_async_search", id), params=params, headers=headers ) @@ -87,7 +87,7 @@ def get(self, id, params=None, headers=None): "version", "wait_for_completion_timeout", ) - def submit(self, body=None, index=None, params=None, headers=None): + async def submit(self, body=None, index=None, params=None, headers=None): """ Executes a search request asynchronously. ``_ @@ -182,7 +182,7 @@ def submit(self, body=None, index=None, params=None, headers=None): if "from_" in params: params["from"] = params.pop("from_") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_async_search"), params=params, diff --git a/elasticsearch/_async/client/autoscaling.py b/elasticsearch/_async/client/autoscaling.py index a648d79e4..f57850129 100644 --- a/elasticsearch/_async/client/autoscaling.py +++ b/elasticsearch/_async/client/autoscaling.py @@ -7,18 +7,18 @@ class AutoscalingClient(NamespacedClient): @query_params() - def get_autoscaling_decision(self, params=None, headers=None): + async def get_autoscaling_decision(self, params=None, headers=None): """ Gets the current autoscaling decision based on the configured autoscaling policy, indicating whether or not autoscaling is needed. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_autoscaling/decision", params=params, headers=headers ) @query_params() - def delete_autoscaling_policy(self, name, params=None, headers=None): + async def delete_autoscaling_policy(self, name, params=None, headers=None): """ Deletes an autoscaling policy. ``_ @@ -28,7 +28,7 @@ def delete_autoscaling_policy(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_autoscaling", "policy", name), params=params, @@ -36,7 +36,7 @@ def delete_autoscaling_policy(self, name, params=None, headers=None): ) @query_params() - def put_autoscaling_policy(self, name, body, params=None, headers=None): + async def put_autoscaling_policy(self, name, body, params=None, headers=None): """ Creates a new autoscaling policy. ``_ @@ -48,7 +48,7 @@ def put_autoscaling_policy(self, name, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_autoscaling", "policy", name), params=params, @@ -57,7 +57,7 @@ def put_autoscaling_policy(self, name, body, params=None, headers=None): ) @query_params() - def get_autoscaling_policy(self, name, params=None, headers=None): + async def get_autoscaling_policy(self, name, params=None, headers=None): """ Retrieves an autoscaling policy. 
``_ @@ -67,7 +67,7 @@ def get_autoscaling_policy(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_autoscaling", "policy", name), params=params, diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index 84282850e..87bc7c8fe 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -7,7 +7,7 @@ class CatClient(NamespacedClient): @query_params("expand_wildcards", "format", "h", "help", "local", "s", "v") - def aliases(self, name=None, params=None, headers=None): + async def aliases(self, name=None, params=None, headers=None): """ Shows information about currently configured aliases to indices including filter and routing infos. @@ -27,12 +27,12 @@ def aliases(self, name=None, params=None, headers=None): to sort by :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cat", "aliases", name), params=params, headers=headers ) @query_params("bytes", "format", "h", "help", "local", "master_timeout", "s", "v") - def allocation(self, node_id=None, params=None, headers=None): + async def allocation(self, node_id=None, params=None, headers=None): """ Provides a snapshot of how many shards are allocated to each data node and how much disk space they are using. @@ -54,7 +54,7 @@ def allocation(self, node_id=None, params=None, headers=None): to sort by :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cat", "allocation", node_id), params=params, @@ -62,7 +62,7 @@ def allocation(self, node_id=None, params=None, headers=None): ) @query_params("format", "h", "help", "s", "v") - def count(self, index=None, params=None, headers=None): + async def count(self, index=None, params=None, headers=None): """ Provides quick access to the document count of the entire cluster, or individual indices. @@ -78,12 +78,12 @@ def count(self, index=None, params=None, headers=None): to sort by :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cat", "count", index), params=params, headers=headers ) @query_params("format", "h", "help", "s", "time", "ts", "v") - def health(self, params=None, headers=None): + async def health(self, params=None, headers=None): """ Returns a concise representation of the cluster health. ``_ @@ -99,12 +99,12 @@ def health(self, params=None, headers=None): :arg ts: Set to false to disable timestamping Default: True :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_cat/health", params=params, headers=headers ) @query_params("help", "s") - def help(self, params=None, headers=None): + async def help(self, params=None, headers=None): """ Returns help for the Cat APIs. 
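
All of the cat endpoints above share the same presentation parameters
(`format`, `h`, `s`, `v`, ...); a minimal sketch (`es` is an
`AsyncElasticsearch` instance):

    async def show_cluster(es):
        # JSON instead of the default text table
        print(await es.cat.health(format="json"))
        # verbose headers, wildcard name filter
        print(await es.cat.aliases(name="logs-*", v=True))
        # pick columns and sort by one of them
        print(await es.cat.allocation(h="node,disk.percent", s="disk.percent:desc"))
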
``_ @@ -113,7 +113,7 @@ def help(self, params=None, headers=None): :arg s: Comma-separated list of column names or column aliases to sort by """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_cat", params=params, headers=headers ) @@ -132,7 +132,7 @@ def help(self, params=None, headers=None): "time", "v", ) - def indices(self, index=None, params=None, headers=None): + async def indices(self, index=None, params=None, headers=None): """ Returns information about indices: number of primaries and replicas, document counts, disk size, ... @@ -166,12 +166,12 @@ def indices(self, index=None, params=None, headers=None): choices: d, h, m, s, ms, micros, nanos :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cat", "indices", index), params=params, headers=headers ) @query_params("format", "h", "help", "local", "master_timeout", "s", "v") - def master(self, params=None, headers=None): + async def master(self, params=None, headers=None): """ Returns information about the master node. ``_ @@ -188,14 +188,14 @@ def master(self, params=None, headers=None): to sort by :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_cat/master", params=params, headers=headers ) @query_params( "bytes", "format", "full_id", "h", "help", "master_timeout", "s", "time", "v" ) - def nodes(self, params=None, headers=None): + async def nodes(self, params=None, headers=None): """ Returns basic statistics about performance of cluster nodes. ``_ @@ -216,14 +216,14 @@ def nodes(self, params=None, headers=None): choices: d, h, m, s, ms, micros, nanos :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_cat/nodes", params=params, headers=headers ) @query_params( "active_only", "bytes", "detailed", "format", "h", "help", "s", "time", "v" ) - def recovery(self, index=None, params=None, headers=None): + async def recovery(self, index=None, params=None, headers=None): """ Returns information about index shard recoveries, both on-going completed. ``_ @@ -246,14 +246,14 @@ def recovery(self, index=None, params=None, headers=None): choices: d, h, m, s, ms, micros, nanos :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cat", "recovery", index), params=params, headers=headers ) @query_params( "bytes", "format", "h", "help", "local", "master_timeout", "s", "time", "v" ) - def shards(self, index=None, params=None, headers=None): + async def shards(self, index=None, params=None, headers=None): """ Provides a detailed view of shard allocation on nodes. ``_ @@ -276,12 +276,12 @@ def shards(self, index=None, params=None, headers=None): choices: d, h, m, s, ms, micros, nanos :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cat", "shards", index), params=params, headers=headers ) @query_params("bytes", "format", "h", "help", "s", "v") - def segments(self, index=None, params=None, headers=None): + async def segments(self, index=None, params=None, headers=None): """ Provides low-level information about the segments in the shards of an index. 
``_ @@ -298,12 +298,12 @@ def segments(self, index=None, params=None, headers=None): to sort by :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cat", "segments", index), params=params, headers=headers ) @query_params("format", "h", "help", "local", "master_timeout", "s", "time", "v") - def pending_tasks(self, params=None, headers=None): + async def pending_tasks(self, params=None, headers=None): """ Returns a concise representation of the cluster pending tasks. ``_ @@ -322,12 +322,12 @@ def pending_tasks(self, params=None, headers=None): choices: d, h, m, s, ms, micros, nanos :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_cat/pending_tasks", params=params, headers=headers ) @query_params("format", "h", "help", "local", "master_timeout", "s", "time", "v") - def thread_pool(self, thread_pool_patterns=None, params=None, headers=None): + async def thread_pool(self, thread_pool_patterns=None, params=None, headers=None): """ Returns cluster-wide thread pool statistics per node. By default the active, queue and rejected statistics are returned for all thread pools. @@ -349,7 +349,7 @@ def thread_pool(self, thread_pool_patterns=None, params=None, headers=None): choices: d, h, m, s, ms, micros, nanos :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cat", "thread_pool", thread_pool_patterns), params=params, @@ -357,7 +357,7 @@ def thread_pool(self, thread_pool_patterns=None, params=None, headers=None): ) @query_params("bytes", "format", "h", "help", "s", "v") - def fielddata(self, fields=None, params=None, headers=None): + async def fielddata(self, fields=None, params=None, headers=None): """ Shows how much heap memory is currently being used by fielddata on every data node in the cluster. @@ -375,7 +375,7 @@ def fielddata(self, fields=None, params=None, headers=None): to sort by :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cat", "fielddata", fields), params=params, @@ -383,7 +383,7 @@ def fielddata(self, fields=None, params=None, headers=None): ) @query_params("format", "h", "help", "local", "master_timeout", "s", "v") - def plugins(self, params=None, headers=None): + async def plugins(self, params=None, headers=None): """ Returns information about installed plugins across nodes node. ``_ @@ -400,12 +400,12 @@ def plugins(self, params=None, headers=None): to sort by :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_cat/plugins", params=params, headers=headers ) @query_params("format", "h", "help", "local", "master_timeout", "s", "v") - def nodeattrs(self, params=None, headers=None): + async def nodeattrs(self, params=None, headers=None): """ Returns information about custom node attributes. ``_ @@ -422,12 +422,12 @@ def nodeattrs(self, params=None, headers=None): to sort by :arg v: Verbose mode. 
Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_cat/nodeattrs", params=params, headers=headers ) @query_params("format", "h", "help", "local", "master_timeout", "s", "v") - def repositories(self, params=None, headers=None): + async def repositories(self, params=None, headers=None): """ Returns information about snapshot repositories registered in the cluster. ``_ @@ -444,14 +444,14 @@ def repositories(self, params=None, headers=None): to sort by :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_cat/repositories", params=params, headers=headers ) @query_params( "format", "h", "help", "ignore_unavailable", "master_timeout", "s", "time", "v" ) - def snapshots(self, repository=None, params=None, headers=None): + async def snapshots(self, repository=None, params=None, headers=None): """ Returns all snapshots in a specific repository. ``_ @@ -472,7 +472,7 @@ def snapshots(self, repository=None, params=None, headers=None): choices: d, h, m, s, ms, micros, nanos :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cat", "snapshots", repository), params=params, @@ -491,7 +491,7 @@ def snapshots(self, repository=None, params=None, headers=None): "time", "v", ) - def tasks(self, params=None, headers=None): + async def tasks(self, params=None, headers=None): """ Returns information about the tasks currently executing on one or more nodes in the cluster. @@ -516,12 +516,12 @@ def tasks(self, params=None, headers=None): choices: d, h, m, s, ms, micros, nanos :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_cat/tasks", params=params, headers=headers ) @query_params("format", "h", "help", "local", "master_timeout", "s", "v") - def templates(self, name=None, params=None, headers=None): + async def templates(self, name=None, params=None, headers=None): """ Returns information about existing templates. ``_ @@ -539,12 +539,12 @@ def templates(self, name=None, params=None, headers=None): to sort by :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cat", "templates", name), params=params, headers=headers ) @query_params("allow_no_match", "bytes", "format", "h", "help", "s", "time", "v") - def ml_data_frame_analytics(self, id=None, params=None, headers=None): + async def ml_data_frame_analytics(self, id=None, params=None, headers=None): """ Gets configuration and usage information about data frame analytics jobs. ``_ @@ -565,7 +565,7 @@ def ml_data_frame_analytics(self, id=None, params=None, headers=None): choices: d, h, m, s, ms, micros, nanos :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cat", "ml", "data_frame", "analytics", id), params=params, @@ -573,7 +573,7 @@ def ml_data_frame_analytics(self, id=None, params=None, headers=None): ) @query_params("allow_no_datafeeds", "format", "h", "help", "s", "time", "v") - def ml_datafeeds(self, datafeed_id=None, params=None, headers=None): + async def ml_datafeeds(self, datafeed_id=None, params=None, headers=None): """ Gets configuration and usage information about datafeeds. 
``_ @@ -592,7 +592,7 @@ def ml_datafeeds(self, datafeed_id=None, params=None, headers=None): choices: d, h, m, s, ms, micros, nanos :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cat", "ml", "datafeeds", datafeed_id), params=params, @@ -600,7 +600,7 @@ def ml_datafeeds(self, datafeed_id=None, params=None, headers=None): ) @query_params("allow_no_jobs", "bytes", "format", "h", "help", "s", "time", "v") - def ml_jobs(self, job_id=None, params=None, headers=None): + async def ml_jobs(self, job_id=None, params=None, headers=None): """ Gets configuration and usage information about anomaly detection jobs. ``_ @@ -621,7 +621,7 @@ def ml_jobs(self, job_id=None, params=None, headers=None): choices: d, h, m, s, ms, micros, nanos :arg v: Verbose mode. Display column headers """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cat", "ml", "anomaly_detectors", job_id), params=params, @@ -640,7 +640,7 @@ def ml_jobs(self, job_id=None, params=None, headers=None): "time", "v", ) - def ml_trained_models(self, model_id=None, params=None, headers=None): + async def ml_trained_models(self, model_id=None, params=None, headers=None): """ Gets configuration and usage information about inference trained models. ``_ @@ -668,7 +668,7 @@ def ml_trained_models(self, model_id=None, params=None, headers=None): if "from_" in params: params["from"] = params.pop("from_") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cat", "ml", "trained_models", model_id), params=params, @@ -678,7 +678,7 @@ def ml_trained_models(self, model_id=None, params=None, headers=None): @query_params( "allow_no_match", "format", "from_", "h", "help", "s", "size", "time", "v" ) - def transforms(self, transform_id=None, params=None, headers=None): + async def transforms(self, transform_id=None, params=None, headers=None): """ Gets configuration and usage information about transforms. ``_ @@ -705,7 +705,7 @@ def transforms(self, transform_id=None, params=None, headers=None): if "from_" in params: params["from"] = params.pop("from_") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cat", "transforms", transform_id), params=params, diff --git a/elasticsearch/_async/client/ccr.py b/elasticsearch/_async/client/ccr.py index fa0568fc1..9c30736b2 100644 --- a/elasticsearch/_async/client/ccr.py +++ b/elasticsearch/_async/client/ccr.py @@ -7,7 +7,7 @@ class CcrClient(NamespacedClient): @query_params() - def delete_auto_follow_pattern(self, name, params=None, headers=None): + async def delete_auto_follow_pattern(self, name, params=None, headers=None): """ Deletes auto-follow patterns. ``_ @@ -17,7 +17,7 @@ def delete_auto_follow_pattern(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_ccr", "auto_follow", name), params=params, @@ -25,7 +25,7 @@ def delete_auto_follow_pattern(self, name, params=None, headers=None): ) @query_params("wait_for_active_shards") - def follow(self, index, body, params=None, headers=None): + async def follow(self, index, body, params=None, headers=None): """ Creates a new follower index configured to follow the referenced leader index. 
``_ @@ -43,7 +43,7 @@ def follow(self, index, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path(index, "_ccr", "follow"), params=params, @@ -52,7 +52,7 @@ def follow(self, index, body, params=None, headers=None): ) @query_params() - def follow_info(self, index, params=None, headers=None): + async def follow_info(self, index, params=None, headers=None): """ Retrieves information about all follower indices, including parameters and status for each follower index @@ -64,12 +64,12 @@ def follow_info(self, index, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_ccr", "info"), params=params, headers=headers ) @query_params() - def follow_stats(self, index, params=None, headers=None): + async def follow_stats(self, index, params=None, headers=None): """ Retrieves follower stats. return shard-level stats about the following tasks associated with each shard for the specified indices. @@ -81,12 +81,12 @@ def follow_stats(self, index, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_ccr", "stats"), params=params, headers=headers ) @query_params() - def forget_follower(self, index, body, params=None, headers=None): + async def forget_follower(self, index, body, params=None, headers=None): """ Removes the follower retention leases from the leader. ``_ @@ -102,7 +102,7 @@ def forget_follower(self, index, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_ccr", "forget_follower"), params=params, @@ -111,7 +111,7 @@ def forget_follower(self, index, body, params=None, headers=None): ) @query_params() - def get_auto_follow_pattern(self, name=None, params=None, headers=None): + async def get_auto_follow_pattern(self, name=None, params=None, headers=None): """ Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection. @@ -119,7 +119,7 @@ def get_auto_follow_pattern(self, name=None, params=None, headers=None): :arg name: The name of the auto follow pattern. """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_ccr", "auto_follow", name), params=params, @@ -127,7 +127,7 @@ def get_auto_follow_pattern(self, name=None, params=None, headers=None): ) @query_params() - def pause_follow(self, index, params=None, headers=None): + async def pause_follow(self, index, params=None, headers=None): """ Pauses a follower index. The follower index will not fetch any additional operations from the leader index. 
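The conversion applied throughout these client modules is deliberately mechanical: each endpoint method gains the `async` keyword and its single `return self.transport.perform_request(...)` becomes `return await ...`, while the `@query_params` decorator, the argument validation, and the `_make_path` call are untouched. Schematically (a simplified sketch of the pattern, mirroring the `unfollow` hunk further down; `query_params`, `SKIP_IN_PATH`, and `_make_path` come from the same `.utils` module these files already import from):

    # Before: blocking call into the sync transport.
    @query_params()
    def unfollow(self, index, params=None, headers=None):
        if index in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'index'.")
        return self.transport.perform_request(
            "POST", _make_path(index, "_ccr", "unfollow"), params=params, headers=headers
        )

    # After: a coroutine that awaits the async transport. Nothing else changes.
    @query_params()
    async def unfollow(self, index, params=None, headers=None):
        if index in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'index'.")
        return await self.transport.perform_request(
            "POST", _make_path(index, "_ccr", "unfollow"), params=params, headers=headers
        )
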
@@ -139,7 +139,7 @@ def pause_follow(self, index, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_ccr", "pause_follow"), params=params, @@ -147,7 +147,7 @@ def pause_follow(self, index, params=None, headers=None): ) @query_params() - def put_auto_follow_pattern(self, name, body, params=None, headers=None): + async def put_auto_follow_pattern(self, name, body, params=None, headers=None): """ Creates a new named collection of auto-follow patterns against a specified remote cluster. Newly created indices on the remote cluster matching any of the @@ -161,7 +161,7 @@ def put_auto_follow_pattern(self, name, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_ccr", "auto_follow", name), params=params, @@ -170,7 +170,7 @@ def put_auto_follow_pattern(self, name, body, params=None, headers=None): ) @query_params() - def resume_follow(self, index, body=None, params=None, headers=None): + async def resume_follow(self, index, body=None, params=None, headers=None): """ Resumes a follower index that has been paused ``_ @@ -182,7 +182,7 @@ def resume_follow(self, index, body=None, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_ccr", "resume_follow"), params=params, @@ -191,17 +191,17 @@ def resume_follow(self, index, body=None, params=None, headers=None): ) @query_params() - def stats(self, params=None, headers=None): + async def stats(self, params=None, headers=None): """ Gets all stats related to cross-cluster replication. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_ccr/stats", params=params, headers=headers ) @query_params() - def unfollow(self, index, params=None, headers=None): + async def unfollow(self, index, params=None, headers=None): """ Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. 
@@ -213,7 +213,7 @@ def unfollow(self, index, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_ccr", "unfollow"), params=params, @@ -221,7 +221,7 @@ def unfollow(self, index, params=None, headers=None): ) @query_params() - def pause_auto_follow_pattern(self, name, params=None, headers=None): + async def pause_auto_follow_pattern(self, name, params=None, headers=None): """ Pauses an auto-follow pattern ``_ @@ -232,7 +232,7 @@ def pause_auto_follow_pattern(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ccr", "auto_follow", name, "pause"), params=params, @@ -240,7 +240,7 @@ def pause_auto_follow_pattern(self, name, params=None, headers=None): ) @query_params() - def resume_auto_follow_pattern(self, name, params=None, headers=None): + async def resume_auto_follow_pattern(self, name, params=None, headers=None): """ Resumes an auto-follow pattern that has been paused ``_ @@ -251,7 +251,7 @@ def resume_auto_follow_pattern(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ccr", "auto_follow", name, "resume"), params=params, diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index 600b89e70..477f4590b 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -19,7 +19,7 @@ class ClusterClient(NamespacedClient): "wait_for_nodes", "wait_for_status", ) - def health(self, index=None, params=None, headers=None): + async def health(self, index=None, params=None, headers=None): """ Returns basic information about the health of the cluster. ``_ @@ -49,7 +49,7 @@ def health(self, index=None, params=None, headers=None): :arg wait_for_status: Wait until cluster is in a specific state Valid choices: green, yellow, red """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cluster", "health", index), params=params, @@ -57,7 +57,7 @@ def health(self, index=None, params=None, headers=None): ) @query_params("local", "master_timeout") - def pending_tasks(self, params=None, headers=None): + async def pending_tasks(self, params=None, headers=None): """ Returns a list of any cluster-level changes (e.g. create index, update mapping, allocate or fail shard) which have not yet been executed. @@ -67,7 +67,7 @@ def pending_tasks(self, params=None, headers=None): from master node (default: false) :arg master_timeout: Specify timeout for connection to master """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_cluster/pending_tasks", params=params, headers=headers ) @@ -81,7 +81,7 @@ def pending_tasks(self, params=None, headers=None): "wait_for_metadata_version", "wait_for_timeout", ) - def state(self, metric=None, index=None, params=None, headers=None): + async def state(self, metric=None, index=None, params=None, headers=None): """ Returns a comprehensive information about the state of the cluster. 
``_ @@ -112,7 +112,7 @@ def state(self, metric=None, index=None, params=None, headers=None): if index and metric in SKIP_IN_PATH: metric = "_all" - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_cluster", "state", metric, index), params=params, @@ -120,7 +120,7 @@ def state(self, metric=None, index=None, params=None, headers=None): ) @query_params("flat_settings", "timeout") - def stats(self, node_id=None, params=None, headers=None): + async def stats(self, node_id=None, params=None, headers=None): """ Returns high-level overview of cluster statistics. ``_ @@ -133,7 +133,7 @@ def stats(self, node_id=None, params=None, headers=None): false) :arg timeout: Explicit operation timeout """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_cluster/stats" if node_id in SKIP_IN_PATH @@ -145,7 +145,7 @@ def stats(self, node_id=None, params=None, headers=None): @query_params( "dry_run", "explain", "master_timeout", "metric", "retry_failed", "timeout" ) - def reroute(self, body=None, params=None, headers=None): + async def reroute(self, body=None, params=None, headers=None): """ Allows to manually change the allocation of individual shards in the cluster. ``_ @@ -165,12 +165,12 @@ def reroute(self, body=None, params=None, headers=None): due to too many subsequent allocation failures :arg timeout: Explicit operation timeout """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_cluster/reroute", params=params, headers=headers, body=body ) @query_params("flat_settings", "include_defaults", "master_timeout", "timeout") - def get_settings(self, params=None, headers=None): + async def get_settings(self, params=None, headers=None): """ Returns cluster settings. ``_ @@ -183,12 +183,12 @@ def get_settings(self, params=None, headers=None): to master node :arg timeout: Explicit operation timeout """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_cluster/settings", params=params, headers=headers ) @query_params("flat_settings", "master_timeout", "timeout") - def put_settings(self, body, params=None, headers=None): + async def put_settings(self, body, params=None, headers=None): """ Updates the cluster settings. ``_ @@ -204,22 +204,22 @@ def put_settings(self, body, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", "/_cluster/settings", params=params, headers=headers, body=body ) @query_params() - def remote_info(self, params=None, headers=None): + async def remote_info(self, params=None, headers=None): """ Returns the information about configured remote clusters. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_remote/info", params=params, headers=headers ) @query_params("include_disk_info", "include_yes_decisions") - def allocation_explain(self, body=None, params=None, headers=None): + async def allocation_explain(self, body=None, params=None, headers=None): """ Provides explanations for shard allocations in the cluster. 
``_ @@ -231,7 +231,7 @@ def allocation_explain(self, body=None, params=None, headers=None): :arg include_yes_decisions: Return 'YES' decisions in explanation (default: false) """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_cluster/allocation/explain", params=params, @@ -240,10 +240,10 @@ def allocation_explain(self, body=None, params=None, headers=None): ) @query_params("master_timeout", "timeout") - def delete_component_template(self, name, params=None, headers=None): + async def delete_component_template(self, name, params=None, headers=None): """ Deletes a component template - ``_ + ``_ :arg name: The name of the template :arg master_timeout: Specify timeout for connection to master @@ -252,7 +252,7 @@ def delete_component_template(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_component_template", name), params=params, @@ -260,10 +260,10 @@ def delete_component_template(self, name, params=None, headers=None): ) @query_params("local", "master_timeout") - def get_component_template(self, name=None, params=None, headers=None): + async def get_component_template(self, name=None, params=None, headers=None): """ Returns one or more component templates - ``_ + ``_ :arg name: The comma separated names of the component templates :arg local: Return local information, do not retrieve the state @@ -271,7 +271,7 @@ def get_component_template(self, name=None, params=None, headers=None): :arg master_timeout: Explicit operation timeout for connection to master node """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_component_template", name), params=params, @@ -279,10 +279,10 @@ def get_component_template(self, name=None, params=None, headers=None): ) @query_params("create", "master_timeout", "timeout") - def put_component_template(self, name, body, params=None, headers=None): + async def put_component_template(self, name, body, params=None, headers=None): """ Creates or updates a component template - ``_ + ``_ :arg name: The name of the template :arg body: The template definition @@ -295,7 +295,7 @@ def put_component_template(self, name, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_component_template", name), params=params, @@ -304,10 +304,10 @@ def put_component_template(self, name, body, params=None, headers=None): ) @query_params("local", "master_timeout") - def exists_component_template(self, name, params=None, headers=None): + async def exists_component_template(self, name, params=None, headers=None): """ Returns information about whether a particular component template exist - ``_ + ``_ :arg name: The name of the template :arg local: Return local information, do not retrieve the state @@ -318,7 +318,7 @@ def exists_component_template(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "HEAD", _make_path("_component_template", name), params=params, @@ -326,7 +326,7 @@ def exists_component_template(self, name, params=None, 
headers=None): ) @query_params("wait_for_removal") - def delete_voting_config_exclusions(self, params=None, headers=None): + async def delete_voting_config_exclusions(self, params=None, headers=None): """ Clears cluster voting config exclusions. ``_ @@ -335,7 +335,7 @@ def delete_voting_config_exclusions(self, params=None, headers=None): excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list. Default: True """ - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", "/_cluster/voting_config_exclusions", params=params, @@ -343,7 +343,7 @@ def delete_voting_config_exclusions(self, params=None, headers=None): ) @query_params("node_ids", "node_names", "timeout") - def post_voting_config_exclusions(self, params=None, headers=None): + async def post_voting_config_exclusions(self, params=None, headers=None): """ Updates the cluster voting config exclusions by node ids or node names. ``_ @@ -356,6 +356,6 @@ def post_voting_config_exclusions(self, params=None, headers=None): not also specify ?node_ids. :arg timeout: Explicit operation timeout Default: 30s """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_cluster/voting_config_exclusions", params=params, headers=headers ) diff --git a/elasticsearch/_async/client/enrich.py b/elasticsearch/_async/client/enrich.py index ba51d0e19..a89f2a671 100644 --- a/elasticsearch/_async/client/enrich.py +++ b/elasticsearch/_async/client/enrich.py @@ -7,7 +7,7 @@ class EnrichClient(NamespacedClient): @query_params() - def delete_policy(self, name, params=None, headers=None): + async def delete_policy(self, name, params=None, headers=None): """ Deletes an existing enrich policy and its enrich index. ``_ @@ -17,7 +17,7 @@ def delete_policy(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_enrich", "policy", name), params=params, @@ -25,7 +25,7 @@ def delete_policy(self, name, params=None, headers=None): ) @query_params("wait_for_completion") - def execute_policy(self, name, params=None, headers=None): + async def execute_policy(self, name, params=None, headers=None): """ Creates the enrich index for an existing enrich policy. ``_ @@ -37,7 +37,7 @@ def execute_policy(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_enrich", "policy", name, "_execute"), params=params, @@ -45,19 +45,19 @@ def execute_policy(self, name, params=None, headers=None): ) @query_params() - def get_policy(self, name=None, params=None, headers=None): + async def get_policy(self, name=None, params=None, headers=None): """ Gets information about an enrich policy. ``_ :arg name: A comma-separated list of enrich policy names """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_enrich", "policy", name), params=params, headers=headers ) @query_params() - def put_policy(self, name, body, params=None, headers=None): + async def put_policy(self, name, body, params=None, headers=None): """ Creates a new enrich policy. 
``_ @@ -69,7 +69,7 @@ def put_policy(self, name, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_enrich", "policy", name), params=params, @@ -78,12 +78,12 @@ def put_policy(self, name, body, params=None, headers=None): ) @query_params() - def stats(self, params=None, headers=None): + async def stats(self, params=None, headers=None): """ Gets enrich coordinator statistics and information about enrich policies that are currently executing. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_enrich/_stats", params=params, headers=headers ) diff --git a/elasticsearch/_async/client/eql.py b/elasticsearch/_async/client/eql.py index 01bb4aaab..b60fb3763 100644 --- a/elasticsearch/_async/client/eql.py +++ b/elasticsearch/_async/client/eql.py @@ -7,7 +7,7 @@ class EqlClient(NamespacedClient): @query_params() - def search(self, index, body, params=None, headers=None): + async def search(self, index, body, params=None, headers=None): """ Returns results matching a query expressed in Event Query Language (EQL) ``_ @@ -20,7 +20,7 @@ def search(self, index, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_eql", "search"), params=params, diff --git a/elasticsearch/_async/client/graph.py b/elasticsearch/_async/client/graph.py index 3c5606155..3ad7f5316 100644 --- a/elasticsearch/_async/client/graph.py +++ b/elasticsearch/_async/client/graph.py @@ -7,7 +7,7 @@ class GraphClient(NamespacedClient): @query_params("routing", "timeout") - def explore(self, index, body=None, params=None, headers=None): + async def explore(self, index, body=None, params=None, headers=None): """ Explore extracted and summarized information about the documents and terms in an index. @@ -22,7 +22,7 @@ def explore(self, index, body=None, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_graph", "explore"), params=params, diff --git a/elasticsearch/_async/client/ilm.py b/elasticsearch/_async/client/ilm.py index 4e09316d6..641decc6e 100644 --- a/elasticsearch/_async/client/ilm.py +++ b/elasticsearch/_async/client/ilm.py @@ -7,7 +7,7 @@ class IlmClient(NamespacedClient): @query_params() - def delete_lifecycle(self, policy, params=None, headers=None): + async def delete_lifecycle(self, policy, params=None, headers=None): """ Deletes the specified lifecycle policy definition. A currently used policy cannot be deleted. 
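Beyond API parity, the payoff of making every endpoint awaitable is that independent requests can be issued concurrently over the same connection pool rather than serially. A minimal sketch using the standard-library `asyncio.gather` with methods converted above (the `es` client instance is assumed):

    import asyncio

    async def cluster_overview(es):
        # All three requests are in flight at once; the coroutine
        # resumes when the slowest response arrives, so wall-clock
        # time is roughly max(latencies) instead of their sum.
        health, pending, policies = await asyncio.gather(
            es.cluster.health(),
            es.cluster.pending_tasks(),
            es.ilm.get_lifecycle(),
        )
        return health, pending, policies
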
@@ -18,7 +18,7 @@ def delete_lifecycle(self, policy, params=None, headers=None): if policy in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'policy'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_ilm", "policy", policy), params=params, @@ -26,7 +26,7 @@ def delete_lifecycle(self, policy, params=None, headers=None): ) @query_params("only_errors", "only_managed") - def explain_lifecycle(self, index, params=None, headers=None): + async def explain_lifecycle(self, index, params=None, headers=None): """ Retrieves information about the index's current lifecycle state, such as the currently executing phase, action, and step. @@ -41,12 +41,12 @@ def explain_lifecycle(self, index, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_ilm", "explain"), params=params, headers=headers ) @query_params() - def get_lifecycle(self, policy=None, params=None, headers=None): + async def get_lifecycle(self, policy=None, params=None, headers=None): """ Returns the specified policy definition. Includes the policy version and last modified date. @@ -54,22 +54,22 @@ def get_lifecycle(self, policy=None, params=None, headers=None): :arg policy: The name of the index lifecycle policy """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_ilm", "policy", policy), params=params, headers=headers ) @query_params() - def get_status(self, params=None, headers=None): + async def get_status(self, params=None, headers=None): """ Retrieves the current index lifecycle management (ILM) status. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_ilm/status", params=params, headers=headers ) @query_params() - def move_to_step(self, index, body=None, params=None, headers=None): + async def move_to_step(self, index, body=None, params=None, headers=None): """ Manually moves an index into the specified step and executes that step. 
``_ @@ -81,7 +81,7 @@ def move_to_step(self, index, body=None, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ilm", "move", index), params=params, @@ -90,7 +90,7 @@ def move_to_step(self, index, body=None, params=None, headers=None): ) @query_params() - def put_lifecycle(self, policy, body=None, params=None, headers=None): + async def put_lifecycle(self, policy, body=None, params=None, headers=None): """ Creates a lifecycle policy ``_ @@ -101,7 +101,7 @@ def put_lifecycle(self, policy, body=None, params=None, headers=None): if policy in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'policy'.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_ilm", "policy", policy), params=params, @@ -110,7 +110,7 @@ def put_lifecycle(self, policy, body=None, params=None, headers=None): ) @query_params() - def remove_policy(self, index, params=None, headers=None): + async def remove_policy(self, index, params=None, headers=None): """ Removes the assigned lifecycle policy and stops managing the specified index ``_ @@ -120,12 +120,12 @@ def remove_policy(self, index, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_ilm", "remove"), params=params, headers=headers ) @query_params() - def retry(self, index, params=None, headers=None): + async def retry(self, index, params=None, headers=None): """ Retries executing the policy for an index that is in the ERROR step. ``_ @@ -136,27 +136,27 @@ def retry(self, index, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_ilm", "retry"), params=params, headers=headers ) @query_params() - def start(self, params=None, headers=None): + async def start(self, params=None, headers=None): """ Start the index lifecycle management (ILM) plugin. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_ilm/start", params=params, headers=headers ) @query_params() - def stop(self, params=None, headers=None): + async def stop(self, params=None, headers=None): """ Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_ilm/stop", params=params, headers=headers ) diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index 68efaf626..1e08dc1f3 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -7,7 +7,7 @@ class IndicesClient(NamespacedClient): @query_params() - def analyze(self, body=None, index=None, params=None, headers=None): + async def analyze(self, body=None, index=None, params=None, headers=None): """ Performs the analysis process on a text and return the tokens breakdown of the text. 
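Tying it together, a complete program against the converted indices client would look like the sketch below. The `AsyncElasticsearch` entry point and its `close()` coroutine are assumptions here, introduced elsewhere in this patch series on top of `AIOHttpConnection`, but the awaited call pattern is exactly what these hunks establish:

    import asyncio
    from elasticsearch import AsyncElasticsearch  # assumed export from this series

    async def main():
        es = AsyncElasticsearch(hosts=["http://localhost:9200"])
        try:
            # create() and refresh() are now coroutines; each await
            # drives one request through the aiohttp-based transport.
            await es.indices.create(index="demo", ignore=400)
            await es.indices.refresh(index="demo")
        finally:
            # Close the client so the underlying aiohttp session
            # releases its sockets.
            await es.close()

    # Python 3.7+; on 3.6 use asyncio.get_event_loop().run_until_complete(main())
    asyncio.run(main())
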
@@ -17,7 +17,7 @@ def analyze(self, body=None, index=None, params=None, headers=None): which the analysis should be performed :arg index: The name of the index to scope the operation """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_analyze"), params=params, @@ -26,7 +26,7 @@ def analyze(self, body=None, index=None, params=None, headers=None): ) @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable") - def refresh(self, index=None, params=None, headers=None): + async def refresh(self, index=None, params=None, headers=None): """ Performs the refresh operation in one or more indices. ``_ @@ -42,7 +42,7 @@ def refresh(self, index=None, params=None, headers=None): :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_refresh"), params=params, headers=headers ) @@ -53,7 +53,7 @@ def refresh(self, index=None, params=None, headers=None): "ignore_unavailable", "wait_if_ongoing", ) - def flush(self, index=None, params=None, headers=None): + async def flush(self, index=None, params=None, headers=None): """ Performs the flush operation on one or more indices. ``_ @@ -78,12 +78,12 @@ def flush(self, index=None, params=None, headers=None): already executing. The default is true. If set to false the flush will be skipped iff if another flush operation is already running. """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_flush"), params=params, headers=headers ) @query_params("master_timeout", "timeout", "wait_for_active_shards") - def create(self, index, body=None, params=None, headers=None): + async def create(self, index, body=None, params=None, headers=None): """ Creates an index with optional settings and mappings. ``_ @@ -99,12 +99,12 @@ def create(self, index, body=None, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path(index), params=params, headers=headers, body=body ) @query_params("master_timeout", "timeout", "wait_for_active_shards") - def clone(self, index, target, body=None, params=None, headers=None): + async def clone(self, index, target, body=None, params=None, headers=None): """ Clones an index ``_ @@ -122,7 +122,7 @@ def clone(self, index, target, body=None, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path(index, "_clone", target), params=params, @@ -139,7 +139,7 @@ def clone(self, index, target, body=None, params=None, headers=None): "local", "master_timeout", ) - def get(self, index, params=None, headers=None): + async def get(self, index, params=None, headers=None): """ Returns information about one or more indices. 
``_ @@ -163,7 +163,7 @@ def get(self, index, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index), params=params, headers=headers ) @@ -175,7 +175,7 @@ def get(self, index, params=None, headers=None): "timeout", "wait_for_active_shards", ) - def open(self, index, params=None, headers=None): + async def open(self, index, params=None, headers=None): """ Opens an index. ``_ @@ -197,7 +197,7 @@ def open(self, index, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_open"), params=params, headers=headers ) @@ -209,7 +209,7 @@ def open(self, index, params=None, headers=None): "timeout", "wait_for_active_shards", ) - def close(self, index, params=None, headers=None): + async def close(self, index, params=None, headers=None): """ Closes an index. ``_ @@ -231,7 +231,7 @@ def close(self, index, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_close"), params=params, headers=headers ) @@ -242,7 +242,7 @@ def close(self, index, params=None, headers=None): "master_timeout", "timeout", ) - def delete(self, index, params=None, headers=None): + async def delete(self, index, params=None, headers=None): """ Deletes an index. ``_ @@ -262,7 +262,7 @@ def delete(self, index, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path(index), params=params, headers=headers ) @@ -274,7 +274,7 @@ def delete(self, index, params=None, headers=None): "include_defaults", "local", ) - def exists(self, index, params=None, headers=None): + async def exists(self, index, params=None, headers=None): """ Returns information about whether a particular index exists. ``_ @@ -297,12 +297,12 @@ def exists(self, index, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "HEAD", _make_path(index), params=params, headers=headers ) @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local") - def exists_type(self, index, doc_type, params=None, headers=None): + async def exists_type(self, index, doc_type, params=None, headers=None): """ Returns information about whether a particular document type exists. 
(DEPRECATED) @@ -326,7 +326,7 @@ def exists_type(self, index, doc_type, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "HEAD", _make_path(index, "_mapping", doc_type), params=params, @@ -340,7 +340,7 @@ def exists_type(self, index, doc_type, params=None, headers=None): "master_timeout", "timeout", ) - def put_mapping(self, index, body, params=None, headers=None): + async def put_mapping(self, index, body, params=None, headers=None): """ Updates the index mappings. ``_ @@ -364,7 +364,7 @@ def put_mapping(self, index, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path(index, "_mapping"), params=params, @@ -379,7 +379,7 @@ def put_mapping(self, index, body, params=None, headers=None): "local", "master_timeout", ) - def get_mapping(self, index=None, params=None, headers=None): + async def get_mapping(self, index=None, params=None, headers=None): """ Returns mappings for one or more indices. ``_ @@ -397,12 +397,12 @@ def get_mapping(self, index=None, params=None, headers=None): from master node (default: false) :arg master_timeout: Specify timeout for connection to master """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_mapping"), params=params, headers=headers ) @query_params("master_timeout", "timeout") - def put_alias(self, index, name, body=None, params=None, headers=None): + async def put_alias(self, index, name, body=None, params=None, headers=None): """ Creates or updates an alias. ``_ @@ -420,7 +420,7 @@ def put_alias(self, index, name, body=None, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path(index, "_alias", name), params=params, @@ -429,7 +429,7 @@ def put_alias(self, index, name, body=None, params=None, headers=None): ) @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local") - def exists_alias(self, name, index=None, params=None, headers=None): + async def exists_alias(self, name, index=None, params=None, headers=None): """ Returns information about whether a particular alias exists. ``_ @@ -451,12 +451,12 @@ def exists_alias(self, name, index=None, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "HEAD", _make_path(index, "_alias", name), params=params, headers=headers ) @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable", "local") - def get_alias(self, index=None, name=None, params=None, headers=None): + async def get_alias(self, index=None, name=None, params=None, headers=None): """ Returns an alias. 
``_ @@ -475,12 +475,12 @@ def get_alias(self, index=None, name=None, params=None, headers=None): :arg local: Return local information, do not retrieve the state from master node (default: false) """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_alias", name), params=params, headers=headers ) @query_params("master_timeout", "timeout") - def update_aliases(self, body, params=None, headers=None): + async def update_aliases(self, body, params=None, headers=None): """ Updates index aliases. ``_ @@ -492,12 +492,12 @@ def update_aliases(self, body, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_aliases", params=params, headers=headers, body=body ) @query_params("master_timeout", "timeout") - def delete_alias(self, index, name, params=None, headers=None): + async def delete_alias(self, index, name, params=None, headers=None): """ Deletes an alias. ``_ @@ -513,12 +513,12 @@ def delete_alias(self, index, name, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path(index, "_alias", name), params=params, headers=headers ) @query_params("create", "master_timeout", "order") - def put_template(self, name, body, params=None, headers=None): + async def put_template(self, name, body, params=None, headers=None): """ Creates or updates an index template. ``_ @@ -536,7 +536,7 @@ def put_template(self, name, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_template", name), params=params, @@ -545,7 +545,7 @@ def put_template(self, name, body, params=None, headers=None): ) @query_params("flat_settings", "local", "master_timeout") - def exists_template(self, name, params=None, headers=None): + async def exists_template(self, name, params=None, headers=None): """ Returns information about whether a particular index template exists. ``_ @@ -561,12 +561,12 @@ def exists_template(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "HEAD", _make_path("_template", name), params=params, headers=headers ) @query_params("flat_settings", "local", "master_timeout") - def get_template(self, name=None, params=None, headers=None): + async def get_template(self, name=None, params=None, headers=None): """ Returns an index template. ``_ @@ -579,12 +579,12 @@ def get_template(self, name=None, params=None, headers=None): :arg master_timeout: Explicit operation timeout for connection to master node """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_template", name), params=params, headers=headers ) @query_params("master_timeout", "timeout") - def delete_template(self, name, params=None, headers=None): + async def delete_template(self, name, params=None, headers=None): """ Deletes an index template. 
``_ @@ -596,7 +596,7 @@ def delete_template(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_template", name), params=params, headers=headers ) @@ -609,7 +609,7 @@ def delete_template(self, name, params=None, headers=None): "local", "master_timeout", ) - def get_settings(self, index=None, name=None, params=None, headers=None): + async def get_settings(self, index=None, name=None, params=None, headers=None): """ Returns settings for one or more indices. ``_ @@ -633,7 +633,7 @@ def get_settings(self, index=None, name=None, params=None, headers=None): from master node (default: false) :arg master_timeout: Specify timeout for connection to master """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_settings", name), params=params, headers=headers ) @@ -646,7 +646,7 @@ def get_settings(self, index=None, name=None, params=None, headers=None): "preserve_existing", "timeout", ) - def put_settings(self, body, index=None, params=None, headers=None): + async def put_settings(self, body, index=None, params=None, headers=None): """ Updates the index settings. ``_ @@ -673,7 +673,7 @@ def put_settings(self, body, index=None, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path(index, "_settings"), params=params, @@ -693,7 +693,7 @@ def put_settings(self, body, index=None, params=None, headers=None): "level", "types", ) - def stats(self, index=None, metric=None, params=None, headers=None): + async def stats(self, index=None, metric=None, params=None, headers=None): """ Provides statistics on operations happening in an index. ``_ @@ -729,14 +729,14 @@ def stats(self, index=None, metric=None, params=None, headers=None): :arg types: A comma-separated list of document types for the `indexing` index metric """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_stats", metric), params=params, headers=headers ) @query_params( "allow_no_indices", "expand_wildcards", "ignore_unavailable", "verbose" ) - def segments(self, index=None, params=None, headers=None): + async def segments(self, index=None, params=None, headers=None): """ Provides low-level information about segments in a Lucene index. ``_ @@ -753,7 +753,7 @@ def segments(self, index=None, params=None, headers=None): should be ignored when unavailable (missing or closed) :arg verbose: Includes detailed memory usage by Lucene. """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_segments"), params=params, headers=headers ) @@ -766,7 +766,7 @@ def segments(self, index=None, params=None, headers=None): "query", "request", ) - def clear_cache(self, index=None, params=None, headers=None): + async def clear_cache(self, index=None, params=None, headers=None): """ Clears all or specific caches for one or more indices. 
``_ @@ -787,12 +787,12 @@ def clear_cache(self, index=None, params=None, headers=None): :arg query: Clear query caches :arg request: Clear request cache """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_cache", "clear"), params=params, headers=headers ) @query_params("active_only", "detailed") - def recovery(self, index=None, params=None, headers=None): + async def recovery(self, index=None, params=None, headers=None): """ Returns information about ongoing index shard recoveries. ``_ @@ -804,7 +804,7 @@ def recovery(self, index=None, params=None, headers=None): :arg detailed: Whether to display detailed information about shard recovery """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_recovery"), params=params, headers=headers ) @@ -815,7 +815,7 @@ def recovery(self, index=None, params=None, headers=None): "only_ancient_segments", "wait_for_completion", ) - def upgrade(self, index=None, params=None, headers=None): + async def upgrade(self, index=None, params=None, headers=None): """ DEPRECATED Upgrades to the current version of Lucene. ``_ @@ -835,12 +835,12 @@ def upgrade(self, index=None, params=None, headers=None): :arg wait_for_completion: Specify whether the request should block until all segments are upgraded (default: false) """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_upgrade"), params=params, headers=headers ) @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable") - def get_upgrade(self, index=None, params=None, headers=None): + async def get_upgrade(self, index=None, params=None, headers=None): """ DEPRECATED Returns a progress status of the current upgrade. ``_ @@ -856,14 +856,14 @@ def get_upgrade(self, index=None, params=None, headers=None): :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_upgrade"), params=params, headers=headers ) @query_params( "allow_no_indices", "expand_wildcards", "ignore_unavailable", "status" ) - def shard_stores(self, index=None, params=None, headers=None): + async def shard_stores(self, index=None, params=None, headers=None): """ Provides store information for shard copies of indices. ``_ @@ -882,7 +882,7 @@ def shard_stores(self, index=None, params=None, headers=None): on shards to get store information for Valid choices: green, yellow, red, all """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_shard_stores"), params=params, headers=headers ) @@ -894,7 +894,7 @@ def shard_stores(self, index=None, params=None, headers=None): "max_num_segments", "only_expunge_deletes", ) - def forcemerge(self, index=None, params=None, headers=None): + async def forcemerge(self, index=None, params=None, headers=None): """ Performs the force merge operation on one or more indices.
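Note that the @query_params keyword arguments survive the conversion untouched; only the await is new. For example (same assumptions, index name invented):

    import asyncio

    from elasticsearch import AsyncElasticsearch  # assumed export

    async def main():
        es = AsyncElasticsearch()
        try:
            # Only recoveries still in flight, with per-shard detail.
            print(await es.indices.recovery(index="logs", active_only=True, detailed=True))
            # Store information for shard copies that are in trouble.
            print(await es.indices.shard_stores(index="logs", status="red"))
        finally:
            await es.close()

    asyncio.run(main())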
``_ @@ -916,12 +916,12 @@ def forcemerge(self, index=None, params=None, headers=None): :arg only_expunge_deletes: Specify whether the operation should only expunge deleted documents """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_forcemerge"), params=params, headers=headers ) @query_params("master_timeout", "timeout", "wait_for_active_shards") - def shrink(self, index, target, body=None, params=None, headers=None): + async def shrink(self, index, target, body=None, params=None, headers=None): """ Allows you to shrink an existing index into a new index with fewer primary shards. ``_ @@ -939,7 +939,7 @@ def shrink(self, index, target, body=None, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path(index, "_shrink", target), params=params, @@ -948,7 +948,7 @@ def shrink(self, index, target, body=None, params=None, headers=None): ) @query_params("master_timeout", "timeout", "wait_for_active_shards") - def split(self, index, target, body=None, params=None, headers=None): + async def split(self, index, target, body=None, params=None, headers=None): """ Allows you to split an existing index into a new index with more primary shards. @@ -967,7 +967,7 @@ def split(self, index, target, body=None, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path(index, "_split", target), params=params, @@ -976,7 +976,9 @@ def split(self, index, target, body=None, params=None, headers=None): ) @query_params("dry_run", "master_timeout", "timeout", "wait_for_active_shards") - def rollover(self, alias, body=None, new_index=None, params=None, headers=None): + async def rollover( + self, alias, body=None, new_index=None, params=None, headers=None + ): """ Updates an alias to point to a new index when the existing index is considered to be too large or too old. @@ -998,7 +1000,7 @@ def rollover(self, alias, body=None, new_index=None, params=None, headers=None): if alias in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'alias'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(alias, "_rollover", new_index), params=params, @@ -1014,7 +1016,7 @@ def rollover(self, alias, body=None, new_index=None, params=None, headers=None): "timeout", "wait_for_active_shards", ) - def freeze(self, index, params=None, headers=None): + async def freeze(self, index, params=None, headers=None): """ Freezes an index. A frozen index has almost no overhead on the cluster (except for maintaining its metadata in memory) and is read-only. @@ -1037,7 +1039,7 @@ def freeze(self, index, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_freeze"), params=params, headers=headers ) @@ -1049,7 +1051,7 @@ def freeze(self, index, params=None, headers=None): "timeout", "wait_for_active_shards", ) - def unfreeze(self, index, params=None, headers=None): + async def unfreeze(self, index, params=None, headers=None): """ Unfreezes an index.
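The reflowed rollover signature in use (alias and conditions invented; the dry_run query parameter is also available for a no-op check):

    import asyncio

    from elasticsearch import AsyncElasticsearch  # assumed export

    async def main():
        es = AsyncElasticsearch()
        try:
            resp = await es.indices.rollover(
                alias="logs-write",
                body={"conditions": {"max_age": "7d", "max_docs": 10_000_000}},
            )
            if resp["rolled_over"]:
                print("now writing to", resp["new_index"])
        finally:
            await es.close()

    asyncio.run(main())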
When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again. @@ -1072,12 +1074,12 @@ def unfreeze(self, index, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_unfreeze"), params=params, headers=headers ) @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable") - def reload_search_analyzers(self, index, params=None, headers=None): + async def reload_search_analyzers(self, index, params=None, headers=None): """ Reloads an index's search analyzers and their resources. ``_ @@ -1096,7 +1098,7 @@ def reload_search_analyzers(self, index, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_reload_search_analyzers"), params=params, @@ -1110,7 +1112,7 @@ def reload_search_analyzers(self, index, params=None, headers=None): "include_defaults", "local", ) - def get_field_mapping(self, fields, index=None, params=None, headers=None): + async def get_field_mapping(self, fields, index=None, params=None, headers=None): """ Returns mapping for one or more fields. ``_ @@ -1133,7 +1135,7 @@ def get_field_mapping(self, fields, index=None, params=None, headers=None): if fields in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'fields'.") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_mapping", "field", fields), params=params, @@ -1154,7 +1156,7 @@ def get_field_mapping(self, fields, index=None, params=None, headers=None): "q", "rewrite", ) - def validate_query( + async def validate_query( self, body=None, index=None, doc_type=None, params=None, headers=None ): """ @@ -1192,7 +1194,7 @@ def validate_query( :arg rewrite: Provide a more detailed explanation showing the actual Lucene query that will be executed. """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, doc_type, "_validate", "query"), params=params, @@ -1201,7 +1203,7 @@ def validate_query( ) @query_params() - def create_data_stream(self, name, body, params=None, headers=None): + async def create_data_stream(self, name, body, params=None, headers=None): """ Creates or updates a data stream ``_ @@ -1213,7 +1215,7 @@ def create_data_stream(self, name, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_data_stream", name), params=params, @@ -1222,7 +1224,7 @@ def create_data_stream(self, name, body, params=None, headers=None): ) @query_params() - def delete_data_stream(self, name, params=None, headers=None): + async def delete_data_stream(self, name, params=None, headers=None): """ Deletes a data stream. 
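A sketch of the converted validate_query from the hunk above (index and query invented; explain is one of the query parameters hidden by the hunk context, so treat it as an assumption here):

    import asyncio

    from elasticsearch import AsyncElasticsearch  # assumed export

    async def main():
        es = AsyncElasticsearch()
        try:
            resp = await es.indices.validate_query(
                index="logs",
                body={"query": {"range": {"@timestamp": {"gte": "now-1d"}}}},
                explain=True,  # assumed to be in the elided parameter list
            )
            print(resp["valid"], resp.get("explanations"))
        finally:
            await es.close()

    asyncio.run(main())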
``_ @@ -1232,25 +1234,12 @@ def delete_data_stream(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_data_stream", name), params=params, headers=headers ) - @query_params() - def get_data_streams(self, name=None, params=None, headers=None): - """ - Returns data streams. - ``_ - - :arg name: The name or wildcard expression of the requested data - streams - """ - return self.transport.perform_request( - "GET", _make_path("_data_streams", name), params=params, headers=headers - ) - @query_params("master_timeout", "timeout") - def delete_index_template(self, name, params=None, headers=None): + async def delete_index_template(self, name, params=None, headers=None): """ Deletes an index template. ``_ @@ -1262,7 +1251,7 @@ def delete_index_template(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_index_template", name), params=params, @@ -1270,7 +1259,7 @@ def delete_index_template(self, name, params=None, headers=None): ) @query_params("flat_settings", "local", "master_timeout") - def get_index_template(self, name=None, params=None, headers=None): + async def get_index_template(self, name=None, params=None, headers=None): """ Returns an index template. ``_ @@ -1283,12 +1272,12 @@ def get_index_template(self, name=None, params=None, headers=None): :arg master_timeout: Explicit operation timeout for connection to master node """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_index_template", name), params=params, headers=headers ) @query_params("cause", "create", "master_timeout") - def put_index_template(self, name, body, params=None, headers=None): + async def put_index_template(self, name, body, params=None, headers=None): """ Creates or updates an index template. ``_ @@ -1305,7 +1294,7 @@ def put_index_template(self, name, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_index_template", name), params=params, @@ -1314,7 +1303,7 @@ def put_index_template(self, name, body, params=None, headers=None): ) @query_params("flat_settings", "local", "master_timeout") - def exists_index_template(self, name, params=None, headers=None): + async def exists_index_template(self, name, params=None, headers=None): """ Returns information about whether a particular index template exists. 
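The composable index template methods pair naturally; exists_index_template resolves to a boolean because HEAD responses short-circuit in the transport. A sketch (template invented):

    import asyncio

    from elasticsearch import AsyncElasticsearch  # assumed export

    async def main():
        es = AsyncElasticsearch()
        try:
            await es.indices.put_index_template(
                name="logs-template",
                body={
                    "index_patterns": ["logs-*"],
                    "template": {"settings": {"index": {"number_of_shards": 1}}},
                },
            )
            print(await es.indices.exists_index_template(name="logs-template"))
        finally:
            await es.close()

    asyncio.run(main())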
``_ @@ -1330,12 +1319,12 @@ def exists_index_template(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "HEAD", _make_path("_index_template", name), params=params, headers=headers ) @query_params("cause", "create", "master_timeout") - def simulate_index_template(self, name, body=None, params=None, headers=None): + async def simulate_index_template(self, name, body=None, params=None, headers=None): """ Simulate matching the given index name against the index templates in the system @@ -1355,10 +1344,47 @@ def simulate_index_template(self, name, body=None, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_index_template", "_simulate_index", name), params=params, headers=headers, body=body, ) + + @query_params() + async def get_data_stream(self, name=None, params=None, headers=None): + """ + Returns data streams. + ``_ + + :arg name: The name or wildcard expression of the requested data + streams + """ + return await self.transport.perform_request( + "GET", _make_path("_data_stream", name), params=params, headers=headers + ) + + @query_params("cause", "create", "master_timeout") + async def simulate_template(self, body=None, name=None, params=None, headers=None): + """ + Simulate resolving the given template name or body + ``_ + + :arg body: New index template definition to be simulated, if no + index template name is specified + :arg name: The name of the index template + :arg cause: User defined reason for dry-run creating the new + template for simulation purposes + :arg create: Whether the index template we optionally defined in + the body should only be dry-run added if new or can also replace an + existing one + :arg master_timeout: Specify timeout for connection to master + """ + return await self.transport.perform_request( + "POST", + _make_path("_index_template", "_simulate", name), + params=params, + headers=headers, + body=body, + ) diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py index c30c41dfb..6a0818027 100644 --- a/elasticsearch/_async/client/ingest.py +++ b/elasticsearch/_async/client/ingest.py @@ -7,7 +7,7 @@ class IngestClient(NamespacedClient): @query_params("master_timeout") - def get_pipeline(self, id=None, params=None, headers=None): + async def get_pipeline(self, id=None, params=None, headers=None): """ Returns a pipeline. ``_ @@ -17,12 +17,12 @@ def get_pipeline(self, id=None, params=None, headers=None): :arg master_timeout: Explicit operation timeout for connection to master node """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_ingest", "pipeline", id), params=params, headers=headers ) @query_params("master_timeout", "timeout") - def put_pipeline(self, id, body, params=None, headers=None): + async def put_pipeline(self, id, body, params=None, headers=None): """ Creates or updates a pipeline. 
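A sketch exercising the simulation and data stream additions (names invented; the wildcard lookup assumes the server accepts the expression):

    import asyncio

    from elasticsearch import AsyncElasticsearch  # assumed export

    async def main():
        es = AsyncElasticsearch()
        try:
            # Preview the settings an index with this name would receive.
            print(await es.indices.simulate_index_template(name="logs-2020.05.13"))
            # List data streams matching the expression.
            print(await es.indices.get_data_stream(name="logs-*"))
        finally:
            await es.close()

    asyncio.run(main())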
``_ @@ -37,7 +37,7 @@ def put_pipeline(self, id, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_ingest", "pipeline", id), params=params, @@ -46,7 +46,7 @@ def put_pipeline(self, id, body, params=None, headers=None): ) @query_params("master_timeout", "timeout") - def delete_pipeline(self, id, params=None, headers=None): + async def delete_pipeline(self, id, params=None, headers=None): """ Deletes a pipeline. ``_ @@ -59,7 +59,7 @@ def delete_pipeline(self, id, params=None, headers=None): if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_ingest", "pipeline", id), params=params, @@ -67,7 +67,7 @@ def delete_pipeline(self, id, params=None, headers=None): ) @query_params("verbose") - def simulate(self, body, id=None, params=None, headers=None): + async def simulate(self, body, id=None, params=None, headers=None): """ Allows to simulate a pipeline with example documents. ``_ @@ -80,7 +80,7 @@ def simulate(self, body, id=None, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ingest", "pipeline", id, "_simulate"), params=params, @@ -89,11 +89,11 @@ def simulate(self, body, id=None, params=None, headers=None): ) @query_params() - def processor_grok(self, params=None, headers=None): + async def processor_grok(self, params=None, headers=None): """ Returns a list of the built-in patterns. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_ingest/processor/grok", params=params, headers=headers ) diff --git a/elasticsearch/_async/client/license.py b/elasticsearch/_async/client/license.py index e455725af..37c06a330 100644 --- a/elasticsearch/_async/client/license.py +++ b/elasticsearch/_async/client/license.py @@ -7,17 +7,17 @@ class LicenseClient(NamespacedClient): @query_params() - def delete(self, params=None, headers=None): + async def delete(self, params=None, headers=None): """ Deletes licensing information for the cluster ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", "/_license", params=params, headers=headers ) @query_params("accept_enterprise", "local") - def get(self, params=None, headers=None): + async def get(self, params=None, headers=None): """ Retrieves licensing information for the cluster ``_ @@ -27,32 +27,32 @@ def get(self, params=None, headers=None): :arg local: Return local information, do not retrieve the state from master node (default: false) """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_license", params=params, headers=headers ) @query_params() - def get_basic_status(self, params=None, headers=None): + async def get_basic_status(self, params=None, headers=None): """ Retrieves information about the status of the basic license. 
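The ingest conversions in use, registering a pipeline and then dry-running it against a sample document (pipeline id and processor invented):

    import asyncio

    from elasticsearch import AsyncElasticsearch  # assumed export

    async def main():
        es = AsyncElasticsearch()
        try:
            await es.ingest.put_pipeline(
                id="add-received-at",
                body={
                    "description": "Stamp documents with an ingest timestamp",
                    "processors": [
                        {"set": {"field": "received_at", "value": "{{_ingest.timestamp}}"}}
                    ],
                },
            )
            sim = await es.ingest.simulate(
                body={"docs": [{"_source": {"message": "hello"}}]},
                id="add-received-at",
                verbose=True,
            )
            print(sim["docs"][0])
        finally:
            await es.close()

    asyncio.run(main())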
``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_license/basic_status", params=params, headers=headers ) @query_params() - def get_trial_status(self, params=None, headers=None): + async def get_trial_status(self, params=None, headers=None): """ Retrieves information about the status of the trial license. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_license/trial_status", params=params, headers=headers ) @query_params("acknowledge") - def post(self, body=None, params=None, headers=None): + async def post(self, body=None, params=None, headers=None): """ Updates the license for the cluster. ``_ @@ -61,12 +61,12 @@ def post(self, body=None, params=None, headers=None): :arg acknowledge: whether the user has acknowledged acknowledge messages (default: false) """ - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", "/_license", params=params, headers=headers, body=body ) @query_params("acknowledge") - def post_start_basic(self, params=None, headers=None): + async def post_start_basic(self, params=None, headers=None): """ Starts an indefinite basic license. ``_ @@ -74,12 +74,12 @@ def post_start_basic(self, params=None, headers=None): :arg acknowledge: whether the user has acknowledged acknowledge messages (default: false) """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_license/start_basic", params=params, headers=headers ) @query_params("acknowledge", "doc_type") - def post_start_trial(self, params=None, headers=None): + async def post_start_trial(self, params=None, headers=None): """ starts a limited time trial license. ``_ @@ -93,6 +93,6 @@ def post_start_trial(self, params=None, headers=None): if "doc_type" in params: params["type"] = params.pop("doc_type") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_license/start_trial", params=params, headers=headers ) diff --git a/elasticsearch/_async/client/migration.py b/elasticsearch/_async/client/migration.py index c58b987d7..443d15604 100644 --- a/elasticsearch/_async/client/migration.py +++ b/elasticsearch/_async/client/migration.py @@ -7,7 +7,7 @@ class MigrationClient(NamespacedClient): @query_params() - def deprecations(self, index=None, params=None, headers=None): + async def deprecations(self, index=None, params=None, headers=None): """ Retrieves information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major @@ -16,7 +16,7 @@ def deprecations(self, index=None, params=None, headers=None): :arg index: Index pattern """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_migration", "deprecations"), params=params, diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index ce594396a..4e589a83f 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -7,7 +7,7 @@ class MlClient(NamespacedClient): @query_params("allow_no_jobs", "force", "timeout") - def close_job(self, job_id, body=None, params=None, headers=None): + async def close_job(self, job_id, body=None, params=None, headers=None): """ Closes one or more anomaly detection jobs. A job can be opened and closed multiple times throughout its lifecycle. 
@@ -25,7 +25,7 @@ def close_job(self, job_id, body=None, params=None, headers=None): if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'job_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "anomaly_detectors", job_id, "_close"), params=params, @@ -34,7 +34,7 @@ def close_job(self, job_id, body=None, params=None, headers=None): ) @query_params() - def delete_calendar(self, calendar_id, params=None, headers=None): + async def delete_calendar(self, calendar_id, params=None, headers=None): """ Deletes a calendar. ``_ @@ -46,7 +46,7 @@ def delete_calendar(self, calendar_id, params=None, headers=None): "Empty value passed for a required argument 'calendar_id'." ) - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_ml", "calendars", calendar_id), params=params, @@ -54,7 +54,9 @@ def delete_calendar(self, calendar_id, params=None, headers=None): ) @query_params() - def delete_calendar_event(self, calendar_id, event_id, params=None, headers=None): + async def delete_calendar_event( + self, calendar_id, event_id, params=None, headers=None + ): """ Deletes scheduled events from a calendar. ``_ @@ -66,7 +68,7 @@ def delete_calendar_event(self, calendar_id, event_id, params=None, headers=None if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_ml", "calendars", calendar_id, "events", event_id), params=params, @@ -74,7 +76,7 @@ def delete_calendar_event(self, calendar_id, event_id, params=None, headers=None ) @query_params() - def delete_calendar_job(self, calendar_id, job_id, params=None, headers=None): + async def delete_calendar_job(self, calendar_id, job_id, params=None, headers=None): """ Deletes anomaly detection jobs from a calendar. ``_ @@ -86,7 +88,7 @@ def delete_calendar_job(self, calendar_id, job_id, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_ml", "calendars", calendar_id, "jobs", job_id), params=params, @@ -94,7 +96,7 @@ def delete_calendar_job(self, calendar_id, job_id, params=None, headers=None): ) @query_params("force") - def delete_datafeed(self, datafeed_id, params=None, headers=None): + async def delete_datafeed(self, datafeed_id, params=None, headers=None): """ Deletes an existing datafeed. ``_ @@ -107,7 +109,7 @@ def delete_datafeed(self, datafeed_id, params=None, headers=None): "Empty value passed for a required argument 'datafeed_id'." ) - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_ml", "datafeeds", datafeed_id), params=params, @@ -115,17 +117,23 @@ def delete_datafeed(self, datafeed_id, params=None, headers=None): ) @query_params() - def delete_expired_data(self, params=None, headers=None): + async def delete_expired_data(self, body=None, params=None, headers=None): """ Deletes expired and unused machine learning data. 
``_ + + :arg body: Parameters for deleting expired data """ - return self.transport.perform_request( - "DELETE", "/_ml/_delete_expired_data", params=params, headers=headers + return await self.transport.perform_request( + "DELETE", + "/_ml/_delete_expired_data", + params=params, + headers=headers, + body=body, ) @query_params() - def delete_filter(self, filter_id, params=None, headers=None): + async def delete_filter(self, filter_id, params=None, headers=None): """ Deletes a filter. ``_ @@ -135,7 +143,7 @@ def delete_filter(self, filter_id, params=None, headers=None): if filter_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'filter_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_ml", "filters", filter_id), params=params, @@ -143,7 +151,9 @@ def delete_filter(self, filter_id, params=None, headers=None): @query_params("allow_no_forecasts", "timeout") - def delete_forecast(self, job_id, forecast_id=None, params=None, headers=None): + async def delete_forecast( + self, job_id, forecast_id=None, params=None, headers=None + ): """ Deletes forecasts from a machine learning job. ``_ @@ -159,7 +169,7 @@ def delete_forecast(self, job_id, forecast_id=None, params=None, headers=None): if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'job_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_ml", "anomaly_detectors", job_id, "_forecast", forecast_id), params=params, @@ -167,7 +177,7 @@ def delete_forecast(self, job_id, forecast_id=None, params=None, headers=None): @query_params("force", "wait_for_completion") - def delete_job(self, job_id, params=None, headers=None): + async def delete_job(self, job_id, params=None, headers=None): """ Deletes an existing anomaly detection job. ``_ @@ -180,7 +190,7 @@ def delete_job(self, job_id, params=None, headers=None): if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'job_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_ml", "anomaly_detectors", job_id), params=params, @@ -188,7 +198,9 @@ def delete_job(self, job_id, params=None, headers=None): @query_params() - def delete_model_snapshot(self, job_id, snapshot_id, params=None, headers=None): + async def delete_model_snapshot( + self, job_id, snapshot_id, params=None, headers=None + ): """ Deletes an existing model snapshot. ``_ @@ -200,7 +212,7 @@ def delete_model_snapshot(self, job_id, snapshot_id, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path( "_ml", "anomaly_detectors", job_id, "model_snapshots", snapshot_id ), @@ -225,7 +237,7 @@ def delete_model_snapshot(self, job_id, snapshot_id, params=None, headers=None): "timestamp_field", "timestamp_format", ) - def find_file_structure(self, body, params=None, headers=None): + async def find_file_structure(self, body, params=None, headers=None): """ Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch.
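delete_expired_data still works with no arguments since the new body is optional; pairing it with get_buckets shows the results side (job id invented; desc is assumed to be in the elided get_buckets parameter list):

    import asyncio

    from elasticsearch import AsyncElasticsearch  # assumed export

    async def main():
        es = AsyncElasticsearch()
        try:
            # Housekeeping: prune expired ML results and snapshots.
            await es.ml.delete_expired_data()
            # Most anomalous buckets first.
            buckets = await es.ml.get_buckets(
                job_id="response-times", sort="anomaly_score", desc=True
            )
            for bucket in buckets["buckets"]:
                print(bucket["timestamp"], bucket["anomaly_score"])
        finally:
            await es.close()

    asyncio.run(main())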
@@ -268,7 +280,7 @@ def find_file_structure(self, body, params=None, headers=None): raise ValueError("Empty value passed for a required argument 'body'.") body = _bulk_body(self.transport.serializer, body) - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_ml/find_file_structure", params=params, @@ -277,7 +289,7 @@ def find_file_structure(self, body, params=None, headers=None): ) @query_params("advance_time", "calc_interim", "end", "skip_time", "start") - def flush_job(self, job_id, body=None, params=None, headers=None): + async def flush_job(self, job_id, body=None, params=None, headers=None): """ Forces any buffered data to be processed by the job. ``_ @@ -298,7 +310,7 @@ def flush_job(self, job_id, body=None, params=None, headers=None): if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'job_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "anomaly_detectors", job_id, "_flush"), params=params, @@ -307,7 +319,7 @@ def flush_job(self, job_id, body=None, params=None, headers=None): ) @query_params("duration", "expires_in") - def forecast(self, job_id, params=None, headers=None): + async def forecast(self, job_id, params=None, headers=None): """ Predicts the future behavior of a time series by using its historical behavior. ``_ @@ -320,7 +332,7 @@ def forecast(self, job_id, params=None, headers=None): if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'job_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "anomaly_detectors", job_id, "_forecast"), params=params, @@ -338,7 +350,9 @@ def forecast(self, job_id, params=None, headers=None): "sort", "start", ) - def get_buckets(self, job_id, body=None, timestamp=None, params=None, headers=None): + async def get_buckets( + self, job_id, body=None, timestamp=None, params=None, headers=None + ): """ Retrieves anomaly detection job results for one or more buckets. ``_ @@ -364,7 +378,7 @@ def get_buckets(self, job_id, body=None, timestamp=None, params=None, headers=No if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'job_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path( "_ml", "anomaly_detectors", job_id, "results", "buckets", timestamp @@ -375,7 +389,7 @@ def get_buckets(self, job_id, body=None, timestamp=None, params=None, headers=No ) @query_params("end", "from_", "job_id", "size", "start") - def get_calendar_events(self, calendar_id, params=None, headers=None): + async def get_calendar_events(self, calendar_id, params=None, headers=None): """ Retrieves information about the scheduled events in calendars. ``_ @@ -397,7 +411,7 @@ def get_calendar_events(self, calendar_id, params=None, headers=None): "Empty value passed for a required argument 'calendar_id'." ) - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_ml", "calendars", calendar_id, "events"), params=params, @@ -405,7 +419,9 @@ def get_calendar_events(self, calendar_id, params=None, headers=None): ) @query_params("from_", "size") - def get_calendars(self, body=None, calendar_id=None, params=None, headers=None): + async def get_calendars( + self, body=None, calendar_id=None, params=None, headers=None + ): """ Retrieves configuration information for calendars. 
``_ @@ -420,7 +436,7 @@ def get_calendars(self, body=None, calendar_id=None, params=None, headers=None): if "from_" in params: params["from"] = params.pop("from_") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "calendars", calendar_id), params=params, @@ -429,7 +445,7 @@ def get_calendars(self, body=None, calendar_id=None, params=None, headers=None): ) @query_params("allow_no_datafeeds") - def get_datafeed_stats(self, datafeed_id=None, params=None, headers=None): + async def get_datafeed_stats(self, datafeed_id=None, params=None, headers=None): """ Retrieves usage information for datafeeds. ``_ @@ -439,7 +455,7 @@ def get_datafeed_stats(self, datafeed_id=None, params=None, headers=None): expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified) """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_ml", "datafeeds", datafeed_id, "_stats"), params=params, @@ -447,7 +463,7 @@ def get_datafeed_stats(self, datafeed_id=None, params=None, headers=None): ) @query_params("allow_no_datafeeds") - def get_datafeeds(self, datafeed_id=None, params=None, headers=None): + async def get_datafeeds(self, datafeed_id=None, params=None, headers=None): """ Retrieves configuration information for datafeeds. ``_ @@ -457,7 +473,7 @@ def get_datafeeds(self, datafeed_id=None, params=None, headers=None): expression matches no datafeeds. (This includes `_all` string or when no datafeeds have been specified) """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_ml", "datafeeds", datafeed_id), params=params, @@ -465,7 +481,7 @@ def get_datafeeds(self, datafeed_id=None, params=None, headers=None): ) @query_params("from_", "size") - def get_filters(self, filter_id=None, params=None, headers=None): + async def get_filters(self, filter_id=None, params=None, headers=None): """ Retrieves filters. ``_ @@ -478,7 +494,7 @@ def get_filters(self, filter_id=None, params=None, headers=None): if "from_" in params: params["from"] = params.pop("from_") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_ml", "filters", filter_id), params=params, @@ -495,7 +511,7 @@ def get_filters(self, filter_id=None, params=None, headers=None): "sort", "start", ) - def get_influencers(self, job_id, body=None, params=None, headers=None): + async def get_influencers(self, job_id, body=None, params=None, headers=None): """ Retrieves anomaly detection job results for one or more influencers. ``_ @@ -520,7 +536,7 @@ def get_influencers(self, job_id, body=None, params=None, headers=None): if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'job_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "anomaly_detectors", job_id, "results", "influencers"), params=params, @@ -529,7 +545,7 @@ def get_influencers(self, job_id, body=None, params=None, headers=None): ) @query_params("allow_no_jobs") - def get_job_stats(self, job_id=None, params=None, headers=None): + async def get_job_stats(self, job_id=None, params=None, headers=None): """ Retrieves usage information for anomaly detection jobs. ``_ @@ -539,7 +555,7 @@ def get_job_stats(self, job_id=None, params=None, headers=None): matches no jobs. 
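The wildcard-tolerant stats lookups in use (same assumptions; both responses carry a top-level count):

    import asyncio

    from elasticsearch import AsyncElasticsearch  # assumed export

    async def main():
        es = AsyncElasticsearch()
        try:
            datafeeds = await es.ml.get_datafeed_stats(allow_no_datafeeds=True)
            jobs = await es.ml.get_job_stats(allow_no_jobs=True)
            print(datafeeds["count"], "datafeeds,", jobs["count"], "jobs")
        finally:
            await es.close()

    asyncio.run(main())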
(This includes `_all` string or when no jobs have been specified) """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_ml", "anomaly_detectors", job_id, "_stats"), params=params, @@ -547,7 +563,7 @@ def get_job_stats(self, job_id=None, params=None, headers=None): ) @query_params("allow_no_jobs") - def get_jobs(self, job_id=None, params=None, headers=None): + async def get_jobs(self, job_id=None, params=None, headers=None): """ Retrieves configuration information for anomaly detection jobs. ``_ @@ -557,7 +573,7 @@ def get_jobs(self, job_id=None, params=None, headers=None): matches no jobs. (This includes `_all` string or when no jobs have been specified) """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_ml", "anomaly_detectors", job_id), params=params, @@ -573,7 +589,7 @@ def get_jobs(self, job_id=None, params=None, headers=None): "start", "top_n", ) - def get_overall_buckets(self, job_id, body=None, params=None, headers=None): + async def get_overall_buckets(self, job_id, body=None, params=None, headers=None): """ Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. @@ -602,7 +618,7 @@ def get_overall_buckets(self, job_id, body=None, params=None, headers=None): if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'job_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path( "_ml", "anomaly_detectors", job_id, "results", "overall_buckets" @@ -622,7 +638,7 @@ def get_overall_buckets(self, job_id, body=None, params=None, headers=None): "sort", "start", ) - def get_records(self, job_id, body=None, params=None, headers=None): + async def get_records(self, job_id, body=None, params=None, headers=None): """ Retrieves anomaly records for an anomaly detection job. ``_ @@ -646,7 +662,7 @@ def get_records(self, job_id, body=None, params=None, headers=None): if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'job_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "anomaly_detectors", job_id, "results", "records"), params=params, @@ -655,17 +671,17 @@ def get_records(self, job_id, body=None, params=None, headers=None): ) @query_params() - def info(self, params=None, headers=None): + async def info(self, params=None, headers=None): """ Returns defaults and limits used by machine learning. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_ml/info", params=params, headers=headers ) @query_params() - def open_job(self, job_id, params=None, headers=None): + async def open_job(self, job_id, params=None, headers=None): """ Opens one or more anomaly detection jobs. 
``_ @@ -675,7 +691,7 @@ def open_job(self, job_id, params=None, headers=None): if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'job_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "anomaly_detectors", job_id, "_open"), params=params, @@ -683,7 +699,7 @@ def open_job(self, job_id, params=None, headers=None): ) @query_params() - def post_calendar_events(self, calendar_id, body, params=None, headers=None): + async def post_calendar_events(self, calendar_id, body, params=None, headers=None): """ Posts scheduled events in a calendar. ``_ @@ -695,7 +711,7 @@ def post_calendar_events(self, calendar_id, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "calendars", calendar_id, "events"), params=params, @@ -704,7 +720,7 @@ def post_calendar_events(self, calendar_id, body, params=None, headers=None): ) @query_params("reset_end", "reset_start") - def post_data(self, job_id, body, params=None, headers=None): + async def post_data(self, job_id, body, params=None, headers=None): """ Sends data to an anomaly detection job for analysis. ``_ @@ -721,7 +737,7 @@ def post_data(self, job_id, body, params=None, headers=None): raise ValueError("Empty value passed for a required argument.") body = _bulk_body(self.transport.serializer, body) - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "anomaly_detectors", job_id, "_data"), params=params, @@ -730,7 +746,7 @@ def post_data(self, job_id, body, params=None, headers=None): ) @query_params() - def preview_datafeed(self, datafeed_id, params=None, headers=None): + async def preview_datafeed(self, datafeed_id, params=None, headers=None): """ Previews a datafeed. ``_ @@ -742,7 +758,7 @@ def preview_datafeed(self, datafeed_id, params=None, headers=None): "Empty value passed for a required argument 'datafeed_id'." ) - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_ml", "datafeeds", datafeed_id, "_preview"), params=params, @@ -750,7 +766,7 @@ def preview_datafeed(self, datafeed_id, params=None, headers=None): ) @query_params() - def put_calendar(self, calendar_id, body=None, params=None, headers=None): + async def put_calendar(self, calendar_id, body=None, params=None, headers=None): """ Instantiates a calendar. ``_ @@ -763,7 +779,7 @@ def put_calendar(self, calendar_id, body=None, params=None, headers=None): "Empty value passed for a required argument 'calendar_id'." ) - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_ml", "calendars", calendar_id), params=params, @@ -772,7 +788,7 @@ def put_calendar(self, calendar_id, body=None, params=None, headers=None): ) @query_params() - def put_calendar_job(self, calendar_id, job_id, params=None, headers=None): + async def put_calendar_job(self, calendar_id, job_id, params=None, headers=None): """ Adds an anomaly detection job to a calendar. 
``_ @@ -784,7 +800,7 @@ def put_calendar_job(self, calendar_id, job_id, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_ml", "calendars", calendar_id, "jobs", job_id), params=params, @@ -794,7 +810,7 @@ def put_calendar_job(self, calendar_id, job_id, params=None, headers=None): @query_params( "allow_no_indices", "expand_wildcards", "ignore_throttled", "ignore_unavailable" ) - def put_datafeed(self, datafeed_id, body, params=None, headers=None): + async def put_datafeed(self, datafeed_id, body, params=None, headers=None): """ Instantiates a datafeed. ``_ @@ -815,7 +831,7 @@ def put_datafeed(self, datafeed_id, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_ml", "datafeeds", datafeed_id), params=params, @@ -824,7 +840,7 @@ def put_datafeed(self, datafeed_id, body, params=None, headers=None): ) @query_params() - def put_filter(self, filter_id, body, params=None, headers=None): + async def put_filter(self, filter_id, body, params=None, headers=None): """ Instantiates a filter. ``_ @@ -836,7 +852,7 @@ def put_filter(self, filter_id, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_ml", "filters", filter_id), params=params, @@ -845,7 +861,7 @@ def put_filter(self, filter_id, body, params=None, headers=None): ) @query_params() - def put_job(self, job_id, body, params=None, headers=None): + async def put_job(self, job_id, body, params=None, headers=None): """ Instantiates an anomaly detection job. ``_ @@ -857,7 +873,7 @@ def put_job(self, job_id, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_ml", "anomaly_detectors", job_id), params=params, @@ -866,7 +882,7 @@ def put_job(self, job_id, body, params=None, headers=None): ) @query_params("enabled", "timeout") - def set_upgrade_mode(self, params=None, headers=None): + async def set_upgrade_mode(self, params=None, headers=None): """ Sets a cluster wide upgrade_mode setting that prepares machine learning indices for an upgrade. @@ -877,12 +893,12 @@ def set_upgrade_mode(self, params=None, headers=None): :arg timeout: Controls the time to wait before action times out. Defaults to 30 seconds """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_ml/set_upgrade_mode", params=params, headers=headers ) @query_params("end", "start", "timeout") - def start_datafeed(self, datafeed_id, body=None, params=None, headers=None): + async def start_datafeed(self, datafeed_id, body=None, params=None, headers=None): """ Starts one or more datafeeds. ``_ @@ -900,7 +916,7 @@ def start_datafeed(self, datafeed_id, body=None, params=None, headers=None): "Empty value passed for a required argument 'datafeed_id'." 
) - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "datafeeds", datafeed_id, "_start"), params=params, @@ -909,7 +925,7 @@ def start_datafeed(self, datafeed_id, body=None, params=None, headers=None): ) @query_params("allow_no_datafeeds", "force", "timeout") - def stop_datafeed(self, datafeed_id, params=None, headers=None): + async def stop_datafeed(self, datafeed_id, params=None, headers=None): """ Stops one or more datafeeds. ``_ @@ -927,7 +943,7 @@ def stop_datafeed(self, datafeed_id, params=None, headers=None): "Empty value passed for a required argument 'datafeed_id'." ) - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "datafeeds", datafeed_id, "_stop"), params=params, @@ -937,7 +953,7 @@ def stop_datafeed(self, datafeed_id, params=None, headers=None): @query_params( "allow_no_indices", "expand_wildcards", "ignore_throttled", "ignore_unavailable" ) - def update_datafeed(self, datafeed_id, body, params=None, headers=None): + async def update_datafeed(self, datafeed_id, body, params=None, headers=None): """ Updates certain properties of a datafeed. ``_ @@ -958,7 +974,7 @@ def update_datafeed(self, datafeed_id, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "datafeeds", datafeed_id, "_update"), params=params, @@ -967,7 +983,7 @@ def update_datafeed(self, datafeed_id, body, params=None, headers=None): ) @query_params() - def update_filter(self, filter_id, body, params=None, headers=None): + async def update_filter(self, filter_id, body, params=None, headers=None): """ Updates the description of a filter, adds items, or removes items. ``_ @@ -979,7 +995,7 @@ def update_filter(self, filter_id, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "filters", filter_id, "_update"), params=params, @@ -988,7 +1004,7 @@ def update_filter(self, filter_id, body, params=None, headers=None): ) @query_params() - def update_job(self, job_id, body, params=None, headers=None): + async def update_job(self, job_id, body, params=None, headers=None): """ Updates certain properties of an anomaly detection job. ``_ @@ -1000,7 +1016,7 @@ def update_job(self, job_id, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "anomaly_detectors", job_id, "_update"), params=params, @@ -1009,16 +1025,17 @@ def update_job(self, job_id, body, params=None, headers=None): ) @query_params() - def validate(self, body, params=None, headers=None): + async def validate(self, body, params=None, headers=None): """ Validates an anomaly detection job. 
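A typical open/start/stop lifecycle with the converted methods (job and datafeed ids invented):

    import asyncio

    from elasticsearch import AsyncElasticsearch  # assumed export

    async def main():
        es = AsyncElasticsearch()
        try:
            await es.ml.open_job(job_id="response-times")
            await es.ml.start_datafeed(datafeed_id="datafeed-response-times")
            # ... let the datafeed ingest for a while ...
            await es.ml.stop_datafeed(datafeed_id="datafeed-response-times", timeout="30s")
            await es.ml.close_job(job_id="response-times")
        finally:
            await es.close()

    asyncio.run(main())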
+ ``_ :arg body: The job config """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_ml/anomaly_detectors/_validate", params=params, @@ -1027,16 +1044,17 @@ def validate(self, body, params=None, headers=None): ) @query_params() - def validate_detector(self, body, params=None, headers=None): + async def validate_detector(self, body, params=None, headers=None): """ Validates an anomaly detection detector. + ``_ :arg body: The detector """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_ml/anomaly_detectors/_validate/detector", params=params, @@ -1045,7 +1063,7 @@ def validate_detector(self, body, params=None, headers=None): ) @query_params("force") - def delete_data_frame_analytics(self, id, params=None, headers=None): + async def delete_data_frame_analytics(self, id, params=None, headers=None): """ Deletes an existing data frame analytics job. ``_ @@ -1056,7 +1074,7 @@ def delete_data_frame_analytics(self, id, params=None, headers=None): if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_ml", "data_frame", "analytics", id), params=params, @@ -1064,7 +1082,7 @@ def delete_data_frame_analytics(self, id, params=None, headers=None): ) @query_params() - def evaluate_data_frame(self, body, params=None, headers=None): + async def evaluate_data_frame(self, body, params=None, headers=None): """ Evaluates the data frame analytics for an annotated index. ``_ @@ -1074,7 +1092,7 @@ def evaluate_data_frame(self, body, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_ml/data_frame/_evaluate", params=params, @@ -1083,7 +1101,7 @@ def evaluate_data_frame(self, body, params=None, headers=None): ) @query_params("allow_no_match", "from_", "size") - def get_data_frame_analytics(self, id=None, params=None, headers=None): + async def get_data_frame_analytics(self, id=None, params=None, headers=None): """ Retrieves configuration information for data frame analytics jobs. ``_ @@ -1100,7 +1118,7 @@ def get_data_frame_analytics(self, id=None, params=None, headers=None): if "from_" in params: params["from"] = params.pop("from_") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_ml", "data_frame", "analytics", id), params=params, @@ -1108,7 +1126,7 @@ def get_data_frame_analytics(self, id=None, params=None, headers=None): ) @query_params("allow_no_match", "from_", "size") - def get_data_frame_analytics_stats(self, id=None, params=None, headers=None): + async def get_data_frame_analytics_stats(self, id=None, params=None, headers=None): """ Retrieves usage information for data frame analytics jobs. 
``_ @@ -1125,7 +1143,7 @@ def get_data_frame_analytics_stats(self, id=None, params=None, headers=None): if "from_" in params: params["from"] = params.pop("from_") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_ml", "data_frame", "analytics", id, "_stats"), params=params, @@ -1133,7 +1151,7 @@ def get_data_frame_analytics_stats(self, id=None, params=None, headers=None): ) @query_params() - def put_data_frame_analytics(self, id, body, params=None, headers=None): + async def put_data_frame_analytics(self, id, body, params=None, headers=None): """ Instantiates a data frame analytics job. ``_ @@ -1145,7 +1163,7 @@ def put_data_frame_analytics(self, id, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_ml", "data_frame", "analytics", id), params=params, @@ -1154,7 +1172,9 @@ def put_data_frame_analytics(self, id, body, params=None, headers=None): ) @query_params("timeout") - def start_data_frame_analytics(self, id, body=None, params=None, headers=None): + async def start_data_frame_analytics( + self, id, body=None, params=None, headers=None + ): """ Starts a data frame analytics job. ``_ @@ -1167,7 +1187,7 @@ def start_data_frame_analytics(self, id, body=None, params=None, headers=None): if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "data_frame", "analytics", id, "_start"), params=params, @@ -1176,7 +1196,7 @@ def start_data_frame_analytics(self, id, body=None, params=None, headers=None): ) @query_params("allow_no_match", "force", "timeout") - def stop_data_frame_analytics(self, id, body=None, params=None, headers=None): + async def stop_data_frame_analytics(self, id, body=None, params=None, headers=None): """ Stops one or more data frame analytics jobs. ``_ @@ -1194,7 +1214,7 @@ def stop_data_frame_analytics(self, id, body=None, params=None, headers=None): if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "data_frame", "analytics", id, "_stop"), params=params, @@ -1203,7 +1223,7 @@ def stop_data_frame_analytics(self, id, body=None, params=None, headers=None): ) @query_params() - def delete_trained_model(self, model_id, params=None, headers=None): + async def delete_trained_model(self, model_id, params=None, headers=None): """ Deletes an existing trained inference model that is currently not referenced by an ingest pipeline. @@ -1214,7 +1234,7 @@ def delete_trained_model(self, model_id, params=None, headers=None): if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'model_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_ml", "inference", model_id), params=params, @@ -1229,7 +1249,7 @@ def delete_trained_model(self, model_id, params=None, headers=None): "size", "tags", ) - def get_trained_models(self, model_id=None, params=None, headers=None): + async def get_trained_models(self, model_id=None, params=None, headers=None): """ Retrieves configuration information for a trained inference model. 
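The data frame analytics methods chain the same way (id, indices, and analysis invented):

    import asyncio

    from elasticsearch import AsyncElasticsearch  # assumed export

    async def main():
        es = AsyncElasticsearch()
        try:
            await es.ml.put_data_frame_analytics(
                id="purchases-outliers",
                body={
                    "source": {"index": "purchases"},
                    "dest": {"index": "purchases-outliers"},
                    "analysis": {"outlier_detection": {}},
                },
            )
            await es.ml.start_data_frame_analytics(id="purchases-outliers")
            print(await es.ml.get_data_frame_analytics_stats(id="purchases-outliers"))
        finally:
            await es.close()

    asyncio.run(main())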
``_ @@ -1254,7 +1274,7 @@ def get_trained_models(self, model_id=None, params=None, headers=None): if "from_" in params: params["from"] = params.pop("from_") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_ml", "inference", model_id), params=params, @@ -1262,7 +1282,7 @@ def get_trained_models(self, model_id=None, params=None, headers=None): ) @query_params("allow_no_match", "from_", "size") - def get_trained_models_stats(self, model_id=None, params=None, headers=None): + async def get_trained_models_stats(self, model_id=None, params=None, headers=None): """ Retrieves usage information for trained inference models. ``_ @@ -1279,7 +1299,7 @@ def get_trained_models_stats(self, model_id=None, params=None, headers=None): if "from_" in params: params["from"] = params.pop("from_") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_ml", "inference", model_id, "_stats"), params=params, @@ -1287,7 +1307,7 @@ def get_trained_models_stats(self, model_id=None, params=None, headers=None): ) @query_params() - def put_trained_model(self, model_id, body, params=None, headers=None): + async def put_trained_model(self, model_id, body, params=None, headers=None): """ Creates an inference trained model. ``_ @@ -1299,7 +1319,7 @@ def put_trained_model(self, model_id, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_ml", "inference", model_id), params=params, @@ -1308,7 +1328,7 @@ def put_trained_model(self, model_id, body, params=None, headers=None): ) @query_params() - def estimate_model_memory(self, body, params=None, headers=None): + async def estimate_model_memory(self, body, params=None, headers=None): """ Estimates the model memory ``_ @@ -1319,7 +1339,7 @@ def estimate_model_memory(self, body, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_ml/anomaly_detectors/_estimate_model_memory", params=params, @@ -1328,7 +1348,7 @@ def estimate_model_memory(self, body, params=None, headers=None): ) @query_params() - def explain_data_frame_analytics( + async def explain_data_frame_analytics( self, body=None, id=None, params=None, headers=None ): """ @@ -1338,7 +1358,7 @@ def explain_data_frame_analytics( :arg body: The data frame analytics config to explain :arg id: The ID of the data frame analytics to explain """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_ml", "data_frame", "analytics", id, "_explain"), params=params, @@ -1347,7 +1367,7 @@ def explain_data_frame_analytics( ) @query_params("from_", "size") - def get_categories( + async def get_categories( self, job_id, body=None, category_id=None, params=None, headers=None ): """ @@ -1368,7 +1388,7 @@ def get_categories( if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'job_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path( "_ml", "anomaly_detectors", job_id, "results", "categories", category_id @@ -1379,7 +1399,7 @@ def get_categories( ) @query_params("desc", "end", "from_", "size", "sort", "start") - def get_model_snapshots( + 
async def get_model_snapshots( self, job_id, body=None, snapshot_id=None, params=None, headers=None ): """ @@ -1405,7 +1425,7 @@ def get_model_snapshots( if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'job_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path( "_ml", "anomaly_detectors", job_id, "model_snapshots", snapshot_id @@ -1416,7 +1436,7 @@ def get_model_snapshots( ) @query_params("delete_intervening_results") - def revert_model_snapshot( + async def revert_model_snapshot( self, job_id, snapshot_id, body=None, params=None, headers=None ): """ @@ -1433,7 +1453,7 @@ def revert_model_snapshot( if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path( "_ml", @@ -1449,7 +1469,7 @@ def revert_model_snapshot( ) @query_params() - def update_model_snapshot( + async def update_model_snapshot( self, job_id, snapshot_id, body, params=None, headers=None ): """ @@ -1464,7 +1484,7 @@ def update_model_snapshot( if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path( "_ml", diff --git a/elasticsearch/_async/client/monitoring.py b/elasticsearch/_async/client/monitoring.py index cf5677cd3..2f7b570d2 100644 --- a/elasticsearch/_async/client/monitoring.py +++ b/elasticsearch/_async/client/monitoring.py @@ -7,7 +7,7 @@ class MonitoringClient(NamespacedClient): @query_params("interval", "system_api_version", "system_id") - def bulk(self, body, doc_type=None, params=None, headers=None): + async def bulk(self, body, doc_type=None, params=None, headers=None): """ Used by the monitoring features to send monitoring data. ``_ @@ -25,7 +25,7 @@ def bulk(self, body, doc_type=None, params=None, headers=None): raise ValueError("Empty value passed for a required argument 'body'.") body = _bulk_body(self.transport.serializer, body) - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_monitoring", doc_type, "bulk"), params=params, diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py index c3aaec3af..51515b128 100644 --- a/elasticsearch/_async/client/nodes.py +++ b/elasticsearch/_async/client/nodes.py @@ -7,7 +7,7 @@ class NodesClient(NamespacedClient): @query_params("timeout") - def reload_secure_settings( + async def reload_secure_settings( self, body=None, node_id=None, params=None, headers=None ): """ @@ -21,7 +21,7 @@ def reload_secure_settings( all cluster nodes. :arg timeout: Explicit operation timeout """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_nodes", node_id, "reload_secure_settings"), params=params, @@ -30,7 +30,7 @@ def reload_secure_settings( ) @query_params("flat_settings", "timeout") - def info(self, node_id=None, metric=None, params=None, headers=None): + async def info(self, node_id=None, metric=None, params=None, headers=None): """ Returns information about nodes in the cluster. 
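# --- Editorial aside: ``monitoring.bulk`` (converted above) still routes its
# body through ``_bulk_body``, so a list of dicts is serialized to
# newline-delimited JSON on our behalf. The documents below are placeholders,
# not a real monitoring payload.
async def ship_monitoring_docs(es):
    body = [
        {"index": {}},       # action line
        {"my_metric": 42},   # hypothetical source document
    ]
    return await es.monitoring.bulk(
        body, system_id="my_system", system_api_version="7", interval="10s"
    )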
``_ @@ -46,14 +46,14 @@ def info(self, node_id=None, metric=None, params=None, headers=None): false) :arg timeout: Explicit operation timeout """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_nodes", node_id, metric), params=params, headers=headers ) @query_params( "doc_type", "ignore_idle_threads", "interval", "snapshots", "threads", "timeout" ) - def hot_threads(self, node_id=None, params=None, headers=None): + async def hot_threads(self, node_id=None, params=None, headers=None): """ Returns information about hot threads on each node in the cluster. ``_ @@ -78,7 +78,7 @@ def hot_threads(self, node_id=None, params=None, headers=None): if "doc_type" in params: params["type"] = params.pop("doc_type") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_nodes", node_id, "hot_threads"), params=params, @@ -86,7 +86,7 @@ def hot_threads(self, node_id=None, params=None, headers=None): ) @query_params("timeout") - def usage(self, node_id=None, metric=None, params=None, headers=None): + async def usage(self, node_id=None, metric=None, params=None, headers=None): """ Returns low-level information about REST actions usage on nodes. ``_ @@ -99,7 +99,7 @@ def usage(self, node_id=None, metric=None, params=None, headers=None): metrics Valid choices: _all, rest_actions :arg timeout: Explicit operation timeout """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_nodes", node_id, "usage", metric), params=params, @@ -116,7 +116,7 @@ def usage(self, node_id=None, metric=None, params=None, headers=None): "timeout", "types", ) - def stats( + async def stats( self, node_id=None, metric=None, index_metric=None, params=None, headers=None ): """ @@ -152,7 +152,7 @@ def stats( :arg types: A comma-separated list of document types for the `indexing` index metric """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_nodes", node_id, "stats", metric, index_metric), params=params, diff --git a/elasticsearch/_async/client/rollup.py b/elasticsearch/_async/client/rollup.py index 0ee11270b..54346cf9b 100644 --- a/elasticsearch/_async/client/rollup.py +++ b/elasticsearch/_async/client/rollup.py @@ -7,7 +7,7 @@ class RollupClient(NamespacedClient): @query_params() - def delete_job(self, id, params=None, headers=None): + async def delete_job(self, id, params=None, headers=None): """ Deletes an existing rollup job. ``_ @@ -17,12 +17,12 @@ def delete_job(self, id, params=None, headers=None): if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_rollup", "job", id), params=params, headers=headers ) @query_params() - def get_jobs(self, id=None, params=None, headers=None): + async def get_jobs(self, id=None, params=None, headers=None): """ Retrieves the configuration, stats, and status of rollup jobs. ``_ @@ -30,12 +30,12 @@ def get_jobs(self, id=None, params=None, headers=None): :arg id: The ID of the job(s) to fetch. 
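# --- Editorial aside: the nodes API after conversion. Note the ``doc_type``
# kwarg on ``hot_threads``: the hunk above pops it and sends it as ``type``,
# which the client avoids as a parameter name because it shadows a builtin.
async def node_diagnostics(es):
    info = await es.nodes.info(metric="jvm")
    hot = await es.nodes.hot_threads(doc_type="cpu", interval="500ms", threads=3)
    stats = await es.nodes.stats(metric="jvm")
    return info, hot, stats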
Accepts glob patterns, or left blank for all jobs """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_rollup", "job", id), params=params, headers=headers ) @query_params() - def get_rollup_caps(self, id=None, params=None, headers=None): + async def get_rollup_caps(self, id=None, params=None, headers=None): """ Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern. @@ -44,12 +44,12 @@ def get_rollup_caps(self, id=None, params=None, headers=None): :arg id: The ID of the index to check rollup capabilities on, or left blank for all jobs """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_rollup", "data", id), params=params, headers=headers ) @query_params() - def get_rollup_index_caps(self, index, params=None, headers=None): + async def get_rollup_index_caps(self, index, params=None, headers=None): """ Returns the rollup capabilities of all jobs inside of a rollup index (e.g. the index where rollup data is stored). @@ -61,12 +61,12 @@ def get_rollup_index_caps(self, index, params=None, headers=None): if index in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'index'.") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_rollup", "data"), params=params, headers=headers ) @query_params() - def put_job(self, id, body, params=None, headers=None): + async def put_job(self, id, body, params=None, headers=None): """ Creates a rollup job. ``_ @@ -78,7 +78,7 @@ def put_job(self, id, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_rollup", "job", id), params=params, @@ -87,7 +87,9 @@ def put_job(self, id, body, params=None, headers=None): ) @query_params("rest_total_hits_as_int", "typed_keys") - def rollup_search(self, index, body, doc_type=None, params=None, headers=None): + async def rollup_search( + self, index, body, doc_type=None, params=None, headers=None + ): """ Enables searching rolled-up data using the standard query DSL. ``_ @@ -105,7 +107,7 @@ def rollup_search(self, index, body, doc_type=None, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, doc_type, "_rollup_search"), params=params, @@ -114,7 +116,7 @@ def rollup_search(self, index, body, doc_type=None, params=None, headers=None): ) @query_params() - def start_job(self, id, params=None, headers=None): + async def start_job(self, id, params=None, headers=None): """ Starts an existing, stopped rollup job. ``_ @@ -124,7 +126,7 @@ def start_job(self, id, params=None, headers=None): if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_rollup", "job", id, "_start"), params=params, @@ -132,7 +134,7 @@ def start_job(self, id, params=None, headers=None): ) @query_params("timeout", "wait_for_completion") - def stop_job(self, id, params=None, headers=None): + async def stop_job(self, id, params=None, headers=None): """ Stops an existing, started rollup job. 
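# --- Editorial aside: a rollup round trip with the coroutines above. The job
# config is schematic (field names like "timestamp"/"temperature" are invented
# for illustration).
async def rollup_round_trip(es):
    await es.rollup.put_job(
        id="sensor-rollup",
        body={
            "index_pattern": "sensor-*",
            "rollup_index": "sensor_rollup",
            "cron": "*/30 * * * * ?",
            "page_size": 1000,
            "groups": {
                "date_histogram": {"field": "timestamp", "fixed_interval": "1h"}
            },
        },
    )
    await es.rollup.start_job(id="sensor-rollup")
    return await es.rollup.rollup_search(
        index="sensor_rollup",
        body={"size": 0, "aggs": {"max_temp": {"max": {"field": "temperature"}}}},
    )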
``_ @@ -147,7 +149,7 @@ def stop_job(self, id, params=None, headers=None): if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_rollup", "job", id, "_stop"), params=params, diff --git a/elasticsearch/_async/client/searchable_snapshots.py b/elasticsearch/_async/client/searchable_snapshots.py index 1616493de..b45357ad6 100644 --- a/elasticsearch/_async/client/searchable_snapshots.py +++ b/elasticsearch/_async/client/searchable_snapshots.py @@ -7,7 +7,7 @@ class SearchableSnapshotsClient(NamespacedClient): @query_params("allow_no_indices", "expand_wildcards", "ignore_unavailable") - def clear_cache(self, index=None, params=None, headers=None): + async def clear_cache(self, index=None, params=None, headers=None): """ Clear the cache of searchable snapshots. ``_ @@ -23,7 +23,7 @@ def clear_cache(self, index=None, params=None, headers=None): :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path(index, "_searchable_snapshots", "cache", "clear"), params=params, @@ -31,7 +31,7 @@ def clear_cache(self, index=None, params=None, headers=None): ) @query_params("master_timeout", "wait_for_completion") - def mount(self, repository, snapshot, body, params=None, headers=None): + async def mount(self, repository, snapshot, body, params=None, headers=None): """ Mount a snapshot as a searchable index. ``_ @@ -50,7 +50,7 @@ def mount(self, repository, snapshot, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_snapshot", repository, snapshot, "_mount"), params=params, @@ -59,7 +59,7 @@ def mount(self, repository, snapshot, body, params=None, headers=None): ) @query_params() - def repository_stats(self, repository, params=None, headers=None): + async def repository_stats(self, repository, params=None, headers=None): """ Retrieve usage statistics about a snapshot repository. ``_ @@ -69,7 +69,7 @@ def repository_stats(self, repository, params=None, headers=None): if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'repository'.") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_snapshot", repository, "_stats"), params=params, @@ -77,14 +77,14 @@ def repository_stats(self, repository, params=None, headers=None): ) @query_params() - def stats(self, index=None, params=None, headers=None): + async def stats(self, index=None, params=None, headers=None): """ Retrieve various statistics about searchable snapshots. 
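# --- Editorial aside: mounting a snapshot as a searchable index with the
# coroutines above. The ``{"index": ...}`` body is the assumed minimal shape;
# repository and snapshot names are placeholders.
async def mount_searchable(es):
    await es.searchable_snapshots.mount(
        repository="my-repo",
        snapshot="snap-1",
        body={"index": "my-index"},
        wait_for_completion=True,
    )
    return await es.searchable_snapshots.stats(index="my-index")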
``_ :arg index: A comma-separated list of index names """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path(index, "_searchable_snapshots", "stats"), params=params, diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py index 90e2ce7cb..b033ba40e 100644 --- a/elasticsearch/_async/client/security.py +++ b/elasticsearch/_async/client/security.py @@ -7,18 +7,18 @@ class SecurityClient(NamespacedClient): @query_params() - def authenticate(self, params=None, headers=None): + async def authenticate(self, params=None, headers=None): """ Enables authentication as a user and retrieve information about the authenticated user. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_security/_authenticate", params=params, headers=headers ) @query_params("refresh") - def change_password(self, body, username=None, params=None, headers=None): + async def change_password(self, body, username=None, params=None, headers=None): """ Changes the passwords of users in the native realm and built-in users. ``_ @@ -34,7 +34,7 @@ def change_password(self, body, username=None, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_security", "user", username, "_password"), params=params, @@ -43,7 +43,7 @@ def change_password(self, body, username=None, params=None, headers=None): ) @query_params("usernames") - def clear_cached_realms(self, realms, params=None, headers=None): + async def clear_cached_realms(self, realms, params=None, headers=None): """ Evicts users from the user cache. Can completely clear the cache or evict specific users. @@ -56,7 +56,7 @@ def clear_cached_realms(self, realms, params=None, headers=None): if realms in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'realms'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_security", "realm", realms, "_clear_cache"), params=params, @@ -64,7 +64,7 @@ def clear_cached_realms(self, realms, params=None, headers=None): ) @query_params() - def clear_cached_roles(self, name, params=None, headers=None): + async def clear_cached_roles(self, name, params=None, headers=None): """ Evicts roles from the native role cache. ``_ @@ -74,7 +74,7 @@ def clear_cached_roles(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_security", "role", name, "_clear_cache"), params=params, @@ -82,7 +82,7 @@ def clear_cached_roles(self, name, params=None, headers=None): ) @query_params("refresh") - def create_api_key(self, body, params=None, headers=None): + async def create_api_key(self, body, params=None, headers=None): """ Creates an API key for access without requiring basic authentication. 
``_ @@ -96,12 +96,12 @@ def create_api_key(self, body, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", "/_security/api_key", params=params, headers=headers, body=body ) @query_params("refresh") - def delete_privileges(self, application, name, params=None, headers=None): + async def delete_privileges(self, application, name, params=None, headers=None): """ Removes application privileges. ``_ @@ -117,7 +117,7 @@ def delete_privileges(self, application, name, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_security", "privilege", application, name), params=params, @@ -125,7 +125,7 @@ def delete_privileges(self, application, name, params=None, headers=None): ) @query_params("refresh") - def delete_role(self, name, params=None, headers=None): + async def delete_role(self, name, params=None, headers=None): """ Removes roles in the native realm. ``_ @@ -139,7 +139,7 @@ def delete_role(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_security", "role", name), params=params, @@ -147,7 +147,7 @@ def delete_role(self, name, params=None, headers=None): ) @query_params("refresh") - def delete_role_mapping(self, name, params=None, headers=None): + async def delete_role_mapping(self, name, params=None, headers=None): """ Removes role mappings. ``_ @@ -161,7 +161,7 @@ def delete_role_mapping(self, name, params=None, headers=None): if name in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'name'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_security", "role_mapping", name), params=params, @@ -169,7 +169,7 @@ def delete_role_mapping(self, name, params=None, headers=None): ) @query_params("refresh") - def delete_user(self, username, params=None, headers=None): + async def delete_user(self, username, params=None, headers=None): """ Deletes users from the native realm. ``_ @@ -183,7 +183,7 @@ def delete_user(self, username, params=None, headers=None): if username in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'username'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_security", "user", username), params=params, @@ -191,7 +191,7 @@ def delete_user(self, username, params=None, headers=None): ) @query_params("refresh") - def disable_user(self, username, params=None, headers=None): + async def disable_user(self, username, params=None, headers=None): """ Disables users in the native realm. 
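# --- Editorial aside: provisioning access with the security coroutines above.
# The role-descriptor body is schematic, and ``refresh="wait_for"`` is a
# standard choice for the visible ``refresh`` query param.
async def provision_access(es):
    key = await es.security.create_api_key(
        body={"name": "ingest-key", "role_descriptors": {}}, refresh="wait_for"
    )
    await es.security.disable_user(username="old-service-account")
    return key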
``_ @@ -205,7 +205,7 @@ def disable_user(self, username, params=None, headers=None): if username in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'username'.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_security", "user", username, "_disable"), params=params, @@ -213,7 +213,7 @@ def disable_user(self, username, params=None, headers=None): ) @query_params("refresh") - def enable_user(self, username, params=None, headers=None): + async def enable_user(self, username, params=None, headers=None): """ Enables users in the native realm. ``_ @@ -227,7 +227,7 @@ def enable_user(self, username, params=None, headers=None): if username in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'username'.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_security", "user", username, "_enable"), params=params, @@ -235,7 +235,7 @@ def enable_user(self, username, params=None, headers=None): ) @query_params("id", "name", "owner", "realm_name", "username") - def get_api_key(self, params=None, headers=None): + async def get_api_key(self, params=None, headers=None): """ Retrieves information for one or more API keys. ``_ @@ -249,12 +249,14 @@ def get_api_key(self, params=None, headers=None): :arg username: user name of the user who created this API key to be retrieved """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_security/api_key", params=params, headers=headers ) @query_params() - def get_privileges(self, application=None, name=None, params=None, headers=None): + async def get_privileges( + self, application=None, name=None, params=None, headers=None + ): """ Retrieves application privileges. ``_ @@ -262,7 +264,7 @@ def get_privileges(self, application=None, name=None, params=None, headers=None) :arg application: Application name :arg name: Privilege name """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_security", "privilege", application, name), params=params, @@ -270,26 +272,26 @@ def get_privileges(self, application=None, name=None, params=None, headers=None) ) @query_params() - def get_role(self, name=None, params=None, headers=None): + async def get_role(self, name=None, params=None, headers=None): """ Retrieves roles in the native realm. ``_ :arg name: Role name """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_security", "role", name), params=params, headers=headers ) @query_params() - def get_role_mapping(self, name=None, params=None, headers=None): + async def get_role_mapping(self, name=None, params=None, headers=None): """ Retrieves role mappings. ``_ :arg name: Role-Mapping name """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_security", "role_mapping", name), params=params, @@ -297,7 +299,7 @@ def get_role_mapping(self, name=None, params=None, headers=None): ) @query_params() - def get_token(self, body, params=None, headers=None): + async def get_token(self, body, params=None, headers=None): """ Creates a bearer token for access without requiring basic authentication. 
``_ @@ -307,19 +309,19 @@ def get_token(self, body, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_security/oauth2/token", params=params, headers=headers, body=body ) @query_params() - def get_user(self, username=None, params=None, headers=None): + async def get_user(self, username=None, params=None, headers=None): """ Retrieves information about users in the native realm and built-in users. ``_ :arg username: A comma-separated list of usernames """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_security", "user", username), params=params, @@ -327,17 +329,17 @@ def get_user(self, username=None, params=None, headers=None): ) @query_params() - def get_user_privileges(self, params=None, headers=None): + async def get_user_privileges(self, params=None, headers=None): """ Retrieves application privileges. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_security/user/_privileges", params=params, headers=headers ) @query_params() - def has_privileges(self, body, user=None, params=None, headers=None): + async def has_privileges(self, body, user=None, params=None, headers=None): """ Determines whether the specified user has a specified list of privileges. ``_ @@ -348,7 +350,7 @@ def has_privileges(self, body, user=None, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_security", "user", user, "_has_privileges"), params=params, @@ -357,7 +359,7 @@ def has_privileges(self, body, user=None, params=None, headers=None): ) @query_params() - def invalidate_api_key(self, body, params=None, headers=None): + async def invalidate_api_key(self, body, params=None, headers=None): """ Invalidates one or more API keys. ``_ @@ -367,12 +369,12 @@ def invalidate_api_key(self, body, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", "/_security/api_key", params=params, headers=headers, body=body ) @query_params() - def invalidate_token(self, body, params=None, headers=None): + async def invalidate_token(self, body, params=None, headers=None): """ Invalidates one or more access tokens or refresh tokens. ``_ @@ -382,7 +384,7 @@ def invalidate_token(self, body, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", "/_security/oauth2/token", params=params, @@ -391,7 +393,7 @@ def invalidate_token(self, body, params=None, headers=None): ) @query_params("refresh") - def put_privileges(self, body, params=None, headers=None): + async def put_privileges(self, body, params=None, headers=None): """ Adds or updates application privileges. 
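# --- Editorial aside: an OAuth2 token round trip with the coroutines above;
# the grant-body values are placeholders. ``try/finally`` makes sure the token
# is invalidated even if the authenticated call fails.
async def token_round_trip(es):
    token = await es.security.get_token(
        body={"grant_type": "password", "username": "elastic", "password": "changeme"}
    )
    try:
        whoami = await es.security.authenticate()
    finally:
        await es.security.invalidate_token(body={"token": token["access_token"]})
    return whoami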
``_ @@ -405,12 +407,12 @@ def put_privileges(self, body, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", "/_security/privilege/", params=params, headers=headers, body=body ) @query_params("refresh") - def put_role(self, name, body, params=None, headers=None): + async def put_role(self, name, body, params=None, headers=None): """ Adds and updates roles in the native realm. ``_ @@ -426,7 +428,7 @@ def put_role(self, name, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_security", "role", name), params=params, @@ -435,7 +437,7 @@ def put_role(self, name, body, params=None, headers=None): ) @query_params("refresh") - def put_role_mapping(self, name, body, params=None, headers=None): + async def put_role_mapping(self, name, body, params=None, headers=None): """ Creates and updates role mappings. ``_ @@ -451,7 +453,7 @@ def put_role_mapping(self, name, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_security", "role_mapping", name), params=params, @@ -460,7 +462,7 @@ def put_role_mapping(self, name, body, params=None, headers=None): ) @query_params("refresh") - def put_user(self, username, body, params=None, headers=None): + async def put_user(self, username, body, params=None, headers=None): """ Adds and updates users in the native realm. These users are commonly referred to as native users. @@ -477,7 +479,7 @@ def put_user(self, username, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_security", "user", username), params=params, @@ -486,12 +488,12 @@ def put_user(self, username, body, params=None, headers=None): ) @query_params() - def get_builtin_privileges(self, params=None, headers=None): + async def get_builtin_privileges(self, params=None, headers=None): """ Retrieves the list of cluster privileges and index privileges that are available in this version of Elasticsearch. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_security/privilege/_builtin", params=params, headers=headers ) diff --git a/elasticsearch/_async/client/slm.py b/elasticsearch/_async/client/slm.py index 915650d8e..336953060 100644 --- a/elasticsearch/_async/client/slm.py +++ b/elasticsearch/_async/client/slm.py @@ -7,7 +7,7 @@ class SlmClient(NamespacedClient): @query_params() - def delete_lifecycle(self, policy_id, params=None, headers=None): + async def delete_lifecycle(self, policy_id, params=None, headers=None): """ Deletes an existing snapshot lifecycle policy. 
``_ @@ -18,7 +18,7 @@ def delete_lifecycle(self, policy_id, params=None, headers=None): if policy_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'policy_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_slm", "policy", policy_id), params=params, @@ -26,7 +26,7 @@ def delete_lifecycle(self, policy_id, params=None, headers=None): ) @query_params() - def execute_lifecycle(self, policy_id, params=None, headers=None): + async def execute_lifecycle(self, policy_id, params=None, headers=None): """ Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time. @@ -38,7 +38,7 @@ def execute_lifecycle(self, policy_id, params=None, headers=None): if policy_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'policy_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_slm", "policy", policy_id, "_execute"), params=params, @@ -46,18 +46,18 @@ def execute_lifecycle(self, policy_id, params=None, headers=None): ) @query_params() - def execute_retention(self, params=None, headers=None): + async def execute_retention(self, params=None, headers=None): """ Deletes any snapshots that are expired according to the policy's retention rules. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_slm/_execute_retention", params=params, headers=headers ) @query_params() - def get_lifecycle(self, policy_id=None, params=None, headers=None): + async def get_lifecycle(self, policy_id=None, params=None, headers=None): """ Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. @@ -66,7 +66,7 @@ def get_lifecycle(self, policy_id=None, params=None, headers=None): :arg policy_id: Comma-separated list of snapshot lifecycle policies to retrieve """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_slm", "policy", policy_id), params=params, @@ -74,18 +74,18 @@ def get_lifecycle(self, policy_id=None, params=None, headers=None): ) @query_params() - def get_stats(self, params=None, headers=None): + async def get_stats(self, params=None, headers=None): """ Returns global and policy-level statistics about actions taken by snapshot lifecycle management. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_slm/stats", params=params, headers=headers ) @query_params() - def put_lifecycle(self, policy_id, body=None, params=None, headers=None): + async def put_lifecycle(self, policy_id, body=None, params=None, headers=None): """ Creates or updates a snapshot lifecycle policy. ``_ @@ -96,7 +96,7 @@ def put_lifecycle(self, policy_id, body=None, params=None, headers=None): if policy_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'policy_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_slm", "policy", policy_id), params=params, @@ -105,31 +105,31 @@ def put_lifecycle(self, policy_id, body=None, params=None, headers=None): ) @query_params() - def get_status(self, params=None, headers=None): + async def get_status(self, params=None, headers=None): """ Retrieves the status of snapshot lifecycle management (SLM). 
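# --- Editorial aside: snapshot lifecycle management via the coroutines above;
# the policy body is schematic and the repository is assumed to exist.
async def nightly_slm(es):
    await es.slm.put_lifecycle(
        policy_id="nightly",
        body={
            "schedule": "0 30 1 * * ?",
            "name": "<nightly-{now/d}>",
            "repository": "my-repo",
        },
    )
    await es.slm.execute_lifecycle(policy_id="nightly")
    return await es.slm.get_stats()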
``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_slm/status", params=params, headers=headers ) @query_params() - def start(self, params=None, headers=None): + async def start(self, params=None, headers=None): """ Turns on snapshot lifecycle management (SLM). ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_slm/start", params=params, headers=headers ) @query_params() - def stop(self, params=None, headers=None): + async def stop(self, params=None, headers=None): """ Turns off snapshot lifecycle management (SLM). ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_slm/stop", params=params, headers=headers ) diff --git a/elasticsearch/_async/client/snapshot.py b/elasticsearch/_async/client/snapshot.py index 55b4b759e..7db0de8f0 100644 --- a/elasticsearch/_async/client/snapshot.py +++ b/elasticsearch/_async/client/snapshot.py @@ -7,7 +7,7 @@ class SnapshotClient(NamespacedClient): @query_params("master_timeout", "wait_for_completion") - def create(self, repository, snapshot, body=None, params=None, headers=None): + async def create(self, repository, snapshot, body=None, params=None, headers=None): """ Creates a snapshot in a repository. ``_ @@ -24,7 +24,7 @@ def create(self, repository, snapshot, body=None, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_snapshot", repository, snapshot), params=params, @@ -33,7 +33,7 @@ def create(self, repository, snapshot, body=None, params=None, headers=None): ) @query_params("master_timeout") - def delete(self, repository, snapshot, params=None, headers=None): + async def delete(self, repository, snapshot, params=None, headers=None): """ Deletes a snapshot. ``_ @@ -47,7 +47,7 @@ def delete(self, repository, snapshot, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_snapshot", repository, snapshot), params=params, @@ -55,7 +55,7 @@ def delete(self, repository, snapshot, params=None, headers=None): ) @query_params("ignore_unavailable", "master_timeout", "verbose") - def get(self, repository, snapshot, params=None, headers=None): + async def get(self, repository, snapshot, params=None, headers=None): """ Returns information about a snapshot. ``_ @@ -74,7 +74,7 @@ def get(self, repository, snapshot, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_snapshot", repository, snapshot), params=params, @@ -82,7 +82,7 @@ def get(self, repository, snapshot, params=None, headers=None): ) @query_params("master_timeout", "timeout") - def delete_repository(self, repository, params=None, headers=None): + async def delete_repository(self, repository, params=None, headers=None): """ Deletes a repository. 
``_ @@ -95,7 +95,7 @@ def delete_repository(self, repository, params=None, headers=None): if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'repository'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_snapshot", repository), params=params, @@ -103,7 +103,7 @@ def delete_repository(self, repository, params=None, headers=None): ) @query_params("local", "master_timeout") - def get_repository(self, repository=None, params=None, headers=None): + async def get_repository(self, repository=None, params=None, headers=None): """ Returns information about a repository. ``_ @@ -114,12 +114,12 @@ def get_repository(self, repository=None, params=None, headers=None): :arg master_timeout: Explicit operation timeout for connection to master node """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_snapshot", repository), params=params, headers=headers ) @query_params("master_timeout", "timeout", "verify") - def create_repository(self, repository, body, params=None, headers=None): + async def create_repository(self, repository, body, params=None, headers=None): """ Creates a repository. ``_ @@ -135,7 +135,7 @@ def create_repository(self, repository, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_snapshot", repository), params=params, @@ -144,7 +144,7 @@ def create_repository(self, repository, body, params=None, headers=None): ) @query_params("master_timeout", "wait_for_completion") - def restore(self, repository, snapshot, body=None, params=None, headers=None): + async def restore(self, repository, snapshot, body=None, params=None, headers=None): """ Restores a snapshot. ``_ @@ -161,7 +161,7 @@ def restore(self, repository, snapshot, body=None, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_snapshot", repository, snapshot, "_restore"), params=params, @@ -170,7 +170,7 @@ def restore(self, repository, snapshot, body=None, params=None, headers=None): ) @query_params("ignore_unavailable", "master_timeout") - def status(self, repository=None, snapshot=None, params=None, headers=None): + async def status(self, repository=None, snapshot=None, params=None, headers=None): """ Returns information about the status of a snapshot. ``_ @@ -183,7 +183,7 @@ def status(self, repository=None, snapshot=None, params=None, headers=None): :arg master_timeout: Explicit operation timeout for connection to master node """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_snapshot", repository, snapshot, "_status"), params=params, @@ -191,7 +191,7 @@ def status(self, repository=None, snapshot=None, params=None, headers=None): ) @query_params("master_timeout", "timeout") - def verify_repository(self, repository, params=None, headers=None): + async def verify_repository(self, repository, params=None, headers=None): """ Verifies a repository. 
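# --- Editorial aside: a repository + snapshot + restore flow with the
# coroutines above. The filesystem repository settings are schematic.
async def backup_and_restore(es):
    await es.snapshot.create_repository(
        repository="my-repo",
        body={"type": "fs", "settings": {"location": "/mount/backups"}},
    )
    await es.snapshot.create(
        repository="my-repo", snapshot="snap-1", wait_for_completion=True
    )
    return await es.snapshot.restore(
        repository="my-repo",
        snapshot="snap-1",
        body={"indices": "my-index"},
        wait_for_completion=True,
    )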
``_ @@ -204,7 +204,7 @@ def verify_repository(self, repository, params=None, headers=None): if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'repository'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_snapshot", repository, "_verify"), params=params, @@ -212,10 +212,10 @@ def verify_repository(self, repository, params=None, headers=None): ) @query_params("master_timeout", "timeout") - def cleanup_repository(self, repository, params=None, headers=None): + async def cleanup_repository(self, repository, params=None, headers=None): """ Removes stale data from repository. - ``_ + ``_ :arg repository: A repository name :arg master_timeout: Explicit operation timeout for connection @@ -225,7 +225,7 @@ def cleanup_repository(self, repository, params=None, headers=None): if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'repository'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_snapshot", repository, "_cleanup"), params=params, diff --git a/elasticsearch/_async/client/sql.py b/elasticsearch/_async/client/sql.py index e043fee3d..ea581e2bb 100644 --- a/elasticsearch/_async/client/sql.py +++ b/elasticsearch/_async/client/sql.py @@ -7,7 +7,7 @@ class SqlClient(NamespacedClient): @query_params() - def clear_cursor(self, body, params=None, headers=None): + async def clear_cursor(self, body, params=None, headers=None): """ Clears the SQL cursor ``_ @@ -18,12 +18,12 @@ def clear_cursor(self, body, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_sql/close", params=params, headers=headers, body=body ) @query_params("format") - def query(self, body, params=None, headers=None): + async def query(self, body, params=None, headers=None): """ Executes a SQL request ``_ @@ -36,12 +36,12 @@ def query(self, body, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_sql", params=params, headers=headers, body=body ) @query_params() - def translate(self, body, params=None, headers=None): + async def translate(self, body, params=None, headers=None): """ Translates SQL into Elasticsearch queries ``_ @@ -51,6 +51,6 @@ def translate(self, body, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_sql/translate", params=params, headers=headers, body=body ) diff --git a/elasticsearch/_async/client/ssl.py b/elasticsearch/_async/client/ssl.py index e23d47375..a6c14f5dc 100644 --- a/elasticsearch/_async/client/ssl.py +++ b/elasticsearch/_async/client/ssl.py @@ -7,12 +7,12 @@ class SslClient(NamespacedClient): @query_params() - def certificates(self, params=None, headers=None): + async def certificates(self, params=None, headers=None): """ Retrieves information about the X.509 certificates used to encrypt communications in the cluster. 
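# --- Editorial aside: the SQL endpoints after conversion. ``format`` is the
# only query param on ``query`` in the hunk above; "logs" is a placeholder
# index name.
async def sql_demo(es):
    resp = await es.sql.query(body={"query": "SELECT * FROM logs LIMIT 5"})
    dsl = await es.sql.translate(body={"query": "SELECT count(*) FROM logs"})
    if "cursor" in resp:
        await es.sql.clear_cursor(body={"cursor": resp["cursor"]})
    return resp, dsl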
``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_ssl/certificates", params=params, headers=headers ) diff --git a/elasticsearch/_async/client/tasks.py b/elasticsearch/_async/client/tasks.py index 426bc8c8c..c4feeb6e2 100644 --- a/elasticsearch/_async/client/tasks.py +++ b/elasticsearch/_async/client/tasks.py @@ -15,7 +15,7 @@ class TasksClient(NamespacedClient): "timeout", "wait_for_completion", ) - def list(self, params=None, headers=None): + async def list(self, params=None, headers=None): """ Returns a list of tasks. ``_ @@ -34,12 +34,12 @@ def list(self, params=None, headers=None): :arg wait_for_completion: Wait for the matching tasks to complete (default: false) """ - return self.transport.perform_request( + return await self.transport.perform_request( "GET", "/_tasks", params=params, headers=headers ) @query_params("actions", "nodes", "parent_task_id", "wait_for_completion") - def cancel(self, task_id=None, params=None, headers=None): + async def cancel(self, task_id=None, params=None, headers=None): """ Cancels a task, if it can be cancelled through an API. ``_ @@ -57,7 +57,7 @@ def cancel(self, task_id=None, params=None, headers=None): cancellation of the task and its descendant tasks is completed. Defaults to false """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_tasks", task_id, "_cancel"), params=params, @@ -65,7 +65,7 @@ def cancel(self, task_id=None, params=None, headers=None): ) @query_params("timeout", "wait_for_completion") - def get(self, task_id, params=None, headers=None): + async def get(self, task_id, params=None, headers=None): """ Returns information about a task. ``_ @@ -79,6 +79,6 @@ def get(self, task_id, params=None, headers=None): if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'task_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_tasks", task_id), params=params, headers=headers ) diff --git a/elasticsearch/_async/client/transform.py b/elasticsearch/_async/client/transform.py index 23159bbb3..b368da87a 100644 --- a/elasticsearch/_async/client/transform.py +++ b/elasticsearch/_async/client/transform.py @@ -7,7 +7,7 @@ class TransformClient(NamespacedClient): @query_params("force") - def delete_transform(self, transform_id, params=None, headers=None): + async def delete_transform(self, transform_id, params=None, headers=None): """ Deletes an existing transform. ``_ @@ -22,7 +22,7 @@ def delete_transform(self, transform_id, params=None, headers=None): "Empty value passed for a required argument 'transform_id'." ) - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_transform", transform_id), params=params, @@ -30,7 +30,7 @@ def delete_transform(self, transform_id, params=None, headers=None): ) @query_params("allow_no_match", "from_", "size") - def get_transform(self, transform_id=None, params=None, headers=None): + async def get_transform(self, transform_id=None, params=None, headers=None): """ Retrieves configuration information for transforms. 
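# --- Editorial aside: one payoff of this conversion is that independent calls
# can now run concurrently. ``asyncio.gather`` issues both requests before
# either response arrives, instead of serializing them as the sync client must.
import asyncio


async def cluster_overview(es):
    tasks, certs = await asyncio.gather(
        es.tasks.list(),
        es.ssl.certificates(),
    )
    return tasks, certs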
``_ @@ -49,7 +49,7 @@ def get_transform(self, transform_id=None, params=None, headers=None): if "from_" in params: params["from"] = params.pop("from_") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_transform", transform_id), params=params, @@ -57,7 +57,7 @@ def get_transform(self, transform_id=None, params=None, headers=None): ) @query_params("allow_no_match", "from_", "size") - def get_transform_stats(self, transform_id, params=None, headers=None): + async def get_transform_stats(self, transform_id, params=None, headers=None): """ Retrieves usage information for transforms. ``_ @@ -80,7 +80,7 @@ def get_transform_stats(self, transform_id, params=None, headers=None): "Empty value passed for a required argument 'transform_id'." ) - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_transform", transform_id, "_stats"), params=params, @@ -88,7 +88,7 @@ def get_transform_stats(self, transform_id, params=None, headers=None): ) @query_params() - def preview_transform(self, body, params=None, headers=None): + async def preview_transform(self, body, params=None, headers=None): """ Previews a transform. ``_ @@ -98,12 +98,12 @@ def preview_transform(self, body, params=None, headers=None): if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_transform/_preview", params=params, headers=headers, body=body ) @query_params("defer_validation") - def put_transform(self, transform_id, body, params=None, headers=None): + async def put_transform(self, transform_id, body, params=None, headers=None): """ Instantiates a transform. ``_ @@ -117,7 +117,7 @@ def put_transform(self, transform_id, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_transform", transform_id), params=params, @@ -126,7 +126,7 @@ def put_transform(self, transform_id, body, params=None, headers=None): ) @query_params("timeout") - def start_transform(self, transform_id, params=None, headers=None): + async def start_transform(self, transform_id, params=None, headers=None): """ Starts one or more transforms. ``_ @@ -140,7 +140,7 @@ def start_transform(self, transform_id, params=None, headers=None): "Empty value passed for a required argument 'transform_id'." ) - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_transform", transform_id, "_start"), params=params, @@ -154,7 +154,7 @@ def start_transform(self, transform_id, params=None, headers=None): "wait_for_checkpoint", "wait_for_completion", ) - def stop_transform(self, transform_id, params=None, headers=None): + async def stop_transform(self, transform_id, params=None, headers=None): """ Stops one or more transforms. ``_ @@ -177,7 +177,7 @@ def stop_transform(self, transform_id, params=None, headers=None): "Empty value passed for a required argument 'transform_id'." 
) - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_transform", transform_id, "_stop"), params=params, @@ -185,7 +185,7 @@ def stop_transform(self, transform_id, params=None, headers=None): ) @query_params("defer_validation") - def update_transform(self, transform_id, body, params=None, headers=None): + async def update_transform(self, transform_id, body, params=None, headers=None): """ Updates certain properties of a transform. ``_ @@ -199,7 +199,7 @@ def update_transform(self, transform_id, body, params=None, headers=None): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") - return self.transport.perform_request( + return await self.transport.perform_request( "POST", _make_path("_transform", transform_id, "_update"), params=params, diff --git a/elasticsearch/_async/client/utils.py b/elasticsearch/_async/client/utils.py index 11082db47..f28944f3c 100644 --- a/elasticsearch/_async/client/utils.py +++ b/elasticsearch/_async/client/utils.py @@ -2,129 +2,12 @@ # Elasticsearch B.V licenses this file to you under the Apache 2.0 License. # See the LICENSE file in the project root for more information -from __future__ import unicode_literals - -import weakref -from datetime import date, datetime -from functools import wraps -from ..compat import string_types, quote, PY2 - -# parts of URL to be omitted -SKIP_IN_PATH = (None, "", b"", [], ()) - - -def _escape(value): - """ - Escape a single value of a URL string or a query parameter. If it is a list - or tuple, turn it into a comma-separated string first. - """ - - # make sequences into comma-separated stings - if isinstance(value, (list, tuple)): - value = ",".join(value) - - # dates and datetimes into isoformat - elif isinstance(value, (date, datetime)): - value = value.isoformat() - - # make bools into true/false strings - elif isinstance(value, bool): - value = str(value).lower() - - # don't decode bytestrings - elif isinstance(value, bytes): - return value - - # encode strings to utf-8 - if isinstance(value, string_types): - if PY2 and isinstance(value, unicode): # noqa: F821 - return value.encode("utf-8") - if not PY2 and isinstance(value, str): - return value.encode("utf-8") - - return str(value) - - -def _make_path(*parts): - """ - Create a URL string from parts, omit all `None` values and empty strings. - Convert lists and tuples to comma separated values. - """ - # TODO: maybe only allow some parts to be lists/tuples ? - return "/" + "/".join( - # preserve ',' and '*' in url for nicer URLs in logs - quote(_escape(p), b",*") - for p in parts - if p not in SKIP_IN_PATH - ) - - -# parameters that apply to all methods -GLOBAL_PARAMS = ("pretty", "human", "error_trace", "format", "filter_path") - - -def query_params(*es_query_params): - """ - Decorator that pops all accepted parameters from method's kwargs and puts - them in the params argument. 
- """ - - def _wrapper(func): - @wraps(func) - def _wrapped(*args, **kwargs): - params = (kwargs.pop("params", None) or {}).copy() - headers = { - k.lower(): v - for k, v in (kwargs.pop("headers", None) or {}).copy().items() - } - - if "opaque_id" in kwargs: - headers["x-opaque-id"] = kwargs.pop("opaque_id") - - for p in es_query_params + GLOBAL_PARAMS: - if p in kwargs: - v = kwargs.pop(p) - if v is not None: - params[p] = _escape(v) - - # don't treat ignore, request_timeout, and opaque_id as other params to avoid escaping - for p in ("ignore", "request_timeout"): - if p in kwargs: - params[p] = kwargs.pop(p) - return func(*args, params=params, headers=headers, **kwargs) - - return _wrapped - - return _wrapper - - -def _bulk_body(serializer, body): - # if not passed in a string, serialize items and join by newline - if not isinstance(body, string_types): - body = "\n".join(map(serializer.dumps, body)) - - # bulk body must end with a newline - if isinstance(body, bytes): - if not body.endswith(b"\n"): - body += b"\n" - elif isinstance(body, string_types) and not body.endswith("\n"): - body += "\n" - - return body - - -class NamespacedClient(object): - def __init__(self, client): - self.client = client - - @property - def transport(self): - return self.client.transport - - -class AddonClient(NamespacedClient): - @classmethod - def infect_client(cls, client): - addon = cls(weakref.proxy(client)) - setattr(client, cls.namespace, addon) - return client +from ...client.utils import ( # noqa + _make_path, + _normalize_hosts, + _escape, + _bulk_body, + query_params, + SKIP_IN_PATH, + NamespacedClient, +) diff --git a/elasticsearch/_async/client/watcher.py b/elasticsearch/_async/client/watcher.py index 654c4b489..ac4b20883 100644 --- a/elasticsearch/_async/client/watcher.py +++ b/elasticsearch/_async/client/watcher.py @@ -7,7 +7,7 @@ class WatcherClient(NamespacedClient): @query_params() - def ack_watch(self, watch_id, action_id=None, params=None, headers=None): + async def ack_watch(self, watch_id, action_id=None, params=None, headers=None): """ Acknowledges a watch, manually throttling the execution of the watch's actions. ``_ @@ -19,7 +19,7 @@ def ack_watch(self, watch_id, action_id=None, params=None, headers=None): if watch_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'watch_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_watcher", "watch", watch_id, "_ack", action_id), params=params, @@ -27,7 +27,7 @@ def ack_watch(self, watch_id, action_id=None, params=None, headers=None): ) @query_params() - def activate_watch(self, watch_id, params=None, headers=None): + async def activate_watch(self, watch_id, params=None, headers=None): """ Activates a currently inactive watch. ``_ @@ -37,7 +37,7 @@ def activate_watch(self, watch_id, params=None, headers=None): if watch_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'watch_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_watcher", "watch", watch_id, "_activate"), params=params, @@ -45,7 +45,7 @@ def activate_watch(self, watch_id, params=None, headers=None): ) @query_params() - def deactivate_watch(self, watch_id, params=None, headers=None): + async def deactivate_watch(self, watch_id, params=None, headers=None): """ Deactivates a currently active watch. 
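# --- Editorial aside: after the rewrite above, the async package no longer
# duplicates these helpers; both import paths resolve to the very same objects.
from elasticsearch._async.client.utils import _make_path as async_make_path
from elasticsearch.client.utils import _make_path

assert async_make_path is _make_path
# ``None`` parts are in SKIP_IN_PATH, so optional path segments collapse cleanly:
assert _make_path("_transform", "my-transform", "_start") == "/_transform/my-transform/_start"
assert _make_path("_tasks", None, "_cancel") == "/_tasks/_cancel"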
``_ @@ -55,7 +55,7 @@ def deactivate_watch(self, watch_id, params=None, headers=None): if watch_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'watch_id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_watcher", "watch", watch_id, "_deactivate"), params=params, @@ -63,7 +63,7 @@ def deactivate_watch(self, watch_id, params=None, headers=None): ) @query_params() - def delete_watch(self, id, params=None, headers=None): + async def delete_watch(self, id, params=None, headers=None): """ Removes a watch from Watcher. ``_ @@ -73,7 +73,7 @@ def delete_watch(self, id, params=None, headers=None): if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "DELETE", _make_path("_watcher", "watch", id), params=params, @@ -81,7 +81,7 @@ def delete_watch(self, id, params=None, headers=None): ) @query_params("debug") - def execute_watch(self, body=None, id=None, params=None, headers=None): + async def execute_watch(self, body=None, id=None, params=None, headers=None): """ Forces the execution of a stored watch. ``_ @@ -91,7 +91,7 @@ def execute_watch(self, body=None, id=None, params=None, headers=None): :arg debug: indicates whether the watch should execute in debug mode """ - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_watcher", "watch", id, "_execute"), params=params, @@ -100,7 +100,7 @@ def execute_watch(self, body=None, id=None, params=None, headers=None): ) @query_params() - def get_watch(self, id, params=None, headers=None): + async def get_watch(self, id, params=None, headers=None): """ Retrieves a watch by its ID. ``_ @@ -110,12 +110,12 @@ def get_watch(self, id, params=None, headers=None): if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "GET", _make_path("_watcher", "watch", id), params=params, headers=headers ) @query_params("active", "if_primary_term", "if_seq_no", "version") - def put_watch(self, id, body=None, params=None, headers=None): + async def put_watch(self, id, body=None, params=None, headers=None): """ Creates a new watch, or updates an existing one. ``_ @@ -132,7 +132,7 @@ def put_watch(self, id, body=None, params=None, headers=None): if id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'id'.") - return self.transport.perform_request( + return await self.transport.perform_request( "PUT", _make_path("_watcher", "watch", id), params=params, @@ -141,17 +141,17 @@ def put_watch(self, id, body=None, params=None, headers=None): ) @query_params() - def start(self, params=None, headers=None): + async def start(self, params=None, headers=None): """ Starts Watcher if it is not already running. ``_ """ - return self.transport.perform_request( + return await self.transport.perform_request( "POST", "/_watcher/_start", params=params, headers=headers ) @query_params("emit_stacktraces") - def stats(self, metric=None, params=None, headers=None): + async def stats(self, metric=None, params=None, headers=None): """ Retrieves the current Watcher metrics. 
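# --- Editorial aside: registering and force-executing a watch with the
# coroutines above; the watch body is a schematic no-op definition.
async def watch_demo(es):
    await es.watcher.put_watch(
        id="cluster-health-watch",
        body={
            "trigger": {"schedule": {"interval": "10m"}},
            "input": {"simple": {}},
            "actions": {},
        },
        active=True,
    )
    result = await es.watcher.execute_watch(id="cluster-health-watch", debug=True)
    await es.watcher.delete_watch(id="cluster-health-watch")
    return result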
         ``_
@@ -162,7 +162,7 @@ def stats(self, metric=None, params=None, headers=None):
         :arg emit_stacktraces: Emits stack traces of currently running watches
         """
-        return self.transport.perform_request(
+        return await self.transport.perform_request(
             "GET",
             _make_path("_watcher", "stats", metric),
             params=params,
@@ -170,11 +170,11 @@ def stats(self, metric=None, params=None, headers=None):
     @query_params()
-    def stop(self, params=None, headers=None):
+    async def stop(self, params=None, headers=None):
         """
         Stops Watcher if it is running.
         ``_
         """
-        return self.transport.perform_request(
+        return await self.transport.perform_request(
             "POST", "/_watcher/_stop", params=params, headers=headers
         )
diff --git a/elasticsearch/_async/client/xpack.py b/elasticsearch/_async/client/xpack.py
index 61560fd4a..6715535de 100644
--- a/elasticsearch/_async/client/xpack.py
+++ b/elasticsearch/_async/client/xpack.py
@@ -11,7 +11,7 @@ def __getattr__(self, attr_name):
     # AUTO-GENERATED-API-DEFINITIONS #
 
     @query_params("categories")
-    def info(self, params=None, headers=None):
+    async def info(self, params=None, headers=None):
         """
         Retrieves information about the installed X-Pack features.
         ``_
@@ -19,18 +19,18 @@ def info(self, params=None, headers=None):
         :arg categories: Comma-separated list of info categories. Can be any
             of: build, license, features
         """
-        return self.transport.perform_request(
+        return await self.transport.perform_request(
             "GET", "/_xpack", params=params, headers=headers
         )
 
     @query_params("master_timeout")
-    def usage(self, params=None, headers=None):
+    async def usage(self, params=None, headers=None):
         """
         Retrieves usage information about the installed X-Pack features.
         ``_
 
         :arg master_timeout: Specify timeout for watch write operation
         """
-        return self.transport.perform_request(
+        return await self.transport.perform_request(
             "GET", "/_xpack/usage", params=params, headers=headers
         )
diff --git a/elasticsearch/client/cluster.py b/elasticsearch/client/cluster.py
index 600b89e70..7f2e9e707 100644
--- a/elasticsearch/client/cluster.py
+++ b/elasticsearch/client/cluster.py
@@ -243,7 +243,7 @@ def allocation_explain(self, body=None, params=None, headers=None):
     def delete_component_template(self, name, params=None, headers=None):
         """
         Deletes a component template
-        ``_
+        ``_
 
         :arg name: The name of the template
         :arg master_timeout: Specify timeout for connection to master
@@ -263,7 +263,7 @@ def delete_component_template(self, name, params=None, headers=None):
     def get_component_template(self, name=None, params=None, headers=None):
         """
         Returns one or more component templates
-        ``_
+        ``_
 
         :arg name: The comma-separated names of the component templates
         :arg local: Return local information, do not retrieve the state
@@ -282,7 +282,7 @@ def get_component_template(self, name=None, params=None, headers=None):
     def put_component_template(self, name, body, params=None, headers=None):
         """
         Creates or updates a component template
-        ``_
+        ``_
 
         :arg name: The name of the template
         :arg body: The template definition
@@ -307,7 +307,7 @@ def put_component_template(self, name, body, params=None, headers=None):
     def exists_component_template(self, name, params=None, headers=None):
         """
         Returns information about whether a particular component template exists
-        ``_
+        ``_
 
         :arg name: The name of the template
         :arg local: Return local information, do not retrieve the state
diff --git a/elasticsearch/client/indices.py b/elasticsearch/client/indices.py
index 68efaf626..f27ac59cf 100644
--- a/elasticsearch/client/indices.py
+++ b/elasticsearch/client/indices.py
@@ -1236,19 +1236,6 @@ def delete_data_stream(self, name, params=None, headers=None):
             "DELETE", _make_path("_data_stream", name), params=params, headers=headers
         )
 
-    @query_params()
-    def get_data_streams(self, name=None, params=None, headers=None):
-        """
-        Returns data streams.
-        ``_
-
-        :arg name: The name or wildcard expression of the requested data
-            streams
-        """
-        return self.transport.perform_request(
-            "GET", _make_path("_data_streams", name), params=params, headers=headers
-        )
-
     @query_params("master_timeout", "timeout")
     def delete_index_template(self, name, params=None, headers=None):
         """
@@ -1362,3 +1349,40 @@ def simulate_index_template(self, name, body=None, params=None, headers=None):
             headers=headers,
             body=body,
         )
+
+    @query_params()
+    def get_data_stream(self, name=None, params=None, headers=None):
+        """
+        Returns data streams.
+        ``_
+
+        :arg name: The name or wildcard expression of the requested data
+            streams
+        """
+        return self.transport.perform_request(
+            "GET", _make_path("_data_stream", name), params=params, headers=headers
+        )
+
+    @query_params("cause", "create", "master_timeout")
+    def simulate_template(self, body=None, name=None, params=None, headers=None):
+        """
+        Simulate resolving the given template name or body
+        ``_
+
+        :arg body: New index template definition to be simulated, if no
+            index template name is specified
+        :arg name: The name of the index template
+        :arg cause: User defined reason for dry-run creating the new
+            template for simulation purposes
+        :arg create: Whether the index template provided in the body should
+            only be dry-run added if new, or can also replace an existing one
+        :arg master_timeout: Specify timeout for connection to master
+        """
+        return self.transport.perform_request(
+            "POST",
+            _make_path("_index_template", "_simulate", name),
+            params=params,
+            headers=headers,
+            body=body,
+        )
diff --git a/elasticsearch/client/ml.py b/elasticsearch/client/ml.py
index ce594396a..59468dfca 100644
--- a/elasticsearch/client/ml.py
+++ b/elasticsearch/client/ml.py
@@ -115,13 +115,19 @@ def delete_datafeed(self, datafeed_id, params=None, headers=None):
         )
 
     @query_params()
-    def delete_expired_data(self, params=None, headers=None):
+    def delete_expired_data(self, body=None, params=None, headers=None):
         """
         Deletes expired and unused machine learning data.
         ``_
+
+        :arg body: Parameters for deleting expired data
         """
         return self.transport.perform_request(
-            "DELETE", "/_ml/_delete_expired_data", params=params, headers=headers
+            "DELETE",
+            "/_ml/_delete_expired_data",
+            params=params,
+            headers=headers,
+            body=body,
         )
 
     @query_params()
@@ -1012,6 +1018,7 @@ def update_job(self, job_id, body, params=None, headers=None):
     def validate(self, body, params=None, headers=None):
         """
         Validates an anomaly detection job.
+        ``_
 
         :arg body: The job config
         """
@@ -1030,6 +1037,7 @@ def validate(self, body, params=None, headers=None):
     def validate_detector(self, body, params=None, headers=None):
         """
         Validates an anomaly detection detector.
+        ``_
 
         :arg body: The detector
         """
diff --git a/elasticsearch/client/snapshot.py b/elasticsearch/client/snapshot.py
index 55b4b759e..5ea6f3010 100644
--- a/elasticsearch/client/snapshot.py
+++ b/elasticsearch/client/snapshot.py
@@ -215,7 +215,7 @@ def verify_repository(self, repository, params=None, headers=None):
     def cleanup_repository(self, repository, params=None, headers=None):
         """
         Removes stale data from repository.
-        ``_
+        ``_
 
         :arg repository: A repository name
         :arg master_timeout: Explicit operation timeout for connection

From a979c9c4b614e8997e229c00f465b672267bff60 Mon Sep 17 00:00:00 2001
From: Seth Michael Larson
Date: Fri, 15 May 2020 09:37:49 -0500
Subject: [PATCH 5/5] Add test suite for async API

---
 .../test_async/test_server/__init__.py       |   3 +
 .../test_async/test_server/conftest.py       |  61 +++++
 .../test_async/test_server/test_clients.py   |  30 +++
 .../test_server/test_rest_api_spec.py        | 210 ++++++++++++++++++
 .../test_server/test_rest_api_spec.py        |  12 +-
 5 files changed, 315 insertions(+), 1 deletion(-)

diff --git a/test_elasticsearch/test_async/test_server/__init__.py b/test_elasticsearch/test_async/test_server/__init__.py
index e69de29bb..1a3c439ef 100644
--- a/test_elasticsearch/test_async/test_server/__init__.py
+++ b/test_elasticsearch/test_async/test_server/__init__.py
@@ -0,0 +1,3 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
diff --git a/test_elasticsearch/test_async/test_server/conftest.py b/test_elasticsearch/test_async/test_server/conftest.py
index e69de29bb..f97b17627 100644
--- a/test_elasticsearch/test_async/test_server/conftest.py
+++ b/test_elasticsearch/test_async/test_server/conftest.py
@@ -0,0 +1,61 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+import os
+import pytest
+import asyncio
+import elasticsearch
+
+pytestmark = pytest.mark.asyncio
+
+
+@pytest.fixture(scope="function")
+async def async_client():
+    client = None
+    try:
+        if not hasattr(elasticsearch, "AsyncElasticsearch"):
+            pytest.skip("test requires 'AsyncElasticsearch'")
+
+        kw = {
+            "timeout": 30,
+            "ca_certs": ".ci/certs/ca.pem",
+            "connection_class": elasticsearch.AIOHttpConnection,
+        }
+
+        client = elasticsearch.AsyncElasticsearch(
+            [os.environ.get("ELASTICSEARCH_HOST", {})], **kw
+        )
+
+        # wait for yellow status
+        for _ in range(100):
+            try:
+                await client.cluster.health(wait_for_status="yellow")
+                break
+            except elasticsearch.ConnectionError:
+                await asyncio.sleep(0.1)
+        else:
+            # timeout
+            pytest.skip("Elasticsearch failed to start.")
+
+        yield client
+
+    finally:
+        if client:
+            version = tuple(
+                [
+                    int(x) if x.isdigit() else 999
+                    for x in (await client.info())["version"]["number"].split(".")
+                ]
+            )
+
+            expand_wildcards = ["open", "closed"]
+            if version >= (7, 7):
+                expand_wildcards.append("hidden")
+
+            await client.indices.delete(
+                index="*", ignore=404, expand_wildcards=expand_wildcards
+            )
+            await client.indices.delete_template(name="*", ignore=404)
+            await client.indices.delete_index_template(name="*", ignore=404)
+            await client.close()
diff --git a/test_elasticsearch/test_async/test_server/test_clients.py b/test_elasticsearch/test_async/test_server/test_clients.py
index e69de29bb..c7b4279a8 100644
--- a/test_elasticsearch/test_async/test_server/test_clients.py
+++ b/test_elasticsearch/test_async/test_server/test_clients.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+from __future__ import unicode_literals
+import pytest
+
+pytestmark = pytest.mark.asyncio
+
+
+class TestUnicode:
+    async def test_indices_analyze(self, async_client):
+        await async_client.indices.analyze(body='{"text": "привет"}')
+
+
+class TestBulk:
+    async def test_bulk_works_with_string_body(self, async_client):
+        docs = '{ "index" : { "_index" : "bulk_test_index", "_id" : "1" } }\n{"answer": 42}'
+        response = await async_client.bulk(body=docs)
+
+        assert response["errors"] is False
+        assert len(response["items"]) == 1
+
+    async def test_bulk_works_with_bytestring_body(self, async_client):
+        docs = b'{ "index" : { "_index" : "bulk_test_index", "_id" : "2" } }\n{"answer": 42}'
+        response = await async_client.bulk(body=docs)
+
+        assert response["errors"] is False
+        assert len(response["items"]) == 1
diff --git a/test_elasticsearch/test_async/test_server/test_rest_api_spec.py b/test_elasticsearch/test_async/test_server/test_rest_api_spec.py
index e69de29bb..d849f46da 100644
--- a/test_elasticsearch/test_async/test_server/test_rest_api_spec.py
+++ b/test_elasticsearch/test_async/test_server/test_rest_api_spec.py
@@ -0,0 +1,210 @@
+# Licensed to Elasticsearch B.V under one or more agreements.
+# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
+# See the LICENSE file in the project root for more information
+
+"""
+Dynamically generated set of TestCases based on a set of yaml files describing
+some integration tests. These files are shared among all official Elasticsearch
+clients.
+"""
+import pytest
+from shutil import rmtree
+import warnings
+import inspect
+
+from elasticsearch import RequestError, ElasticsearchDeprecationWarning
+from elasticsearch.helpers.test import _get_version
+from ...test_server.test_rest_api_spec import (
+    YamlRunner,
+    YAML_TEST_SPECS,
+    InvalidActionType,
+    RUN_ASYNC_REST_API_TESTS,
+    PARAMS_RENAMES,
+    IMPLEMENTED_FEATURES,
+)
+
+pytestmark = pytest.mark.asyncio
+
+XPACK_FEATURES = None
+ES_VERSION = None
+
+
+async def await_if_coro(x):
+    if inspect.iscoroutine(x):
+        return await x
+    return x
+
+
+class AsyncYamlRunner(YamlRunner):
+    async def setup(self):
+        if self._setup_code:
+            await self.run_code(self._setup_code)
+
+    async def teardown(self):
+        if self._teardown_code:
+            await self.run_code(self._teardown_code)
+
+        for repo, definition in (
+            await self.client.snapshot.get_repository(repository="_all")
+        ).items():
+            await self.client.snapshot.delete_repository(repository=repo)
+            if definition["type"] == "fs":
+                rmtree(
+                    "/tmp/%s" % definition["settings"]["location"], ignore_errors=True
+                )
+
+        # stop and remove all ML stuff
+        if await self._feature_enabled("ml"):
+            await self.client.ml.stop_datafeed(datafeed_id="*", force=True)
+            for feed in (await self.client.ml.get_datafeeds(datafeed_id="*"))[
+                "datafeeds"
+            ]:
+                await self.client.ml.delete_datafeed(datafeed_id=feed["datafeed_id"])
+
+            await self.client.ml.close_job(job_id="*", force=True)
+            for job in (await self.client.ml.get_jobs(job_id="*"))["jobs"]:
+                await self.client.ml.delete_job(
+                    job_id=job["job_id"], wait_for_completion=True, force=True
+                )
+
+        # stop and remove all Rollup jobs
+        if await self._feature_enabled("rollup"):
+            for rollup in (await self.client.rollup.get_jobs(id="*"))["jobs"]:
+                await self.client.rollup.stop_job(
+                    id=rollup["config"]["id"], wait_for_completion=True
+                )
+                await self.client.rollup.delete_job(id=rollup["config"]["id"])
+
+    async def es_version(self):
+        global ES_VERSION
+        if ES_VERSION is None:
+            version_string = (await self.client.info())["version"]["number"]
+            if "." not in version_string:
+                return ()
+            version = version_string.strip().split(".")
+            ES_VERSION = tuple(int(v) if v.isdigit() else 999 for v in version)
+        return ES_VERSION
+
+    async def run(self):
+        try:
+            await self.setup()
+            await self.run_code(self._run_code)
+        finally:
+            await self.teardown()
+
+    async def run_code(self, test):
+        """ Execute an instruction based on its type. """
+        print(test)
+        for action in test:
+            assert len(action) == 1
+            action_type, action = list(action.items())[0]
+
+            if hasattr(self, "run_" + action_type):
+                await await_if_coro(getattr(self, "run_" + action_type)(action))
+            else:
+                raise InvalidActionType(action_type)
+
+    async def run_do(self, action):
+        api = self.client
+        headers = action.pop("headers", None)
+        catch = action.pop("catch", None)
+        warn = action.pop("warnings", ())
+        allowed_warnings = action.pop("allowed_warnings", ())
+        assert len(action) == 1
+
+        method, args = list(action.items())[0]
+        args["headers"] = headers
+
+        # locate api endpoint
+        for m in method.split("."):
+            assert hasattr(api, m)
+            api = getattr(api, m)
+
+        # some parameters had to be renamed to not clash with python builtins,
+        # compensate
+        for k in PARAMS_RENAMES:
+            if k in args:
+                args[PARAMS_RENAMES[k]] = args.pop(k)
+
+        # resolve vars
+        for k in args:
+            args[k] = self._resolve(args[k])
+
+        warnings.simplefilter("always", category=ElasticsearchDeprecationWarning)
+        with warnings.catch_warnings(record=True) as caught_warnings:
+            try:
+                self.last_response = await api(**args)
+            except Exception as e:
+                if not catch:
+                    raise
+                self.run_catch(catch, e)
+            else:
+                if catch:
+                    raise AssertionError(
+                        "Failed to catch %r in %r." % (catch, self.last_response)
+                    )
+
+        # Filter out warnings raised by other components.
+        caught_warnings = [
+            str(w.message)
+            for w in caught_warnings
+            if w.category == ElasticsearchDeprecationWarning
+            and str(w.message) not in allowed_warnings
+        ]
+
+        # Sorting removes any dependence on the order the warnings were raised in;
+        # we only care that all expected warnings are raised by the single API call.
+        if warn and sorted(warn) != sorted(caught_warnings):
+            raise AssertionError(
+                "Expected warnings not equal to actual warnings: expected=%r actual=%r"
+                % (warn, caught_warnings)
+            )
+
+    async def run_skip(self, skip):
+        if "features" in skip:
+            features = skip["features"]
+            if not isinstance(features, (tuple, list)):
+                features = [features]
+            for feature in features:
+                if feature in IMPLEMENTED_FEATURES:
+                    continue
+                pytest.skip("feature '%s' is not supported" % feature)
+
+        if "version" in skip:
+            version, reason = skip["version"], skip["reason"]
+            if version == "all":
+                pytest.skip(reason)
+            min_version, max_version = version.split("-")
+            min_version = _get_version(min_version) or (0,)
+            max_version = _get_version(max_version) or (999,)
+            if min_version <= (await self.es_version()) <= max_version:
+                pytest.skip(reason)
+
+    async def _feature_enabled(self, name):
+        global XPACK_FEATURES
+        if XPACK_FEATURES is None:
+            try:
+                xinfo = await self.client.xpack.info()
+                XPACK_FEATURES = set(
+                    f for f in xinfo["features"] if xinfo["features"][f]["enabled"]
+                )
+                IMPLEMENTED_FEATURES.add("xpack")
+            except RequestError:
+                XPACK_FEATURES = set()
+                IMPLEMENTED_FEATURES.add("no_xpack")
+        return name in XPACK_FEATURES
+
+
+@pytest.fixture(scope="function")
+def async_runner(async_client):
+    return AsyncYamlRunner(async_client)
+
+
+@pytest.mark.parametrize("test_spec", YAML_TEST_SPECS)
+async def test_rest_api_spec(test_spec, async_runner):
+    if not RUN_ASYNC_REST_API_TESTS:
+        pytest.skip("Skipped running async REST API tests")
+    if test_spec.get("skip", False):
+        pytest.skip("Manually skipped in 'SKIP_TESTS'")
+    async_runner.use_spec(test_spec)
+    await async_runner.run()
diff --git a/test_elasticsearch/test_server/test_rest_api_spec.py b/test_elasticsearch/test_server/test_rest_api_spec.py
index cd3d24324..5d69fab19 100644
--- a/test_elasticsearch/test_server/test_rest_api_spec.py
+++ b/test_elasticsearch/test_server/test_rest_api_spec.py
@@ -7,7 +7,9 @@
 some integration tests. These files are shared among all official Elasticsearch
 clients.
 """
+import sys
 import re
+import os
 from os import walk, environ
 from os.path import exists, join, dirname, pardir, relpath
 import yaml
@@ -58,11 +60,17 @@
     "indices/put_template/10_basic[4]",
     # depends on order of response JSON which is random
     "indices/simulate_index_template/10_basic[1]",
+    # body: null? body is {}
+    "indices/simulate_index_template/10_basic[2]",
 }
 
 XPACK_FEATURES = None
 ES_VERSION = None
 
+RUN_ASYNC_REST_API_TESTS = (
+    sys.version_info >= (3, 6)
+    and os.environ.get("PYTHON_CONNECTION_CLASS") == "RequestsHttpConnection"
+)
 
 class YamlRunner:
@@ -78,7 +86,7 @@ def __init__(self, client):
     def use_spec(self, test_spec):
         self._setup_code = test_spec.pop("setup", None)
         self._run_code = test_spec.pop("run", None)
-        self._teardown_code = test_spec.pop("teardown")
+        self._teardown_code = test_spec.pop("teardown", None)
 
     def setup(self):
         if self._setup_code:
@@ -417,6 +425,8 @@ def sync_runner(sync_client):
 
 @pytest.mark.parametrize("test_spec", YAML_TEST_SPECS)
 def test_rest_api_spec(test_spec, sync_runner):
+    if RUN_ASYNC_REST_API_TESTS:
+        pytest.skip("Skipped running sync REST API tests")
    if test_spec.get("skip", False):
         pytest.skip("Manually skipped in 'SKIP_TESTS'")
     sync_runner.use_spec(test_spec)
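
A note on the await_if_coro helper introduced in the async test_rest_api_spec.py above: AsyncYamlRunner inherits from the sync YamlRunner, so a handler looked up via getattr(self, "run_" + action_type) may be either a plain function inherited from the base class or a coroutine method overridden here, and run_code needs to dispatch to both uniformly. A self-contained sketch of the pattern, with illustrative sync_step/async_step helpers (get_event_loop().run_until_complete is used instead of asyncio.run to keep Python 3.6 compatibility):

import asyncio
import inspect


async def await_if_coro(x):
    # Coroutines are awaited; plain return values pass through unchanged.
    if inspect.iscoroutine(x):
        return await x
    return x


def sync_step():
    return "sync"


async def async_step():
    return "async"


async def main():
    assert await await_if_coro(sync_step()) == "sync"
    assert await await_if_coro(async_step()) == "async"


asyncio.get_event_loop().run_until_complete(main())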
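
The version-string parsing used in both conftest.py and AsyncYamlRunner.es_version above maps any non-numeric component to 999, so a pre-release build such as "7.8.0-SNAPSHOT" compares as newer than any released patch version. A minimal sketch restating just that logic (the parse_version name is illustrative, not part of the patch):

def parse_version(version_string):
    # "7.8.0"          -> (7, 8, 0)
    # "7.8.0-SNAPSHOT" -> (7, 8, 999), since "0-SNAPSHOT".isdigit() is False
    return tuple(
        int(v) if v.isdigit() else 999 for v in version_string.strip().split(".")
    )


assert parse_version("7.8.0") == (7, 8, 0)
assert parse_version("7.8.0-SNAPSHOT") == (7, 8, 999)
assert parse_version("7.8.0-SNAPSHOT") >= (7, 7)  # enables the 'hidden' wildcard cleanup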
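
New async integration tests plug into the async_client fixture the same way test_clients.py does: a module-level pytestmark = pytest.mark.asyncio lets pytest-asyncio collect coroutine tests, while the fixture waits for cluster readiness and cleans up indices and templates afterwards. A hypothetical extra test, assuming the client's standard ping() health check (the class and test names are illustrative only):

import pytest

pytestmark = pytest.mark.asyncio


class TestPing:
    async def test_ping_returns_true(self, async_client):
        # 'async_client' is the function-scoped fixture from conftest.py above.
        assert await async_client.ping()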
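
Taken together with the AIOHttpConnection patch at the start of the series, the end-user experience this test suite exercises looks roughly like the following sketch, assuming Python 3.6+, a cluster on localhost:9200, and the AsyncElasticsearch entry point the fixtures check for (again using get_event_loop().run_until_complete rather than asyncio.run for 3.6 compatibility):

import asyncio

from elasticsearch import AsyncElasticsearch


async def main():
    es = AsyncElasticsearch(["localhost"])
    # Every API method, including namespaced ones such as es.watcher.stats(),
    # is now a coroutine and must be awaited; signatures otherwise mirror
    # the sync client.
    print(await es.info())
    await es.close()


asyncio.get_event_loop().run_until_complete(main())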