From 51e9c829725c7f24724446656149c621a39389af Mon Sep 17 00:00:00 2001 From: Ivan Daschinskiy Date: Mon, 28 Dec 2020 14:20:46 +0300 Subject: [PATCH 01/62] IGNITE-13903: Add tox, docker-compose and travis integration This closes #1 --- .asf.yaml | 34 ++++++ .gitignore | 4 + .travis.yml | 34 ++++++ README.md | 33 ++++-- docker-compose.yml | 34 ++++++ tests/config/ssl.xml | 2 - tests/config/ssl/README.txt | 2 +- tests/config/ssl/generate_certificates.sh | 127 ++++++++++++++++++++++ tests/conftest.py | 18 +++ tests/test_examples.py | 8 +- tox.ini | 85 +++++++++++++++ 11 files changed, 364 insertions(+), 17 deletions(-) create mode 100644 .asf.yaml create mode 100644 .gitignore create mode 100644 .travis.yml create mode 100644 docker-compose.yml create mode 100755 tests/config/ssl/generate_certificates.sh create mode 100644 tox.ini diff --git a/.asf.yaml b/.asf.yaml new file mode 100644 index 0000000..2fca9c0 --- /dev/null +++ b/.asf.yaml @@ -0,0 +1,34 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +github: + description: "Apache Ignite Python Thin Client" + homepage: https://ignite.apache.org/ + labels: + - ignite + - python + features: + wiki: false + issues: false + projects: false + enabled_merge_buttons: + squash: true + merge: false + rebase: false +notifications: + commits: commits@ignite.apache.org + issues: notifications@ignite.apache.org + pullrequests: notifications@ignite.apache.org + jira_options: link label worklog diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..d9268c3 --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +.eggs +.pytest_cache +.tox +pyignite.egg-info diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..230a9f1 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,34 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +sudo: required +services: + - docker + +env: + COMPOSE_VERSION: 1.27.4 + +before_install: + - curl -L https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose + - chmod +x docker-compose + - sudo mv docker-compose /usr/local/bin + +language: python +python: + - "3.6" + - "3.7" + - "3.8" +install: pip install tox-travis +script: tox \ No newline at end of file diff --git a/README.md b/README.md index 22732ce..26b9a6a 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ Apache Ignite thin (binary protocol) client, written in Python 3. ## Prerequisites -- Python 3.4 or above (3.6 is tested), +- Python 3.4 or above (3.6, 3.7 and 3.8 are tested), - Access to Apache Ignite node, local or remote. The current thin client version was tested on Apache Ignite 2.7.0 (binary client protocol 1.2.0). @@ -19,8 +19,7 @@ $ pip install pyignite If you want to run tests, examples or build documentation, clone the whole repository: ``` -$ git clone git@github.com:apache/ignite.git -$ cd ignite/modules/platforms/python +$ git clone git@github.com:apache/ignite-python-thin-client.git $ pip install -e . ``` @@ -64,12 +63,32 @@ This code implies that it is run in the environment with `pyignite` package installed, and Apache Ignite node is running on localhost:10800. ## Testing -Run +*NB!* All tests require Apache Ignite node running on localhost:10800. For the convenience, `docker-compose.yml` is present. +So installing `docker` and `docker-compose` is recommended. Also, it is recommended installing `pyignite` in development +mode. You can do that using following command: ``` -$ cd ignite/modules/platforms/python -$ python setup.py pytest +$ pip install -e . +``` +### Run without ssl +``` +$ docker-compose down && docker-compose up -d ignite +$ pytest +``` +### Run with examples +``` +$ docker-compose down && docker-compose up -d ignite +$ pytest --examples +``` +### Run with ssl and not encrypted key +``` +$ docker-compose down && docker-compose up -d ignite +$ pytest --use-ssl=True --ssl-certfile=./tests/config/ssl/client_full.pem +``` +### Run with ssl and password-protected key +``` +$ docker-compose down && docker-compose up -d ignite +$ pytest --use-ssl=True --ssl-certfile=./tests/config/ssl/client_with_pass_full.pem --ssl-keyfile-password=654321 ``` -*NB!* All tests require Apache Ignite node running on localhost:10800. If you need to change the connection parameters, see the documentation on [testing](https://apache-ignite-binary-protocol-client.readthedocs.io/en/latest/readme.html#testing). diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..2517d25 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,34 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +services: + ignite: + image: apacheignite/ignite:latest + ports: + - 10800:10800 + restart: always + network_mode: host + + ignite-ssl: + image: apacheignite/ignite:latest + ports: + - 10800:10800 + restart: always + network_mode: host + volumes: + - ./tests/config:/config + environment: + CONFIG_URI: /config/ssl.xml + PYTHON_TEST_CONFIG_PATH: /config diff --git a/tests/config/ssl.xml b/tests/config/ssl.xml index d9d406f..8d74cbb 100644 --- a/tests/config/ssl.xml +++ b/tests/config/ssl.xml @@ -32,12 +32,10 @@ - - diff --git a/tests/config/ssl/README.txt b/tests/config/ssl/README.txt index eca07ea..da169fa 100644 --- a/tests/config/ssl/README.txt +++ b/tests/config/ssl/README.txt @@ -1,3 +1,3 @@ These files generated using script -`$IGNITE_SRC/modules/platforms/cpp/thin-client-test/config/ssl/generate_certificates.sh` +`./tests/config/ssl/generate_certificates.sh` To update them just run script and move files to this folder. \ No newline at end of file diff --git a/tests/config/ssl/generate_certificates.sh b/tests/config/ssl/generate_certificates.sh new file mode 100755 index 0000000..e4f41e2 --- /dev/null +++ b/tests/config/ssl/generate_certificates.sh @@ -0,0 +1,127 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +OSSL=$(command -v openssl11) + +if [ -z "$OSSL" ] +then + OSSL=$(command -v openssl) +fi + +echo "Using following openssl: $OSSL" + +function generate_ca { + CA_KEY="$1.key" + CA_CRT="$1.crt" + OU="$2" + + # Generating CA private key and self-signed certificate + $OSSL req \ + -newkey rsa:2048 -nodes -sha256 -keyout $CA_KEY \ + -subj "/C=US/ST=Massachusetts/L=Wakefield/CN=ignite.apache.org/O=The Apache Software Foundation/OU=$OU/emailAddress=dev@ignite.apache.org" \ + -x509 -days 3650 -out $CA_CRT +} + +function generate_client_key_and_crt { + CA_KEY="$1.key" + CA_CRT="$1.crt" + CA_SRL="$1.srl" + CLIENT_KEY="$2.key" + CLIENT_CSR="$2.scr" + CLIENT_CRT="$2.crt" + OU="$3" + + # Generating client private key and certificate signature request to be used for certificate signing + $OSSL req \ + -newkey rsa:2048 -nodes -sha256 -keyout $CLIENT_KEY \ + -subj "/C=US/ST=Massachusetts/L=Wakefield/CN=ignite.apache.org/O=The Apache Software Foundation/OU=$OU/emailAddress=dev@ignite.apache.org" \ + -out $CLIENT_CSR + + # Signing client cerificate + $OSSL x509 -req \ + -in $CLIENT_CSR -CA $CA_CRT -CAkey $CA_KEY -CAcreateserial \ + -days 3650 -sha256 -out $CLIENT_CRT + + # Cleaning up. 
+ rm -f $CLIENT_CSR + + # Protecting key with the password if required + if [ "$4" == "1" ]; then + openssl rsa -aes256 -in $CLIENT_KEY -passout pass:654321 -out $CLIENT_KEY + fi +} + +function generate_jks { + CA_CRT="$1.crt" + CA_JKS="$1.jks" + SERVER_KEY="$2.key" + SERVER_CRT="$2.crt" + SERVER_PEM="$2.pem" + SERVER_P12="$2.pkcs12" + SERVER_JKS="$2.jks" + + rm -f $CA_JKS $SERVER_JKS + + cat $SERVER_KEY $SERVER_CRT > $SERVER_PEM + + $OSSL pkcs12 -export -passout pass:123456 -out $SERVER_P12 -in $SERVER_PEM + + keytool -import -v -trustcacerts \ + -file $CA_CRT -alias certificateauthority -noprompt \ + -keystore $CA_JKS -deststorepass 123456 + + keytool -v -importkeystore \ + -srckeystore $SERVER_P12 -srcstoretype PKCS12 -srcstorepass 123456 \ + -destkeystore $SERVER_JKS -deststoretype JKS -deststorepass 123456 + + rm -f $SERVER_P12 $SERVER_PEM +} + +CA='ca' +CLIENT='client' +CLIENT_WITH_PASS='client_with_pass' +SERVER='server' +CA_UNKNOWN='ca_unknown' +CLIENT_UNKNOWN='client_unknown' + +generate_ca $CA 'Apache Ignite CA' +generate_client_key_and_crt $CA $CLIENT 'Apache Ignite Client Test' +generate_client_key_and_crt $CA $CLIENT_WITH_PASS 'Apache Ignite Client Test' 1 +generate_client_key_and_crt $CA $SERVER 'Apache Ignite Server Test' + +# We won't sign up any other certs so we do not need CA key or srl +rm -f "$CA.key" "$CA.srl" + +generate_jks $CA $SERVER + +generate_ca $CA_UNKNOWN 'Unknown CA' +generate_client_key_and_crt $CA_UNKNOWN $CLIENT_UNKNOWN 'Unknown Client' + +# We do not need this CA anymore +rm -f $CA_UNKNOWN* + +# Re-naming everything as needed +cat $CLIENT.key $CLIENT.crt > "$CLIENT"_full.pem +cat $CLIENT_WITH_PASS.key $CLIENT_WITH_PASS.crt > "$CLIENT_WITH_PASS"_full.pem +cat $CLIENT_UNKNOWN.key $CLIENT_UNKNOWN.crt > $CLIENT_UNKNOWN.pem +mv $CA.jks trust.jks +mv $CA.crt ca.pem + +rm -f $CLIENT.crt $CLIENT.key $CLIENT_WITH_PASS.key $CLIENT_WITH_PASS.crt $CLIENT_UNKNOWN.key $CLIENT_UNKNOWN.crt $SERVER_KEY $SERVER_CRT + + diff --git a/tests/conftest.py b/tests/conftest.py index f7e2e1f..8ebd5b8 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -98,6 +98,18 @@ def cache(client): cache_destroy(client, cache_name) +@pytest.fixture +def examples(request): + return request.config.getoption("--examples") + + +@pytest.fixture(autouse=True) +def run_examples(request, examples): + if request.node.get_closest_marker('examples'): + if not examples: + pytest.skip('skipped examples: --examples is not passed') + + def pytest_addoption(parser): parser.addoption( '--ignite-host', @@ -225,3 +237,9 @@ def pytest_generate_tests(metafunc): if type(param) is not list: param = [param] metafunc.parametrize(param_name, param, scope='session') + + +def pytest_configure(config): + config.addinivalue_line( + "markers", "examples: mark test to run only if --examples are set" + ) diff --git a/tests/test_examples.py b/tests/test_examples.py index 4665d8c..046eb6d 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -39,13 +39,7 @@ def run_subprocess_35(script: str): ]).returncode -@pytest.mark.skipif( - condition=not pytest.config.option.examples, - reason=( - 'If you wish to test examples, invoke pytest with ' - '`--examples` option.' 
- ), -) +@pytest.mark.examples def test_examples(): for script in glob.glob1('../examples', '*.py'): if script not in SKIP_LIST: diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..6e70234 --- /dev/null +++ b/tox.ini @@ -0,0 +1,85 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +[tox] +skipsdist = True +envlist = py{36,37,38}-{no-ssl,ssl,ssl-password}-docker + +[travis] +python = + 3.6: py36-{no-ssl,ssl,ssl-password}-docker + 3.7: py37-{no-ssl,ssl,ssl-password}-docker + 3.8: py38-{no-ssl,ssl,ssl-password}-docker + +[testenv] +passenv = TEAMCITY_VERSION +envdir = {homedir}/.virtualenvs/pyignite-{envname} +deps = + -r ./requirements/install.txt + -r ./requirements/tests.txt +recreate = True +usedevelop = True +allowlist_externals = docker-compose +commands = + pytest {env:PYTESTARGS:} {posargs} + +[no-ssl] +setenv: + PYTEST_ADDOPTS = --examples + +[ssl] +setenv: + PYTEST_ADDOPTS = --examples --use-ssl=True --ssl-certfile={toxinidir}/tests/config/ssl/client_full.pem + +[ssl-password] +setenv: + PYTEST_ADDOPTS = --examples --use-ssl=True --ssl-certfile={toxinidir}/tests/config/ssl/client_with_pass_full.pem --ssl-keyfile-password=654321 + +[docker] +commands_pre = + docker-compose down + docker-compose up -d ignite +commands_post = + docker-compose down + +[docker-ssl] +commands_pre = + docker-compose down + docker-compose up -d ignite-ssl +commands_post = {[docker]commands_post} + +[testenv:py{36,37,38}-no-ssl] +setenv: {[no-ssl]setenv} + +[testenv:py{36,37,38}-no-ssl-docker] +commands_pre = {[docker]commands_pre} +setenv: {[no-ssl]setenv} +commands_post = {[docker]commands_post} + +[testenv:py{36,37,38}-ssl] +setenv: {[ssl]setenv} + +[testenv:py{36,37,38}-ssl-docker] +commands_pre = {[docker-ssl]commands_pre} +setenv: {[ssl]setenv} +commands_post = {[docker]commands_post} + +[testenv:py{36,37,38}-ssl-password] +setenv: {[ssl-password]setenv} + +[testenv:py{36,37,38}-ssl-password-docker] +commands_pre = {[docker-ssl]commands_pre} +setenv: {[ssl-password]setenv} +commands_post = {[docker]commands_post} From 99ed735a3ac34542c633e8bb0c94c7707f32a7da Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Mon, 25 Jan 2021 17:14:11 +0300 Subject: [PATCH 02/62] IGNITE-11303: Partition Awareness for Python Thin --- .gitignore | 3 + .travis.yml | 16 +- docs/datatypes/parsers.rst | 176 +++---- docs/examples.rst | 143 ++++-- docs/readme.rst | 4 + examples/failover.py | 34 +- examples/get_and_put_complex.py | 68 +++ pyignite/api/__init__.py | 4 + pyignite/api/affinity.py | 135 ++++++ pyignite/api/binary.py | 4 +- pyignite/api/key_value.py | 157 +++++-- pyignite/api/result.py | 3 +- pyignite/api/sql.py | 142 +++--- pyignite/binary.py | 129 +++++- pyignite/cache.py | 318 +++++++++++-- pyignite/client.py | 254 +++++++++-- pyignite/connection/__init__.py | 337 
++++++++++---- pyignite/connection/generators.py | 48 -- pyignite/connection/handshake.py | 35 +- pyignite/connection/ssl.py | 20 +- pyignite/constants.py | 22 +- pyignite/datatypes/base.py | 30 +- pyignite/datatypes/complex.py | 295 +++++++----- pyignite/datatypes/internal.py | 63 ++- pyignite/datatypes/key_value.py | 10 +- pyignite/datatypes/null_object.py | 6 + pyignite/datatypes/primitive.py | 21 +- pyignite/datatypes/primitive_arrays.py | 95 +++- pyignite/datatypes/primitive_objects.py | 63 ++- pyignite/datatypes/standard.py | 108 ++++- .../datatypes/type_ids.py | 56 ++- pyignite/datatypes/type_names.py | 46 ++ pyignite/exceptions.py | 9 +- pyignite/queries/__init__.py | 317 +------------ pyignite/queries/op_codes.py | 2 + pyignite/queries/query.py | 164 +++++++ pyignite/queries/response.py | 428 ++++++++++++++++++ pyignite/utils.py | 142 +++++- requirements/tests.txt | 1 + tests/config/ignite-config-base.xml | 78 ++++ .../config/{ssl.xml => ignite-config-ssl.xml} | 19 +- tests/config/ignite-config.xml | 39 ++ tests/config/log4j.xml | 42 ++ tests/conftest.py | 157 +++++-- tests/test_affinity.py | 229 ++++++++++ tests/test_affinity_bad_servers.py | 63 +++ tests/test_affinity_request_routing.py | 179 ++++++++ tests/test_affinity_single_connection.py | 102 +++++ tests/test_binary.py | 26 ++ tests/test_cache_class.py | 21 +- tests/test_cache_config.py | 20 +- tests/test_datatypes.py | 49 +- tests/test_get_names.py | 6 +- tests/test_handshake.py | 64 --- tests/test_key_value.py | 180 +++++--- tests/test_scan.py | 18 +- tests/test_sql.py | 26 +- tests/util.py | 179 ++++++++ tox.ini | 43 +- 59 files changed, 4156 insertions(+), 1292 deletions(-) create mode 100644 examples/get_and_put_complex.py create mode 100644 pyignite/api/affinity.py delete mode 100644 pyignite/connection/generators.py rename docker-compose.yml => pyignite/datatypes/type_ids.py (54%) create mode 100644 pyignite/datatypes/type_names.py create mode 100644 pyignite/queries/query.py create mode 100644 pyignite/queries/response.py create mode 100644 tests/config/ignite-config-base.xml rename tests/config/{ssl.xml => ignite-config-ssl.xml} (77%) create mode 100644 tests/config/ignite-config.xml create mode 100644 tests/config/log4j.xml create mode 100644 tests/test_affinity.py create mode 100644 tests/test_affinity_bad_servers.py create mode 100644 tests/test_affinity_request_routing.py create mode 100644 tests/test_affinity_single_connection.py delete mode 100644 tests/test_handshake.py create mode 100644 tests/util.py diff --git a/.gitignore b/.gitignore index d9268c3..a779771 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,7 @@ +.idea .eggs .pytest_cache .tox pyignite.egg-info +ignite-log-* +__pycache__ \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index 230a9f1..f884bdb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,16 +14,20 @@ # limitations under the License. 
sudo: required -services: - - docker + +addons: + apt: + packages: + - openjdk-8-jdk env: - COMPOSE_VERSION: 1.27.4 + - IGNITE_VERSION=2.9.1 IGNITE_HOME=/opt/ignite before_install: - - curl -L https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose - - chmod +x docker-compose - - sudo mv docker-compose /usr/local/bin + - curl -L https://apache-mirror.rbc.ru/pub/apache/ignite/${IGNITE_VERSION}/apache-ignite-slim-${IGNITE_VERSION}-bin.zip > ignite.zip + - unzip ignite.zip -d /opt + - mv /opt/apache-ignite-slim-${IGNITE_VERSION}-bin /opt/ignite + - mv /opt/ignite/libs/optional/ignite-log4j2 /opt/ignite/libs/ language: python python: diff --git a/docs/datatypes/parsers.rst b/docs/datatypes/parsers.rst index a717f4c..71f9aac 100644 --- a/docs/datatypes/parsers.rst +++ b/docs/datatypes/parsers.rst @@ -47,94 +47,94 @@ However, in some rare cases of type ambiguity, as well as for the needs of interoperability, you may have to sneak one or the other class, along with your data, in to some API function as a *type conversion hint*. -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|`type_code`|Apache Ignite |Python type |Parser/constructor | -| |docs reference |or class |class | -+===========+====================+===============================+=================================================================+ -|*Primitive data types* | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x01 |Byte_ |int |:class:`~pyignite.datatypes.primitive_objects.ByteObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x02 |Short_ |int |:class:`~pyignite.datatypes.primitive_objects.ShortObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x03 |Int_ |int |:class:`~pyignite.datatypes.primitive_objects.IntObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x04 |Long_ |int |:class:`~pyignite.datatypes.primitive_objects.LongObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x05 |Float_ |float |:class:`~pyignite.datatypes.primitive_objects.FloatObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x06 |Double_ |float |:class:`~pyignite.datatypes.primitive_objects.DoubleObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x07 |Char_ |str |:class:`~pyignite.datatypes.primitive_objects.CharObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x08 |Bool_ |bool |:class:`~pyignite.datatypes.primitive_objects.BoolObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x65 |Null_ |NoneType |:class:`~pyignite.datatypes.null_object.Null` | 
-+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|*Standard objects* | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x09 |String_ |Str |:class:`~pyignite.datatypes.standard.String` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x0a |UUID_ |uuid.UUID |:class:`~pyignite.datatypes.standard.UUIDObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x21 |Timestamp_ |tuple |:class:`~pyignite.datatypes.standard.TimestampObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x0b |Date_ |datetime.datetime |:class:`~pyignite.datatypes.standard.DateObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x24 |Time_ |datetime.timedelta |:class:`~pyignite.datatypes.standard.TimeObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x1e |Decimal_ |decimal.Decimal |:class:`~pyignite.datatypes.standard.DecimalObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x1c |Enum_ |tuple |:class:`~pyignite.datatypes.standard.EnumObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x67 |`Binary enum`_ |tuple |:class:`~pyignite.datatypes.standard.BinaryEnumObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|*Arrays of primitives* | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x0c |`Byte array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.ByteArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x0d |`Short array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.ShortArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x0e |`Int array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.IntArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x0f |`Long array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.LongArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x10 |`Float array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.FloatArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x11 |`Double array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.DoubleArrayObject` | 
-+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x12 |`Char array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.CharArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x13 |`Bool array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.BoolArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|*Arrays of standard objects* | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x14 |`String array`_ |iterable/list |:class:`~pyignite.datatypes.standard.StringArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x15 |`UUID array`_ |iterable/list |:class:`~pyignite.datatypes.standard.UUIDArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x22 |`Timestamp array`_ |iterable/list |:class:`~pyignite.datatypes.standard.TimestampArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x16 |`Date array`_ |iterable/list |:class:`~pyignite.datatypes.standard.DateArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x23 |`Time array`_ |iterable/list |:class:`~pyignite.datatypes.standard.TimeArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x1f |`Decimal array`_ |iterable/list |:class:`~pyignite.datatypes.standard.DecimalArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|*Object collections, special types, and complex object* | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x17 |`Object array`_ |iterable/list |:class:`~pyignite.datatypes.complex.ObjectArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x18 |`Collection`_ |tuple |:class:`~pyignite.datatypes.complex.CollectionObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x19 |`Map`_ |dict, collections.OrderedDict |:class:`~pyignite.datatypes.complex.MapObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x1d |`Enum array`_ |iterable/list |:class:`~pyignite.datatypes.standard.EnumArrayObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x67 |`Complex object`_ |object |:class:`~pyignite.datatypes.complex.BinaryObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ -|0x1b |`Wrapped data`_ |tuple 
|:class:`~pyignite.datatypes.complex.WrappedDataObject` | -+-----------+--------------------+-------------------------------+-----------------------------------------------------------------+ ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|`type_code`|Apache Ignite |Python type |Parser/constructor | +| |docs reference |or class |class | ++===========+====================+===============================+==================================================================+ +|*Primitive data types* | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x01 |Byte_ |int |:class:`~pyignite.datatypes.primitive_objects.ByteObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x02 |Short_ |int |:class:`~pyignite.datatypes.primitive_objects.ShortObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x03 |Int_ |int |:class:`~pyignite.datatypes.primitive_objects.IntObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x04 |Long_ |int |:class:`~pyignite.datatypes.primitive_objects.LongObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x05 |Float_ |float |:class:`~pyignite.datatypes.primitive_objects.FloatObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x06 |Double_ |float |:class:`~pyignite.datatypes.primitive_objects.DoubleObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x07 |Char_ |str |:class:`~pyignite.datatypes.primitive_objects.CharObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x08 |Bool_ |bool |:class:`~pyignite.datatypes.primitive_objects.BoolObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x65 |Null_ |NoneType |:class:`~pyignite.datatypes.null_object.Null` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|*Standard objects* | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x09 |String_ |Str |:class:`~pyignite.datatypes.standard.String` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0a |UUID_ |uuid.UUID |:class:`~pyignite.datatypes.standard.UUIDObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x21 |Timestamp_ |tuple |:class:`~pyignite.datatypes.standard.TimestampObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0b |Date_ |datetime.datetime |:class:`~pyignite.datatypes.standard.DateObject` | 
++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x24 |Time_ |datetime.timedelta |:class:`~pyignite.datatypes.standard.TimeObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x1e |Decimal_ |decimal.Decimal |:class:`~pyignite.datatypes.standard.DecimalObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x1c |Enum_ |tuple |:class:`~pyignite.datatypes.standard.EnumObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x67 |`Binary enum`_ |tuple |:class:`~pyignite.datatypes.standard.BinaryEnumObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|*Arrays of primitives* | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0c |`Byte array`_ |iterable/bytearray |:class:`~pyignite.datatypes.primitive_arrays.ByteArrayObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0d |`Short array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.ShortArrayObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0e |`Int array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.IntArrayObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0f |`Long array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.LongArrayObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x10 |`Float array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.FloatArrayObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x11 |`Double array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.DoubleArrayObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x12 |`Char array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.CharArrayObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x13 |`Bool array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.BoolArrayObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|*Arrays of standard objects* | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x14 |`String array`_ |iterable/list |:class:`~pyignite.datatypes.standard.StringArrayObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x15 |`UUID array`_ |iterable/list 
|:class:`~pyignite.datatypes.standard.UUIDArrayObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x22 |`Timestamp array`_ |iterable/list |:class:`~pyignite.datatypes.standard.TimestampArrayObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x16 |`Date array`_ |iterable/list |:class:`~pyignite.datatypes.standard.DateArrayObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x23 |`Time array`_ |iterable/list |:class:`~pyignite.datatypes.standard.TimeArrayObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x1f |`Decimal array`_ |iterable/list |:class:`~pyignite.datatypes.standard.DecimalArrayObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|*Object collections, special types, and complex object* | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x17 |`Object array`_ |tuple[int, iterable/list] |:class:`~pyignite.datatypes.complex.ObjectArrayObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x18 |`Collection`_ |tuple[int, iterable/list] |:class:`~pyignite.datatypes.complex.CollectionObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x19 |`Map`_ |tuple[int, dict/OrderedDict] |:class:`~pyignite.datatypes.complex.MapObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x1d |`Enum array`_ |iterable/list |:class:`~pyignite.datatypes.standard.EnumArrayObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x67 |`Complex object`_ |object |:class:`~pyignite.datatypes.complex.BinaryObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x1b |`Wrapped data`_ |tuple[int, bytes] |:class:`~pyignite.datatypes.complex.WrappedDataObject` | ++-----------+--------------------+-------------------------------+------------------------------------------------------------------+ .. _Byte: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-byte .. _Short: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-short diff --git a/docs/examples.rst b/docs/examples.rst index 3d8d2d9..39deef3 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -121,6 +121,62 @@ Destroy created cache and close connection. .. _sql_examples: +Object collections +------------------ + +File: `get_and_put_complex.py`_. + +Ignite collection types are represented in `pyignite` as two-tuples. +First comes collection type ID or deserialization hint, which is specific for +each of the collection type. Second comes the data value. + +.. 
literalinclude:: ../examples/get_and_put_complex.py + :language: python + :lines: 19-21 + +Map +=== + +For Python prior to 3.6, it might be important to distinguish between ordered +(`collections.OrderedDict`) and unordered (`dict`) dictionary types, so you +could use :py:attr:`~pyignite.datatypes.complex.Map.LINKED_HASH_MAP` +for the former and :py:attr:`~pyignite.datatypes.complex.Map.HASH_MAP` +for the latter. + +Since CPython 3.6 all dictionaries became de facto ordered. You can always use +`LINKED_HASH_MAP` as a safe default. + +.. literalinclude:: ../examples/get_and_put_complex.py + :language: python + :lines: 29-41 + +Collection +========== + +See :class:`~pyignite.datatypes.complex.CollectionObject` and Ignite +documentation on `Collection`_ type for the description of various Java +collection types. Note that not all of them have a direct Python +representative. For example, Python do not have ordered sets (it is indeed +recommended to use `OrderedDict`'s keys and disregard its values). + +As for the `pyignite`, the rules are simple: pass any iterable as a data, +and you always get `list` back. + +.. literalinclude:: ../examples/get_and_put_complex.py + :language: python + :lines: 43-57 + +Object array +============ + +:class:`~pyignite.datatypes.complex.ObjectArrayObject` has a very limited +functionality in `pyignite`, since no type checks can be enforced on its +contents. But it still can be used for interoperability with Java. + +.. literalinclude:: ../examples/get_and_put_complex.py + :language: python + :lines: 59-68 + SQL --- File: `sql.py`_. @@ -241,7 +297,23 @@ Here you can see how :class:`~pyignite.binary.GenericObjectMeta` uses `attrs`_ package internally for creating nice `__init__()` and `__repr__()` methods. -You can reuse the autogenerated class for subsequent writes: +In this case the autogenerated dataclass's name `Person` is exactly matches +the type name of the Complex object it represents (the content of the +:py:attr:`~pyignite.datatypes.base.IgniteDataTypeProps.type_name` property). +But when Complex object's class name contains characters, that can not be used +in a Python identifier, for example: + +- `.`, when fully qualified Java class names are used, +- `$`, a common case for Scala classes, +- `+`, internal class name separator in C#, + +then `pyignite` can not maintain this match. In such cases `pyignite` tries +to sanitize a type name to derive a “good” dataclass name from it. + +If your code needs consistent naming between the server and the client, make +sure that your Ignite cluster is configured to use `simple class names`_. + +Anyway, you can reuse the autogenerated dataclass for subsequent writes: .. literalinclude:: ../examples/binary_basics.py :language: python @@ -445,27 +517,24 @@ When connection to the server is broken or timed out, (`OSError` or `SocketError`), but keeps its constructor's parameters intact and tries to reconnect transparently. -When there's no way for :class:`~pyignite.client.Client` to reconnect, it -raises a special :class:`~pyignite.exceptions.ReconnectError` exception. +When :class:`~pyignite.client.Client` detects that all nodes in the list are +failed without the possibility of restoring connection, it raises a special +:class:`~pyignite.exceptions.ReconnectError` exception. -The following example features a simple node list traversal failover mechanism. Gather 3 Ignite nodes on `localhost` into one cluster and run: .. 
literalinclude:: ../examples/failover.py :language: python - :lines: 16-49 + :lines: 16-51 Then try shutting down and restarting nodes, and see what happens. .. literalinclude:: ../examples/failover.py :language: python - :lines: 51-61 + :lines: 53-65 Client reconnection do not require an explicit user action, like calling -a special method or resetting a parameter. Note, however, that reconnection -is lazy: it happens only if (and when) it is needed. In this example, -the automatic reconnection happens, when the script checks upon the last -saved value: +a special method or resetting a parameter. .. literalinclude:: ../examples/failover.py :language: python @@ -475,29 +544,6 @@ It means that instead of checking the connection status it is better for `pyignite` user to just try the supposed data operations and catch the resulting exception. -:py:meth:`~pyignite.connection.Connection.connect` method accepts any -iterable, not just list. It means that you can implement any reconnection -policy (round-robin, nodes prioritization, pause on reconnect or graceful -backoff) with a generator. - -`pyignite` comes with a sample -:class:`~pyignite.connection.generators.RoundRobin` generator. In the above -example try to replace - -.. literalinclude:: ../examples/failover.py - :language: python - :lines: 29 - -with - -.. code-block:: python3 - - client.connect(RoundRobin(nodes, max_reconnects=20)) - -The client will try to reconnect to node 1 after node 3 is crashed, then to -node 2, et c. At least one node should be active for the -:class:`~pyignite.connection.generators.RoundRobin` to work properly. - SSL/TLS ------- @@ -604,21 +650,24 @@ with the following message: # pyignite.exceptions.HandshakeError: Handshake error: Unauthenticated sessions are prohibited. -.. _get_and_put.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/get_and_put.py -.. _type_hints.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/type_hints.py -.. _failover.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/failover.py -.. _scans.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/scans.py -.. _sql.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/sql.py -.. _binary_basics.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/binary_basics.py -.. _read_binary.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/read_binary.py -.. _create_binary.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/create_binary.py -.. _migrate_binary.py: https://github.com/apache/ignite/tree/master/modules/platforms/python/examples/migrate_binary.py -.. _Getting Started: https://apacheignite-sql.readme.io/docs/getting-started -.. _Ignite GitHub repository: https://github.com/apache/ignite/blob/master/examples/sql/world.sql -.. _Complex object: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-complex-object +.. _get_and_put.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/get_and_put.py +.. _type_hints.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/type_hints.py +.. _failover.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/failover.py +.. _scans.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/scans.py +.. 
_sql.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/sql.py +.. _binary_basics.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/binary_basics.py +.. _read_binary.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/read_binary.py +.. _create_binary.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/create_binary.py +.. _migrate_binary.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/migrate_binary.py +.. _Getting Started: https://ignite.apache.org/docs/latest/thin-clients/python-thin-client +.. _PyIgnite GitHub repository: https://github.com/apache/ignite-python-thin-client/blob/master +.. _Complex object: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#complex-object .. _Java keytool: https://docs.oracle.com/javase/8/docs/technotes/tools/unix/keytool.html -.. _Securing Connection Between Nodes: https://apacheignite.readme.io/docs/ssltls#section-securing-connection-between-nodes +.. _Securing Connection Between Nodes: https://ignite.apache.org/docs/latest/security/ssl-tls#ssltls-for-nodes .. _ClientConnectorConfiguration: https://ignite.apache.org/releases/latest/javadoc/org/apache/ignite/configuration/ClientConnectorConfiguration.html .. _openssl: https://www.openssl.org/docs/manmaster/man1/openssl.html -.. _Authentication: https://apacheignite.readme.io/docs/advanced-security#section-authentication +.. _Authentication: https://ignite.apache.org/docs/latest/security/authentication .. _attrs: https://pypi.org/project/attrs/ +.. _get_and_put_complex.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/get_and_put.py +.. _Collection: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#collection +.. _simple class names: https://ignite.apache.org/docs/latest/data-modeling/binary-marshaller#binary-name-mapper-and-binary-id-mapper diff --git a/docs/readme.rst b/docs/readme.rst index f91274e..81298ae 100644 --- a/docs/readme.rst +++ b/docs/readme.rst @@ -141,6 +141,10 @@ Other `pytest` parameters: ``--timeout`` − timeout (in seconds) for each socket operation, including `connect`. Accepts integer or float value. Default is None (blocking mode), +``--partition-aware`` − experimental; off by default; turns on the partition +awareness: a way for the thin client to calculate a data placement for the +given key. + ``--username`` and ``--password`` − credentials to authenticate to Ignite cluster. Used in conjunction with `authenticationEnabled` property in cluster configuration. 
diff --git a/examples/failover.py b/examples/failover.py index 3a5fcce..7911ce0 100644 --- a/examples/failover.py +++ b/examples/failover.py @@ -27,35 +27,39 @@ client = Client(timeout=4.0) client.connect(nodes) -print('Connected to {}'.format(client)) +print('Connected') my_cache = client.get_or_create_cache({ PROP_NAME: 'my_cache', - PROP_CACHE_MODE: CacheMode.REPLICATED, + PROP_CACHE_MODE: CacheMode.PARTITIONED, + PROP_BACKUPS_NUMBER: 2, }) my_cache.put('test_key', 0) +test_value = 0 # abstract main loop while True: try: # do the work - test_value = my_cache.get('test_key') + test_value = my_cache.get('test_key') or 0 my_cache.put('test_key', test_value + 1) except (OSError, SocketError) as e: # recover from error (repeat last command, check data # consistency or just continue − depends on the task) print('Error: {}'.format(e)) - print('Last value: {}'.format(my_cache.get('test_key'))) - print('Reconnected to {}'.format(client)) + print('Last value: {}'.format(test_value)) + print('Reconnecting') -# Connected to 127.0.0.1:10800 -# Error: [Errno 104] Connection reset by peer -# Last value: 6999 -# Reconnected to 127.0.0.1:10801 -# Error: Socket connection broken. -# Last value: 12302 -# Reconnected to 127.0.0.1:10802 -# Error: [Errno 111] Client refused +# Connected +# Error: Connection broken. +# Last value: 2650 +# Reconnecting +# Error: Connection broken. +# Last value: 10204 +# Reconnecting +# Error: Connection broken. +# Last value: 18932 +# Reconnecting # Traceback (most recent call last): -# ... -# pyignite.exceptions.ReconnectError: Can not reconnect: out of nodes +# ... +# pyignite.exceptions.ReconnectError: Can not reconnect: out of nodes. diff --git a/examples/get_and_put_complex.py b/examples/get_and_put_complex.py new file mode 100644 index 0000000..2444612 --- /dev/null +++ b/examples/get_and_put_complex.py @@ -0,0 +1,68 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from collections import OrderedDict + +from pyignite import Client +from pyignite.datatypes import ( + CollectionObject, MapObject, ObjectArrayObject, +) + + +client = Client() +client.connect('127.0.0.1', 10800) + +my_cache = client.get_or_create_cache('my cache') + +value = OrderedDict([(1, 'test'), ('key', 2.0)]) + +# saving ordered dictionary +type_id = MapObject.LINKED_HASH_MAP +my_cache.put('my dict', (type_id, value)) +result = my_cache.get('my dict') +print(result) # (2, OrderedDict([(1, 'test'), ('key', 2.0)])) + +# saving unordered dictionary +type_id = MapObject.HASH_MAP +my_cache.put('my dict', (type_id, value)) +result = my_cache.get('my dict') +print(result) # (1, {'key': 2.0, 1: 'test'}) + +type_id = CollectionObject.LINKED_LIST +value = [1, '2', 3.0] + +my_cache.put('my list', (type_id, value)) + +result = my_cache.get('my list') +print(result) # (2, [1, '2', 3.0]) + +type_id = CollectionObject.HASH_SET +value = [4, 4, 'test', 5.6] + +my_cache.put('my set', (type_id, value)) + +result = my_cache.get('my set') +print(result) # (3, [5.6, 4, 'test']) + +type_id = ObjectArrayObject.OBJECT +value = [7, '8', 9.0] + +my_cache.put( + 'my array of objects', + (type_id, value), + value_hint=ObjectArrayObject # this hint is mandatory! +) +result = my_cache.get('my array of objects') +print(result) # (-1, [7, '8', 9.0]) diff --git a/pyignite/api/__init__.py b/pyignite/api/__init__.py index 01437f0..7dbef0a 100644 --- a/pyignite/api/__init__.py +++ b/pyignite/api/__init__.py @@ -23,6 +23,9 @@ stable end user API see :mod:`pyignite.client` module. """ +from .affinity import ( + cache_get_node_partitions, +) from .cache_config import ( cache_create, cache_get_names, @@ -54,6 +57,7 @@ cache_remove_keys, cache_remove_all, cache_get_size, + cache_local_peek, ) from .sql import ( scan, diff --git a/pyignite/api/affinity.py b/pyignite/api/affinity.py new file mode 100644 index 0000000..d28cfb8 --- /dev/null +++ b/pyignite/api/affinity.py @@ -0,0 +1,135 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Iterable, Union + +from pyignite.datatypes import Bool, Int, Long, UUIDObject +from pyignite.datatypes.internal import StructArray, Conditional, Struct +from pyignite.queries import Query +from pyignite.queries.op_codes import OP_CACHE_PARTITIONS +from pyignite.utils import is_iterable +from .result import APIResult + + +cache_ids = StructArray([ + ('cache_id', Int), +]) + +cache_config = StructArray([ + ('key_type_id', Int), + ('affinity_key_field_id', Int), +]) + +node_partitions = StructArray([ + ('partition_id', Int), +]) + +node_mapping = StructArray([ + ('node_uuid', UUIDObject), + ('node_partitions', node_partitions) +]) + +cache_mapping = StructArray([ + ('cache_id', Int), + ('cache_config', cache_config), +]) + +empty_cache_mapping = StructArray([ + ('cache_id', Int) +]) + +empty_node_mapping = Struct([]) + +partition_mapping = StructArray([ + ('is_applicable', Bool), + + ('cache_mapping', Conditional(lambda ctx: ctx['is_applicable'] == b'\x01', + lambda ctx: ctx['is_applicable'] is True, + cache_mapping, empty_cache_mapping)), + + ('node_mapping', Conditional(lambda ctx: ctx['is_applicable'] == b'\x01', + lambda ctx: ctx['is_applicable'] is True, + node_mapping, empty_node_mapping)), +]) + + +def cache_get_node_partitions( + conn: 'Connection', caches: Union[int, Iterable[int]], + query_id: int = None, +) -> APIResult: + """ + Gets partition mapping for an Ignite cache or a number of caches. See + “IEP-23: Best Effort Affinity for thin clients”. + + :param conn: connection to Ignite server, + :param caches: cache ID(s) the mapping is provided for, + :param query_id: (optional) a value generated by client and returned as-is + in response.query_id. When the parameter is omitted, a random value + is generated, + :return: API result data object. 
+ """ + query_struct = Query( + OP_CACHE_PARTITIONS, + [ + ('cache_ids', cache_ids), + ], + query_id=query_id + ) + if not is_iterable(caches): + caches = [caches] + + result = query_struct.perform( + conn, + query_params={ + 'cache_ids': [{'cache_id': cache} for cache in caches], + }, + response_config=[ + ('version_major', Long), + ('version_minor', Int), + ('partition_mapping', partition_mapping), + ], + ) + if result.status == 0: + # tidying up the result + value = { + 'version': ( + result.value['version_major'], + result.value['version_minor'] + ), + 'partition_mapping': [], + } + for i, partition_map in enumerate(result.value['partition_mapping']): + cache_id = partition_map['cache_mapping'][0]['cache_id'] + value['partition_mapping'].insert( + i, + { + 'cache_id': cache_id, + 'is_applicable': partition_map['is_applicable'], + } + ) + if partition_map['is_applicable']: + value['partition_mapping'][i]['cache_config'] = { + a['key_type_id']: a['affinity_key_field_id'] + for a in partition_map['cache_mapping'][0]['cache_config'] + } + value['partition_mapping'][i]['node_mapping'] = { + p['node_uuid']: [ + x['partition_id'] for x in p['node_partitions'] + ] + for p in partition_map['node_mapping'] + } + result.value = value + + return result diff --git a/pyignite/api/binary.py b/pyignite/api/binary.py index f0a5831..97f9fbd 100644 --- a/pyignite/api/binary.py +++ b/pyignite/api/binary.py @@ -20,7 +20,7 @@ body_struct, enum_struct, schema_struct, binary_fields_struct, ) from pyignite.datatypes import String, Int, Bool -from pyignite.queries import Query, Response +from pyignite.queries import Query, get_response_class from pyignite.queries.op_codes import * from pyignite.utils import int_overflow, entity_id from .result import APIResult @@ -53,7 +53,7 @@ def get_binary_type( }) connection.send(send_buffer) - response_head_struct = Response([ + response_head_struct = get_response_class(connection)([ ('type_exists', Bool), ]) response_head_type, recv_buffer = response_head_struct.parse(connection) diff --git a/pyignite/api/key_value.py b/pyignite/api/key_value.py index 56f5378..25601e9 100644 --- a/pyignite/api/key_value.py +++ b/pyignite/api/key_value.py @@ -13,20 +13,21 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Iterable, Union +from typing import Any, Iterable, Optional, Union from pyignite.queries.op_codes import * from pyignite.datatypes import ( Map, Bool, Byte, Int, Long, AnyDataArray, AnyDataObject, ) from pyignite.datatypes.key_value import PeekModes -from pyignite.queries import Query, Response +from pyignite.queries import Query from pyignite.utils import cache_id def cache_put( - connection: 'Connection', cache: Union[str, int], key, value, - key_hint=None, value_hint=None, binary=False, query_id=None, + connection: 'Connection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Puts a value with a given key to cache (overwriting existing value if any). @@ -67,8 +68,9 @@ def cache_put( def cache_get( - connection: 'Connection', cache: Union[str, int], key, - key_hint=None, binary=False, query_id=None, + connection: 'Connection', cache: Union[str, int], key: Any, + key_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Retrieves a value from cache by key. 
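# A minimal usage sketch for the call above (assuming a node on
# localhost:10800 that speaks binary protocol 1.4+ and an existing cache;
# the printed values are illustrative, and `random_node` is an internal
# helper of the Client introduced later in this patch):
from pyignite import Client
from pyignite.api.affinity import cache_get_node_partitions
from pyignite.utils import cache_id

client = Client(partition_aware=True)
client.connect('127.0.0.1', 10800)
client.get_or_create_cache('my cache')

result = cache_get_node_partitions(client.random_node, cache_id('my cache'))
if result.status == 0:
    print(result.value['version'])        # affinity topology version, e.g. (1, 0)
    for mapping in result.value['partition_mapping']:
        # node UUID -> owned partitions; present only when 'is_applicable'
        print(mapping['cache_id'], mapping.get('node_mapping'))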
@@ -115,7 +117,7 @@ def cache_get( def cache_get_all( connection: 'Connection', cache: Union[str, int], keys: Iterable, - binary=False, query_id=None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Retrieves multiple key-value pairs from cache. @@ -160,7 +162,7 @@ def cache_get_all( def cache_put_all( connection: 'Connection', cache: Union[str, int], pairs: dict, - binary=False, query_id=None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Puts multiple key-value pairs to cache (overwriting existing associations @@ -200,8 +202,9 @@ def cache_put_all( def cache_contains_key( - connection: 'Connection', cache: Union[str, int], key, - key_hint=None, binary=False, query_id=None, + connection: 'Connection', cache: Union[str, int], key: Any, + key_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Returns a value indicating whether given key is present in cache. @@ -248,7 +251,7 @@ def cache_contains_key( def cache_contains_keys( connection: 'Connection', cache: Union[str, int], keys: Iterable, - binary=False, query_id=None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Returns a value indicating whether all given keys are present in cache. @@ -292,8 +295,9 @@ def cache_contains_keys( def cache_get_and_put( - connection: 'Connection', cache: Union[str, int], key, value, - key_hint=None, value_hint=None, binary=False, query_id=None, + connection: 'Connection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Puts a value with a given key to cache, and returns the previous value @@ -345,8 +349,9 @@ def cache_get_and_put( def cache_get_and_replace( - connection: 'Connection', cache: Union[str, int], key, value, - key_hint=None, value_hint=None, binary=False, query_id=None, + connection: 'Connection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Puts a value with a given key to cache, returning previous value @@ -397,8 +402,9 @@ def cache_get_and_replace( def cache_get_and_remove( - connection: 'Connection', cache: Union[str, int], key, - key_hint=None, binary=False, query_id=None, + connection: 'Connection', cache: Union[str, int], key: Any, + key_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Removes the cache entry with specified key, returning the value. 
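# The key-value functions in this module share one calling convention: an open
# connection, a cache name or ID, and optional Ignite type hints. A minimal
# sketch of the low-level API (assuming a node on localhost:10800; regular
# applications would go through the Cache wrapper instead):
from pyignite import Client
from pyignite.api.key_value import cache_get, cache_put
from pyignite.datatypes import IntObject

client = Client()
client.connect('127.0.0.1', 10800)
client.get_or_create_cache('my cache')
conn = client.random_node

# the key_hint forces the key to be serialized as an Ignite int
cache_put(conn, 'my cache', 42, 'the answer', key_hint=IntObject)
result = cache_get(conn, 'my cache', 42, key_hint=IntObject)
print(result.status, result.value)  # 0 the answer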
@@ -442,8 +448,9 @@ def cache_get_and_remove( def cache_put_if_absent( - connection: 'Connection', cache: Union[str, int], key, value, - key_hint=None, value_hint=None, binary=False, query_id=None, + connection: 'Connection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Puts a value with a given key to cache only if the key @@ -494,8 +501,9 @@ def cache_put_if_absent( def cache_get_and_put_if_absent( - connection: 'Connection', cache: Union[str, int], key, value, - key_hint=None, value_hint=None, binary=False, query_id=None, + connection: 'Connection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Puts a value with a given key to cache only if the key does not @@ -546,8 +554,9 @@ def cache_get_and_put_if_absent( def cache_replace( - connection: 'Connection', cache: Union[str, int], key, value, - key_hint=None, value_hint=None, binary=False, query_id=None, + connection: 'Connection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Puts a value with a given key to cache only if the key already exist. @@ -598,9 +607,10 @@ def cache_replace( def cache_replace_if_equals( - connection: 'Connection', cache: Union[str, int], key, sample, value, - key_hint=None, sample_hint=None, value_hint=None, - binary=False, query_id=None, + connection: 'Connection', cache: Union[str, int], + key: Any, sample: Any, value: Any, key_hint: 'IgniteDatatType' = None, + sample_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Puts a value with a given key to cache only if the key already exists @@ -657,8 +667,8 @@ def cache_replace_if_equals( def cache_clear( - connection: 'Connection', cache: Union[str, int], binary=False, - query_id=None, + connection: 'Connection', cache: Union[str, int], + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Clears the cache without notifying listeners or cache writers. @@ -692,8 +702,9 @@ def cache_clear( def cache_clear_key( - connection: 'Connection', cache: Union[str, int], key, - key_hint: object=None, binary=False, query_id=None, + connection: 'Connection', cache: Union[str, int], key: Any, + key_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Clears the cache key without notifying listeners or cache writers. @@ -733,7 +744,7 @@ def cache_clear_key( def cache_clear_keys( connection: 'Connection', cache: Union[str, int], keys: list, - binary=False, query_id=None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Clears the cache keys without notifying listeners or cache writers. @@ -770,8 +781,9 @@ def cache_clear_keys( def cache_remove_key( - connection: 'Connection', cache: Union[str, int], key, - key_hint: object=None, binary=False, query_id=None, + connection: 'Connection', cache: Union[str, int], key: Any, + key_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Clears the cache key without notifying listeners or cache writers. 
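# The *_if_absent and *_if_equals variants above give compare-and-set
# semantics in a single round trip. A short sketch, reusing the `conn` and
# 'my cache' from the previous sketch:
from pyignite.api.key_value import (
    cache_put_if_absent, cache_replace_if_equals,
)

# insert only if the key is not present yet
cache_put_if_absent(conn, 'my cache', 'counter', 1)

# atomic compare-and-set: replace 1 with 2 only if the current value is 1
result = cache_replace_if_equals(conn, 'my cache', 'counter', 1, 2)
print(result.value)  # True if the swap happened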
@@ -817,9 +829,9 @@ def cache_remove_key( def cache_remove_if_equals( - connection: 'Connection', cache: Union[str, int], key, sample, - key_hint=None, sample_hint=None, - binary=False, query_id=None, + connection: 'Connection', cache: Union[str, int], key: Any, sample: Any, + key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Removes an entry with a given key if provided value is equal to @@ -872,7 +884,7 @@ def cache_remove_if_equals( def cache_remove_keys( connection: 'Connection', cache: Union[str, int], keys: Iterable, - binary=False, query_id=None, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Removes entries with given keys, notifying listeners and cache writers. @@ -909,8 +921,8 @@ def cache_remove_keys( def cache_remove_all( - connection: 'Connection', cache: Union[str, int], binary=False, - query_id=None, + connection: 'Connection', cache: Union[str, int], + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Removes all entries from cache, notifying listeners and cache writers. @@ -944,8 +956,8 @@ def cache_remove_all( def cache_get_size( - connection: 'Connection', cache: Union[str, int], peek_modes=0, - binary=False, query_id=None, + connection: 'Connection', cache: Union[str, int], peek_modes: int = 0, + binary: bool = False, query_id: Optional[int] = None, ) -> 'APIResult': """ Gets the number of entries in cache. @@ -965,10 +977,7 @@ def cache_get_size( otherwise. """ if not isinstance(peek_modes, (list, tuple)): - if peek_modes == 0: - peek_modes = [] - else: - peek_modes = [peek_modes] + peek_modes = [peek_modes] if peek_modes else [] query_struct = Query( OP_CACHE_GET_SIZE, @@ -993,3 +1002,61 @@ def cache_get_size( if result.status == 0: result.value = result.value['count'] return result + + +def cache_local_peek( + conn: 'Connection', cache: Union[str, int], + key: Any, key_hint: 'IgniteDataType' = None, peek_modes: int = 0, + binary: bool = False, query_id: Optional[int] = None, +) -> 'APIResult': + """ + Peeks at in-memory cached value using default optional peek mode. + + This method will not load value from any persistent store or from a remote + node. + + :param conn: connection: connection to Ignite server, + :param cache: name or ID of the cache, + :param key: entry key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param peek_modes: (optional) limit count to near cache partition + (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache + (PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL), + :param binary: (optional) pass True to keep the value in binary form. + False by default, + :param query_id: (optional) a value generated by client and returned as-is + in response.query_id. When the parameter is omitted, a random value + is generated, + :return: API result data object. Contains zero status and a peeked value + (null if not found). 
+ """ + if not isinstance(peek_modes, (list, tuple)): + peek_modes = [peek_modes] if peek_modes else [] + + query_struct = Query( + OP_CACHE_LOCAL_PEEK, + [ + ('hash_code', Int), + ('flag', Byte), + ('key', key_hint or AnyDataObject), + ('peek_modes', PeekModes), + ], + query_id=query_id, + ) + result = query_struct.perform( + conn, + query_params={ + 'hash_code': cache_id(cache), + 'flag': 1 if binary else 0, + 'key': key, + 'peek_modes': peek_modes, + }, + response_config=[ + ('value', AnyDataObject), + ], + ) + if result.status != 0: + return result + result.value = result.value['value'] + return result diff --git a/pyignite/api/result.py b/pyignite/api/result.py index 864ef61..f60a437 100644 --- a/pyignite/api/result.py +++ b/pyignite/api/result.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from pyignite.queries.op_codes import OP_SUCCESS from pyignite.datatypes import String @@ -32,7 +33,7 @@ class APIResult: value = None def __init__(self, response: 'Response'): - self.status = response.status_code + self.status = getattr(response, 'status_code', OP_SUCCESS) self.query_id = response.query_id if hasattr(response, 'error_message'): self.message = String.to_python(response.error_message) diff --git a/pyignite/api/sql.py b/pyignite/api/sql.py index 1a18496..ebb3e30 100644 --- a/pyignite/api/sql.py +++ b/pyignite/api/sql.py @@ -20,25 +20,27 @@ from typing import Union +from pyignite.constants import * from pyignite.datatypes import ( AnyDataArray, AnyDataObject, Bool, Byte, Int, Long, Map, Null, String, StructArray, ) from pyignite.datatypes.sql import StatementType -from pyignite.queries import Query, Response, SQLResponse +from pyignite.queries import Query from pyignite.queries.op_codes import * from pyignite.utils import cache_id from .result import APIResult def scan( - connection: 'Connection', cache: Union[str, int], page_size: int, - partitions: int=-1, local: bool=False, binary: bool=False, query_id=None, + conn: 'Connection', cache: Union[str, int], page_size: int, + partitions: int = -1, local: bool = False, binary: bool = False, + query_id: int = None, ) -> APIResult: """ Performs scan query. - :param connection: connection to Ignite server, + :param conn: connection to Ignite server, :param cache: name or ID of the cache, :param page_size: cursor page size, :param partitions: (optional) number of partitions to query @@ -75,7 +77,7 @@ def scan( query_id=query_id, ) result = query_struct.perform( - connection, + conn, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -96,13 +98,13 @@ def scan( def scan_cursor_get_page( - connection: 'Connection', cursor: int, query_id=None, + conn: 'Connection', cursor: int, query_id: int = None, ) -> APIResult: """ Fetches the next scan query cursor page by cursor ID that is obtained from `scan` function. - :param connection: connection to Ignite server, + :param conn: connection to Ignite server, :param cursor: cursor ID, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. 
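# Both cache_get_size and the new cache_local_peek accept a single peek mode
# or a list of them; zero (the default) means all partitions. A short sketch,
# reusing `conn` and 'my cache' from the sketches above (the peek only sees
# entries held in memory by the connected node, so it may well return None):
from pyignite.api.key_value import cache_get_size, cache_local_peek
from pyignite.datatypes import IntObject
from pyignite.datatypes.key_value import PeekModes

print(cache_get_size(conn, 'my cache').value)                      # all entries
print(cache_get_size(conn, 'my cache', PeekModes.PRIMARY).value)   # primary only
print(cache_local_peek(conn, 'my cache', 42, key_hint=IntObject).value)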
When the parameter is omitted, a random value @@ -126,7 +128,7 @@ def scan_cursor_get_page( query_id=query_id, ) result = query_struct.perform( - connection, + conn, query_params={ 'cursor': cursor, }, @@ -141,16 +143,17 @@ def scan_cursor_get_page( def sql( - connection: 'Connection', cache: Union[str, int], + conn: 'Connection', cache: Union[str, int], table_name: str, query_str: str, page_size: int, query_args=None, - distributed_joins: bool=False, replicated_only: bool=False, - local: bool=False, timeout: int=0, binary: bool=False, query_id=None + distributed_joins: bool = False, replicated_only: bool = False, + local: bool = False, timeout: int = 0, binary: bool = False, + query_id: int = None ) -> APIResult: """ Executes an SQL query over data stored in the cluster. The query returns the whole record (key and value). - :param connection: connection to Ignite server, + :param conn: connection to Ignite server, :param cache: name or ID of the cache, :param table_name: name of a type or SQL table, :param query_str: SQL query string, @@ -200,7 +203,7 @@ def sql( query_id=query_id, ) result = query_struct.perform( - connection, + conn, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -225,12 +228,12 @@ def sql( def sql_cursor_get_page( - connection: 'Connection', cursor: int, query_id=None, + conn: 'Connection', cursor: int, query_id: int = None, ) -> APIResult: """ Retrieves the next SQL query cursor page by cursor ID from `sql`. - :param connection: connection to Ignite server, + :param conn: connection to Ignite server, :param cursor: cursor ID, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value @@ -254,7 +257,7 @@ def sql_cursor_get_page( query_id=query_id, ) result = query_struct.perform( - connection, + conn, query_params={ 'cursor': cursor, }, @@ -269,18 +272,18 @@ def sql_cursor_get_page( def sql_fields( - connection: 'Connection', cache: Union[str, int], - query_str: str, page_size: int, query_args=None, schema: str=None, - statement_type: int=StatementType.ANY, distributed_joins: bool=False, - local: bool=False, replicated_only: bool=False, - enforce_join_order: bool=False, collocated: bool=False, lazy: bool=False, - include_field_names: bool=False, max_rows: int=-1, timeout: int=0, - binary: bool=False, query_id=None + conn: 'Connection', cache: Union[str, int], + query_str: str, page_size: int, query_args=None, schema: str = None, + statement_type: int = StatementType.ANY, distributed_joins: bool = False, + local: bool = False, replicated_only: bool = False, + enforce_join_order: bool = False, collocated: bool = False, + lazy: bool = False, include_field_names: bool = False, max_rows: int = -1, + timeout: int = 0, binary: bool = False, query_id: int = None ) -> APIResult: """ Performs SQL fields query. 
- :param connection: connection to Ignite server, + :param conn: connection to Ignite server, :param cache: name or ID of the cache, :param query_str: SQL query string, :param page_size: cursor page size, @@ -351,48 +354,39 @@ def sql_fields( query_id=query_id, ) - _, send_buffer = query_struct.from_python({ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, - 'schema': schema, - 'page_size': page_size, - 'max_rows': max_rows, - 'query_str': query_str, - 'query_args': query_args, - 'statement_type': statement_type, - 'distributed_joins': distributed_joins, - 'local': local, - 'replicated_only': replicated_only, - 'enforce_join_order': enforce_join_order, - 'collocated': collocated, - 'lazy': lazy, - 'timeout': timeout, - 'include_field_names': include_field_names, - }) - - connection.send(send_buffer) - - response_struct = SQLResponse( + return query_struct.perform( + conn, + query_params={ + 'hash_code': cache_id(cache), + 'flag': 1 if binary else 0, + 'schema': schema, + 'page_size': page_size, + 'max_rows': max_rows, + 'query_str': query_str, + 'query_args': query_args, + 'statement_type': statement_type, + 'distributed_joins': distributed_joins, + 'local': local, + 'replicated_only': replicated_only, + 'enforce_join_order': enforce_join_order, + 'collocated': collocated, + 'lazy': lazy, + 'timeout': timeout, + 'include_field_names': include_field_names, + }, + sql=True, include_field_names=include_field_names, has_cursor=True, ) - response_class, recv_buffer = response_struct.parse(connection) - response = response_class.from_buffer_copy(recv_buffer) - - result = APIResult(response) - if result.status != 0: - return result - result.value = response_struct.to_python(response) - return result def sql_fields_cursor_get_page( - connection: 'Connection', cursor: int, field_count: int, query_id=None, + conn: 'Connection', cursor: int, field_count: int, query_id: int = None, ) -> APIResult: """ Retrieves the next query result page by cursor ID from `sql_fields`. - :param connection: connection to Ignite server, + :param conn: connection to Ignite server, :param cursor: cursor ID, :param field_count: a number of fields in a row, :param query_id: (optional) a value generated by client and returned as-is @@ -416,26 +410,20 @@ def sql_fields_cursor_get_page( ], query_id=query_id, ) - - _, send_buffer = query_struct.from_python({ - 'cursor': cursor, - }) - - connection.send(send_buffer) - - response_struct = Response([ - ('data', StructArray([ - ('field_{}'.format(i), AnyDataObject) for i in range(field_count) - ])), - ('more', Bool), - ]) - response_class, recv_buffer = response_struct.parse(connection) - response = response_class.from_buffer_copy(recv_buffer) - - result = APIResult(response) + result = query_struct.perform( + conn, + query_params={ + 'cursor': cursor, + }, + response_config=[ + ('data', StructArray([(f'field_{i}', AnyDataObject) for i in range(field_count)])), + ('more', Bool), + ] + ) if result.status != 0: return result - value = response_struct.to_python(response) + + value = result.value result.value = { 'data': [], 'more': value['more'] @@ -446,12 +434,12 @@ def sql_fields_cursor_get_page( def resource_close( - connection: 'Connection', cursor: int, query_id=None + conn: 'Connection', cursor: int, query_id: int = None ) -> APIResult: """ Closes a resource, such as query cursor. 
- :param connection: connection to Ignite server, + :param conn: connection to Ignite server, :param cursor: cursor ID, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value @@ -468,7 +456,7 @@ def resource_close( query_id=query_id, ) return query_struct.perform( - connection, + conn, query_params={ 'cursor': cursor, }, diff --git a/pyignite/binary.py b/pyignite/binary.py index e726730..99f2f02 100644 --- a/pyignite/binary.py +++ b/pyignite/binary.py @@ -26,13 +26,16 @@ """ from collections import OrderedDict +import ctypes from typing import Any import attr +from pyignite.constants import * from .datatypes import * +from .datatypes.base import IgniteDataTypeProps from .exceptions import ParseError -from .utils import entity_id, schema_id +from .utils import entity_id, hashcode, schema_id ALLOWED_FIELD_TYPES = [ @@ -48,21 +51,11 @@ ] -class GenericObjectPropsMixin: +class GenericObjectProps(IgniteDataTypeProps): """ This class is mixed both to metaclass and to resulting class to make class properties universally available. You should not subclass it directly. """ - @property - def type_name(self) -> str: - """ Binary object type name. """ - return self._type_name - - @property - def type_id(self) -> int: - """ Binary object type ID. """ - return entity_id(self._type_name) - @property def schema(self) -> OrderedDict: """ Binary object schema. """ @@ -76,20 +69,21 @@ def schema_id(self) -> int: def __new__(cls, *args, **kwargs) -> Any: # allow all items in Binary Object schema to be populated as optional # arguments to `__init__()` with sensible defaults. - if cls is not GenericObjectMeta: - attributes = { - k: attr.ib( - type=getattr(v, 'pythonic', type(None)), - default=getattr(v, 'default', None), - ) for k, v in cls.schema.items() - } - attributes.update({'version': attr.ib(type=int, default=1)}) - cls = attr.s(cls, these=attributes) + attributes = {} + for k, v in cls.schema.items(): + attributes[k] = attr.ib(type=getattr(v, 'pythonic', type(None)), default=getattr(v, 'default', None)) + + attributes.update({'version': attr.ib(type=int, default=1)}) + cls = attr.s(cls, these=attributes) # skip parameters return super().__new__(cls) -class GenericObjectMeta(type, GenericObjectPropsMixin): +class GenericObjectPropsMeta(type, GenericObjectProps): + pass + + +class GenericObjectMeta(GenericObjectPropsMeta): """ Complex (or Binary) Object metaclass. It is aimed to help user create classes, which objects could serve as a pythonic representation of the @@ -103,10 +97,95 @@ def __new__( mcs: Any, name: str, base_classes: tuple, namespace: dict, **kwargs ) -> Any: """ Sort out class creation arguments. """ - return super().__new__( - mcs, name, (GenericObjectPropsMixin, )+base_classes, namespace + + result = super().__new__( + mcs, name, (GenericObjectProps, )+base_classes, namespace ) + def _build(self, client: 'Client' = None) -> int: + """ + Method for building binary representation of the Generic object + and calculating a hashcode from it. 
+ + :param self: Generic object instance, + :param client: (optional) connection to Ignite cluster, + """ + if client is None: + compact_footer = True + else: + compact_footer = client.compact_footer + + # prepare header + header_class = BinaryObject.build_header() + header = header_class() + header.type_code = int.from_bytes( + BinaryObject.type_code, + byteorder=PROTOCOL_BYTE_ORDER + ) + header.flags = BinaryObject.USER_TYPE | BinaryObject.HAS_SCHEMA + if compact_footer: + header.flags |= BinaryObject.COMPACT_FOOTER + header.version = self.version + header.type_id = self.type_id + header.schema_id = self.schema_id + + # create fields and calculate offsets + offsets = [ctypes.sizeof(header_class)] + field_buffer = bytearray() + schema_items = list(self.schema.items()) + for field_name, field_type in schema_items: + partial_buffer = field_type.from_python( + getattr( + self, field_name, getattr(field_type, 'default', None) + ) + ) + offsets.append(max(offsets) + len(partial_buffer)) + field_buffer += partial_buffer + + offsets = offsets[:-1] + + # create footer + if max(offsets, default=0) < 255: + header.flags |= BinaryObject.OFFSET_ONE_BYTE + elif max(offsets) < 65535: + header.flags |= BinaryObject.OFFSET_TWO_BYTES + schema_class = BinaryObject.schema_type(header.flags) * len(offsets) + schema = schema_class() + if compact_footer: + for i, offset in enumerate(offsets): + schema[i] = offset + else: + for i, offset in enumerate(offsets): + schema[i].field_id = entity_id(schema_items[i][0]) + schema[i].offset = offset + + # calculate size and hash code + header.schema_offset = ( + ctypes.sizeof(header_class) + + len(field_buffer) + ) + header.length = header.schema_offset + ctypes.sizeof(schema_class) + header.hash_code = hashcode(field_buffer + bytes(schema)) + + # reuse the results + self._buffer = bytes(header) + field_buffer + bytes(schema) + self._hashcode = header.hash_code + + def _setattr(self, attr_name: str, attr_value: Any): + # reset binary representation, if any field is changed + if attr_name in self._schema.keys(): + self._buffer = None + self._hashcode = None + + # `super()` is really need these parameters + super(result, self).__setattr__(attr_name, attr_value) + + setattr(result, _build.__name__, _build) + setattr(result, '__setattr__', _setattr) + setattr(result, '_buffer', None) + setattr(result, '_hashcode', None) + return result + @staticmethod def _validate_schema(schema: dict): for field_type in schema.values(): @@ -117,7 +196,7 @@ def _validate_schema(schema: dict): def __init__( cls, name: str, base_classes: tuple, namespace: dict, - type_name: str=None, schema: OrderedDict=None, **kwargs + type_name: str = None, schema: OrderedDict = None, **kwargs ): """ Initializes binary object class. diff --git a/pyignite/cache.py b/pyignite/cache.py index 6cd7377..64093e8 100644 --- a/pyignite/cache.py +++ b/pyignite/cache.py @@ -13,13 +13,21 @@ # See the License for the specific language governing permissions and # limitations under the License. 
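# The metaclass above now pre-builds the binary form of every Complex object
# (see _build) and drops that cached buffer whenever a schema field is
# reassigned. A minimal sketch of declaring and storing such a class
# (assuming a node on the usual localhost defaults):
from collections import OrderedDict

from pyignite import Client, GenericObjectMeta
from pyignite.datatypes import IntObject, String


class Person(
    metaclass=GenericObjectMeta,
    type_name='Person',
    schema=OrderedDict([
        ('id', IntObject),
        ('name', String),
    ]),
):
    pass


client = Client()
client.connect('127.0.0.1', 10800)
cache = client.get_or_create_cache('persons')
cache.put(1, Person(id=1, name='Ivan'))
print(cache.get(1).name)  # Ivan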
-from typing import Any, Iterable, Optional, Union +import time +from typing import Any, Dict, Iterable, Optional, Tuple, Union +from .constants import * +from .binary import GenericObjectMeta from .datatypes import prop_codes +from .datatypes.internal import AnyDataObject from .exceptions import ( CacheCreationError, CacheError, ParameterError, SQLError, + connection_errors, +) +from .utils import ( + cache_id, get_field_by_id, is_wrapped, + status_to_exception, unsigned, unwrap_binary, ) -from .utils import cache_id, is_wrapped, status_to_exception, unwrap_binary from .api.cache_config import ( cache_create, cache_create_with_config, cache_get_or_create, cache_get_or_create_with_config, @@ -35,6 +43,7 @@ cache_remove_if_equals, cache_replace_if_equals, cache_get_size, ) from .api.sql import scan, scan_cursor_get_page, sql, sql_cursor_get_page +from .api.affinity import cache_get_node_partitions PROP_CODES = set([ @@ -63,6 +72,8 @@ class Cache: :py:meth:`~pyignite.client.Client.get_cache` methods instead. See :ref:`this example ` on how to do it. """ + + affinity = None _cache_id = None _name = None _client = None @@ -70,7 +81,7 @@ class Cache: @staticmethod def _validate_settings( - settings: Union[str, dict]=None, get_only: bool=False, + settings: Union[str, dict] = None, get_only: bool = False, ): if any([ not settings, @@ -89,8 +100,8 @@ def _validate_settings( raise ParameterError('Only cache name allowed as a parameter') def __init__( - self, client: 'Client', settings: Union[str, dict]=None, - with_get: bool=False, get_only: bool=False, + self, client: 'Client', settings: Union[str, dict] = None, + with_get: bool = False, get_only: bool = False, ): """ Initialize cache object. @@ -113,11 +124,26 @@ def __init__( if not get_only: func = CACHE_CREATE_FUNCS[type(settings) is dict][with_get] - result = func(client, settings) + result = func(client.random_node, settings) if result.status != 0: raise CacheCreationError(result.message) self._cache_id = cache_id(self._name) + self.affinity = { + 'version': (0, 0), + } + + def get_protocol_version(self) -> Optional[Tuple]: + """ + Returns the tuple of major, minor, and revision numbers of the used + thin protocol version, or None, if no connection to the Ignite cluster + was not yet established. + + This method is not a part of the public API. Unless you wish to + extend the `pyignite` capabilities (with additional testing, logging, + examining connections, et c.) you probably should not use it. + """ + return self.client.protocol_version @property def settings(self) -> Optional[dict]: @@ -130,7 +156,10 @@ def settings(self) -> Optional[dict]: :return: dict of cache properties and their values. """ if self._settings is None: - config_result = cache_get_configuration(self._client, self._cache_id) + config_result = cache_get_configuration( + self.get_best_node(), + self._cache_id + ) if config_result.status == 0: self._settings = config_result.value else: @@ -185,10 +214,124 @@ def destroy(self): """ Destroys cache with a given name. """ - return cache_destroy(self._client, self._cache_id) + return cache_destroy(self.get_best_node(), self._cache_id) + + @status_to_exception(CacheError) + def _get_affinity(self, conn: 'Connection') -> Dict: + """ + Queries server for affinity mappings. Retries in case + of an intermittent error (most probably “Getting affinity for topology + version earlier than affinity is calculated”). + + :param conn: connection to Igneite server, + :return: OP_CACHE_PARTITIONS operation result value. 
+ """ + for _ in range(AFFINITY_RETRIES or 1): + result = cache_get_node_partitions(conn, self._cache_id) + if result.status == 0 and result.value['partition_mapping']: + break + time.sleep(AFFINITY_DELAY) + + return result + + def get_best_node( + self, key: Any = None, key_hint: 'IgniteDataType' = None, + ) -> 'Connection': + """ + Returns the node from the list of the nodes, opened by client, that + most probably contains the needed key-value pair. See IEP-23. + + This method is not a part of the public API. Unless you wish to + extend the `pyignite` capabilities (with additional testing, logging, + examining connections, et c.) you probably should not use it. + + :param key: (optional) pythonic key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :return: Ignite connection object. + """ + conn = self._client.random_node + + if self.client.partition_aware and key is not None: + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + if self.affinity['version'] < self._client.affinity_version: + # update partition mapping + while True: + try: + self.affinity = self._get_affinity(conn) + break + except connection_errors: + # retry if connection failed + pass + except CacheError: + # server did not create mapping in time + return conn + + # flatten it a bit + try: + self.affinity.update(self.affinity['partition_mapping'][0]) + except IndexError: + return conn + del self.affinity['partition_mapping'] + + # calculate the number of partitions + parts = 0 + if 'node_mapping' in self.affinity: + for p in self.affinity['node_mapping'].values(): + parts += len(p) + + self.affinity['number_of_partitions'] = parts + else: + # get number of partitions + parts = self.affinity.get('number_of_partitions') + + if not parts: + return conn + + if self.affinity['is_applicable']: + affinity_key_id = self.affinity['cache_config'].get( + key_hint.type_id, + None + ) + if affinity_key_id and isinstance(key, GenericObjectMeta): + key, key_hint = get_field_by_id(key, affinity_key_id) + + # calculate partition for key or affinity key + # (algorithm is taken from `RendezvousAffinityFunction.java`) + base_value = key_hint.hashcode(key, self._client) + mask = parts - 1 + + if parts & mask == 0: + part = (base_value ^ (unsigned(base_value) >> 16)) & mask + else: + part = abs(base_value // parts) + + assert 0 <= part < parts, 'Partition calculation has failed' + + # search for connection + try: + node_uuid, best_conn = None, None + for u, p in self.affinity['node_mapping'].items(): + if part in p: + node_uuid = u + break + + if node_uuid: + for n in conn.client._nodes: + if n.uuid == node_uuid: + best_conn = n + break + if best_conn and best_conn.alive: + conn = best_conn + except KeyError: + pass + + return conn @status_to_exception(CacheError) - def get(self, key, key_hint: object=None) -> Any: + def get(self, key, key_hint: object = None) -> Any: """ Retrieves a value from cache by key. @@ -197,12 +340,22 @@ def get(self, key, key_hint: object=None) -> Any: should be converted, :return: value retrieved. 
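# The partition number in get_best_node is derived the same way the server's
# RendezvousAffinityFunction does it. A standalone sketch of that arithmetic
# (the hash value is illustrative; masking with 0xFFFFFFFF plays the role of
# the unsigned() helper):
parts = 1024                  # number of partitions reported by the server
key_hash = -1568047198        # hashcode of the serialized key

mask = parts - 1
if parts & mask == 0:         # parts is a power of two
    part = (key_hash ^ ((key_hash & 0xFFFFFFFF) >> 16)) & mask
else:
    part = abs(key_hash // parts)
print(part)                   # partition the key belongs to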
""" - result = cache_get(self._client, self._cache_id, key, key_hint=key_hint) + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + result = cache_get( + self.get_best_node(key, key_hint), + self._cache_id, + key, + key_hint=key_hint + ) result.value = self._process_binary(result.value) return result @status_to_exception(CacheError) - def put(self, key, value, key_hint: object=None, value_hint: object=None): + def put( + self, key, value, key_hint: object = None, value_hint: object = None + ): """ Puts a value with a given key to cache (overwriting existing value if any). @@ -214,8 +367,12 @@ def put(self, key, value, key_hint: object=None, value_hint: object=None): :param value_hint: (optional) Ignite data type, for which the given value should be converted. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + return cache_put( - self._client, self._cache_id, key, value, + self.get_best_node(key, key_hint), + self._cache_id, key, value, key_hint=key_hint, value_hint=value_hint ) @@ -227,7 +384,7 @@ def get_all(self, keys: list) -> list: :param keys: list of keys or tuples of (key, key_hint), :return: a dict of key-value pairs. """ - result = cache_get_all(self._client, self._cache_id, keys) + result = cache_get_all(self.get_best_node(), self._cache_id, keys) if result.value: for key, value in result.value.items(): result.value[key] = self._process_binary(value) @@ -243,11 +400,11 @@ def put_all(self, pairs: dict): to save. Each key or value can be an item of representable Python type or a tuple of (item, hint), """ - return cache_put_all(self._client, self._cache_id, pairs) + return cache_put_all(self.get_best_node(), self._cache_id, pairs) @status_to_exception(CacheError) def replace( - self, key, value, key_hint: object=None, value_hint: object=None + self, key, value, key_hint: object = None, value_hint: object = None ): """ Puts a value with a given key to cache only if the key already exist. @@ -259,28 +416,33 @@ def replace( :param value_hint: (optional) Ignite data type, for which the given value should be converted. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + result = cache_replace( - self._client, self._cache_id, key, value, + self.get_best_node(key, key_hint), + self._cache_id, key, value, key_hint=key_hint, value_hint=value_hint ) result.value = self._process_binary(result.value) return result @status_to_exception(CacheError) - def clear(self, keys: Optional[list]=None): + def clear(self, keys: Optional[list] = None): """ Clears the cache without notifying listeners or cache writers. :param keys: (optional) list of cache keys or (key, key type hint) tuples to clear (default: clear all). """ + conn = self.get_best_node() if keys: - return cache_clear_keys(self._client, self._cache_id, keys) + return cache_clear_keys(conn, self._cache_id, keys) else: - return cache_clear(self._client, self._cache_id) + return cache_clear(conn, self._cache_id) @status_to_exception(CacheError) - def clear_key(self, key, key_hint: object=None): + def clear_key(self, key, key_hint: object = None): """ Clears the cache key without notifying listeners or cache writers. 
@@ -288,8 +450,14 @@ def clear_key(self, key, key_hint: object=None): :param key_hint: (optional) Ignite data type, for which the given key should be converted, """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + return cache_clear_key( - self._client, self._cache_id, key, key_hint=key_hint + self.get_best_node(key, key_hint), + self._cache_id, + key, + key_hint=key_hint ) @status_to_exception(CacheError) @@ -302,8 +470,14 @@ def contains_key(self, key, key_hint=None) -> bool: should be converted, :return: boolean `True` when key is present, `False` otherwise. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + return cache_contains_key( - self._client, self._cache_id, key, key_hint=key_hint + self.get_best_node(key, key_hint), + self._cache_id, + key, + key_hint=key_hint ) @status_to_exception(CacheError) @@ -330,8 +504,14 @@ def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: value should be converted. :return: old value or None. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + result = cache_get_and_put( - self._client, self._cache_id, key, value, key_hint, value_hint + self.get_best_node(key, key_hint), + self._cache_id, + key, value, + key_hint, value_hint ) result.value = self._process_binary(result.value) return result @@ -352,8 +532,14 @@ def get_and_put_if_absent( value should be converted, :return: old value or None. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + result = cache_get_and_put_if_absent( - self._client, self._cache_id, key, value, key_hint, value_hint + self.get_best_node(key, key_hint), + self._cache_id, + key, value, + key_hint, value_hint ) result.value = self._process_binary(result.value) return result @@ -371,8 +557,14 @@ def put_if_absent(self, key, value, key_hint=None, value_hint=None): :param value_hint: (optional) Ignite data type, for which the given value should be converted. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + return cache_put_if_absent( - self._client, self._cache_id, key, value, key_hint, value_hint + self.get_best_node(key, key_hint), + self._cache_id, + key, value, + key_hint, value_hint ) @status_to_exception(CacheError) @@ -385,8 +577,14 @@ def get_and_remove(self, key, key_hint=None) -> Any: should be converted, :return: old value or None. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + result = cache_get_and_remove( - self._client, self._cache_id, key, key_hint + self.get_best_node(key, key_hint), + self._cache_id, + key, + key_hint ) result.value = self._process_binary(result.value) return result @@ -408,8 +606,14 @@ def get_and_replace( value should be converted. :return: old value or None. 
""" + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + result = cache_get_and_replace( - self._client, self._cache_id, key, value, key_hint, value_hint + self.get_best_node(key, key_hint), + self._cache_id, + key, value, + key_hint, value_hint ) result.value = self._process_binary(result.value) return result @@ -423,7 +627,12 @@ def remove_key(self, key, key_hint=None): :param key_hint: (optional) Ignite data type, for which the given key should be converted, """ - return cache_remove_key(self._client, self._cache_id, key, key_hint) + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + return cache_remove_key( + self.get_best_node(key, key_hint), self._cache_id, key, key_hint + ) @status_to_exception(CacheError) def remove_keys(self, keys: list): @@ -433,14 +642,16 @@ def remove_keys(self, keys: list): :param keys: list of keys or tuples of (key, key_hint) to remove. """ - return cache_remove_keys(self._client, self._cache_id, keys) + return cache_remove_keys( + self.get_best_node(), self._cache_id, keys + ) @status_to_exception(CacheError) def remove_all(self): """ Removes all cache entries, notifying listeners and cache writers. """ - return cache_remove_all(self._client, self._cache_id) + return cache_remove_all(self.get_best_node(), self._cache_id) @status_to_exception(CacheError) def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): @@ -455,8 +666,14 @@ def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): :param sample_hint: (optional) Ignite data type, for whic the given sample should be converted. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + return cache_remove_if_equals( - self._client, self._cache_id, key, sample, key_hint, sample_hint + self.get_best_node(key, key_hint), + self._cache_id, + key, sample, + key_hint, sample_hint ) @status_to_exception(CacheError) @@ -479,8 +696,13 @@ def replace_if_equals( value should be converted, :return: boolean `True` when key is present, `False` otherwise. """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + result = cache_replace_if_equals( - self._client, self._cache_id, key, sample, value, + self.get_best_node(key, key_hint), + self._cache_id, + key, sample, value, key_hint, sample_hint, value_hint ) result.value = self._process_binary(result.value) @@ -496,9 +718,13 @@ def get_size(self, peek_modes=0): (PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL), :return: integer number of cache entries. """ - return cache_get_size(self._client, self._cache_id, peek_modes) + return cache_get_size( + self.get_best_node(), self._cache_id, peek_modes + ) - def scan(self, page_size: int=1, partitions: int=-1, local: bool=False): + def scan( + self, page_size: int = 1, partitions: int = -1, local: bool = False + ): """ Returns all key-value pairs from the cache, similar to `get_all`, but with internal pagination, which is slower, but safer. @@ -511,7 +737,15 @@ def scan(self, page_size: int=1, partitions: int=-1, local: bool=False): on local node only. Defaults to False, :return: generator with key-value pairs. 
""" - result = scan(self._client, self._cache_id, page_size, partitions, local) + node = self.get_best_node() + + result = scan( + node, + self._cache_id, + page_size, + partitions, + local + ) if result.status != 0: raise CacheError(result.message) @@ -522,7 +756,7 @@ def scan(self, page_size: int=1, partitions: int=-1, local: bool=False): yield k, v while result.value['more']: - result = scan_cursor_get_page(self._client, cursor) + result = scan_cursor_get_page(node, cursor) if result.status != 0: raise CacheError(result.message) @@ -532,9 +766,9 @@ def scan(self, page_size: int=1, partitions: int=-1, local: bool=False): yield k, v def select_row( - self, query_str: str, page_size: int=1, - query_args: Optional[list]=None, distributed_joins: bool=False, - replicated_only: bool=False, local: bool=False, timeout: int=0 + self, query_str: str, page_size: int = 1, + query_args: Optional[list] = None, distributed_joins: bool = False, + replicated_only: bool = False, local: bool = False, timeout: int = 0 ): """ Executes a simplified SQL SELECT query over data stored in the cache. @@ -554,6 +788,8 @@ def select_row( disables timeout (default), :return: generator with key-value pairs. """ + node = self.get_best_node() + def generate_result(value): cursor = value['cursor'] more = value['more'] @@ -563,7 +799,7 @@ def generate_result(value): yield k, v while more: - inner_result = sql_cursor_get_page(self._client, cursor) + inner_result = sql_cursor_get_page(node, cursor) if result.status != 0: raise SQLError(result.message) more = inner_result.value['more'] @@ -578,7 +814,7 @@ def generate_result(value): if not type_name: raise SQLError('Value type is unknown') result = sql( - self._client, + node, self._cache_id, type_name, query_str, diff --git a/pyignite/client.py b/pyignite/client.py index d5a9464..3202b78 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -41,7 +41,10 @@ """ from collections import defaultdict, OrderedDict -from typing import Iterable, Type, Union +import random +import re +from itertools import chain +from typing import Dict, Iterable, List, Optional, Tuple, Type, Union from .api.binary import get_binary_type, put_binary_type from .api.cache_config import cache_get_names @@ -51,15 +54,20 @@ from .constants import * from .datatypes import BinaryObject from .datatypes.internal import tc_map -from .exceptions import BinaryTypeError, CacheError, SQLError -from .utils import entity_id, schema_id, status_to_exception +from .exceptions import ( + BinaryTypeError, CacheError, ReconnectError, SQLError, connection_errors, +) +from .utils import ( + capitalize, entity_id, schema_id, process_delimiter, + status_to_exception, is_iterable, +) from .binary import GenericObjectMeta __all__ = ['Client'] -class Client(Connection): +class Client: """ This is a main `pyignite` class, that is build upon the :class:`~pyignite.connection.Connection`. 
In addition to the attributes, @@ -72,14 +80,22 @@ class Client(Connection): """ _registry = defaultdict(dict) - _compact_footer = None + _compact_footer: bool = None + _connection_args: Dict = None + _current_node: int = None + _nodes: List[Connection] = None - def _transfer_params(self, to: 'Client'): - super()._transfer_params(to) - to._registry = self._registry - to._compact_footer = self._compact_footer + # used for Complex object data class names sanitizing + _identifier = re.compile(r'[^0-9a-zA-Z_.+$]', re.UNICODE) + _ident_start = re.compile(r'^[^a-zA-Z_]+', re.UNICODE) - def __init__(self, compact_footer: bool=None, *args, **kwargs): + affinity_version: Optional[Tuple] = None + protocol_version: Optional[Tuple] = None + + def __init__( + self, compact_footer: bool = None, partition_aware: bool = False, + **kwargs + ): """ Initialize client. @@ -88,9 +104,154 @@ def __init__(self, compact_footer: bool=None, *args, **kwargs): Default is to use the same approach the server is using (None). Apache Ignite binary protocol documentation on this topic: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-schema + :param partition_aware: (optional) try to calculate the exact data + placement from the key before to issue the key operation to the + server node: + https://cwiki.apache.org/confluence/display/IGNITE/IEP-23%3A+Best+Effort+Affinity+for+thin+clients + The feature is in experimental status, so the parameter is `False` + by default. This will be changed later. """ self._compact_footer = compact_footer - super().__init__(*args, **kwargs) + self._connection_args = kwargs + self._nodes = [] + self._current_node = 0 + self._partition_aware = partition_aware + self.affinity_version = (0, 0) + + def get_protocol_version(self) -> Optional[Tuple]: + """ + Returns the tuple of major, minor, and revision numbers of the used + thin protocol version, or None, if no connection to the Ignite cluster + was not yet established. + + This method is not a part of the public API. Unless you wish to + extend the `pyignite` capabilities (with additional testing, logging, + examining connections, et c.) you probably should not use it. + """ + return self.protocol_version + + @property + def partition_aware(self): + return self._partition_aware and self.partition_awareness_supported_by_protocol + + @property + def partition_awareness_supported_by_protocol(self): + # TODO: Need to re-factor this. I believe, we need separate class or + # set of functions to work with protocol versions without manually + # comparing versions with just some random tuples + return self.protocol_version is not None and self.protocol_version >= (1, 4, 0) + + def connect(self, *args): + """ + Connect to Ignite cluster node(s). + + :param args: (optional) host(s) and port(s) to connect to. 
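# connect() accepts no arguments (localhost defaults), a single host and port,
# or any iterable of (host, port) pairs; without partition awareness the extra
# addresses act as a failover list, with it they are all opened. A minimal
# sketch (ports are illustrative):
from pyignite import Client

client = Client()
client.connect([
    ('127.0.0.1', 10800),
    ('127.0.0.1', 10801),
    ('127.0.0.1', 10802),
])
print(client.get_protocol_version())  # e.g. (1, 4, 0)
client.close()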
+ """ + if len(args) == 0: + # no parameters − use default Ignite host and port + nodes = [(IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT)] + elif len(args) == 1 and is_iterable(args[0]): + # iterable of host-port pairs is given + nodes = args[0] + elif ( + len(args) == 2 + and isinstance(args[0], str) + and isinstance(args[1], int) + ): + # host and port are given + nodes = [args] + else: + raise ConnectionError('Connection parameters are not valid.') + + # the following code is quite twisted, because the protocol version + # is initially unknown + + # TODO: open first node in foreground, others − in background + for i, node in enumerate(nodes): + host, port = node + conn = Connection(self, **self._connection_args) + conn.host = host + conn.port = port + + try: + if self.protocol_version is None or self.partition_aware: + # open connection before adding to the pool + conn.connect(host, port) + + # now we have the protocol version + if not self.partition_aware: + # do not try to open more nodes + self._current_node = i + else: + # take a chance to schedule the reconnection + # for all the failed connections, that was probed + # before this + for failed_node in self._nodes[:i]: + failed_node.reconnect() + + except connection_errors: + conn._fail() + if self.partition_aware: + # schedule the reconnection + conn.reconnect() + + self._nodes.append(conn) + + if self.protocol_version is None: + raise ReconnectError('Can not connect.') + + def close(self): + for conn in self._nodes: + conn.close() + self._nodes.clear() + + @property + def random_node(self) -> Connection: + """ + Returns random usable node. + + This method is not a part of the public API. Unless you wish to + extend the `pyignite` capabilities (with additional testing, logging, + examining connections, et c.) you probably should not use it. 
+ """ + if self.partition_aware: + # if partition awareness is used just pick a random connected node + try: + return random.choice( + list(n for n in self._nodes if n.alive) + ) + except IndexError: + # cannot choose from an empty sequence + raise ReconnectError('Can not reconnect: out of nodes.') from None + else: + # if partition awareness is not used then just return the current + # node if it's alive or the next usable node if connection with the + # current is broken + node = self._nodes[self._current_node] + if node.alive: + return node + + # close current (supposedly failed) node + self._nodes[self._current_node].close() + + # advance the node index + self._current_node += 1 + if self._current_node >= len(self._nodes): + self._current_node = 0 + + # prepare the list of node indexes to try to connect to + num_nodes = len(self._nodes) + for i in chain(range(self._current_node, num_nodes), range(self._current_node)): + node = self._nodes[i] + try: + node.connect(node.host, node.port) + except connection_errors: + pass + else: + return node + + # no nodes left + raise ReconnectError('Can not reconnect: out of nodes.') @status_to_exception(BinaryTypeError) def get_binary_type(self, binary_type: Union[str, int]) -> dict: @@ -135,7 +296,9 @@ def convert_schema( ) return converted_schema - result = get_binary_type(self, binary_type) + conn = self.random_node + + result = get_binary_type(conn, binary_type) if result.status != 0 or not result.value['type_exists']: return result @@ -178,8 +341,8 @@ def compact_footer(self, value: bool): @status_to_exception(BinaryTypeError) def put_binary_type( - self, type_name: str, affinity_key_field: str=None, - is_enum=False, schema: dict=None + self, type_name: str, affinity_key_field: str = None, + is_enum=False, schema: dict = None ): """ Registers binary type information in cluster. Do not update binary @@ -197,11 +360,11 @@ def put_binary_type( Binary type with no fields is OK. """ return put_binary_type( - self, type_name, affinity_key_field, is_enum, schema + self.random_node, type_name, affinity_key_field, is_enum, schema ) @staticmethod - def _create_dataclass(type_name: str, schema: OrderedDict=None) -> Type: + def _create_dataclass(type_name: str, schema: OrderedDict = None) -> Type: """ Creates default (generic) class for Ignite Complex object. @@ -224,13 +387,42 @@ def _sync_binary_registry(self, type_id: int): for schema in type_info['schemas']: if not self._registry[type_id].get(schema_id(schema), None): data_class = self._create_dataclass( - type_info['type_name'], + self._create_type_name(type_info['type_name']), schema, ) self._registry[type_id][schema_id(schema)] = data_class + @classmethod + def _create_type_name(cls, type_name: str) -> str: + """ + Creates Python data class name from Ignite binary type name. + + Handles all the special cases found in + `java.org.apache.ignite.binary.BinaryBasicNameMapper.simpleName()`. + Tries to adhere to PEP8 along the way. 
+ """ + + # general sanitizing + type_name = cls._identifier.sub('', type_name) + + # - name ending with '$' (Scala) + # - name + '$' + some digits (anonymous class) + # - '$$Lambda$' in the middle + type_name = process_delimiter(type_name, '$') + + # .NET outer/inner class delimiter + type_name = process_delimiter(type_name, '+') + + # Java fully qualified class name + type_name = process_delimiter(type_name, '.') + + # start chars sanitizing + type_name = capitalize(cls._ident_start.sub('', type_name)) + + return type_name + def register_binary_type( - self, data_class: Type, affinity_key_field: str=None, + self, data_class: Type, affinity_key_field: str = None, ): """ Register the given class as a representation of a certain Complex @@ -250,8 +442,8 @@ def register_binary_type( self._registry[data_class.type_id][data_class.schema_id] = data_class def query_binary_type( - self, binary_type: Union[int, str], schema: Union[int, dict]=None, - sync: bool=True + self, binary_type: Union[int, str], schema: Union[int, dict] = None, + sync: bool = True ): """ Queries the registry of Complex object classes. @@ -324,16 +516,16 @@ def get_cache_names(self) -> list: :return: list of cache names. """ - return cache_get_names(self) + return cache_get_names(self.random_node) def sql( - self, query_str: str, page_size: int=1, query_args: Iterable=None, - schema: Union[int, str]='PUBLIC', - statement_type: int=0, distributed_joins: bool=False, - local: bool=False, replicated_only: bool=False, - enforce_join_order: bool=False, collocated: bool=False, - lazy: bool=False, include_field_names: bool=False, - max_rows: int=-1, timeout: int=0, + self, query_str: str, page_size: int = 1, query_args: Iterable = None, + schema: Union[int, str] = 'PUBLIC', + statement_type: int = 0, distributed_joins: bool = False, + local: bool = False, replicated_only: bool = False, + enforce_join_order: bool = False, collocated: bool = False, + lazy: bool = False, include_field_names: bool = False, + max_rows: int = -1, timeout: int = 0, ): """ Runs an SQL query and returns its result. @@ -384,7 +576,7 @@ def generate_result(value): while more: inner_result = sql_fields_cursor_get_page( - self, cursor, field_count + conn, cursor, field_count ) if inner_result.status != 0: raise SQLError(result.message) @@ -392,9 +584,11 @@ def generate_result(value): for line in inner_result.value['data']: yield line + conn = self.random_node + schema = self.get_or_create_cache(schema) result = sql_fields( - self, schema.cache_id, query_str, + conn, schema.cache_id, query_str, page_size, query_args, schema.name, statement_type, distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, diff --git a/pyignite/connection/__init__.py b/pyignite/connection/__init__.py index 1f6f0c0..cf40718 100644 --- a/pyignite/connection/__init__.py +++ b/pyignite/connection/__init__.py @@ -33,15 +33,20 @@ as well as Ignite protocol handshaking. 
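# A short sketch of the reworked sql() in action, assuming the client from the
# sketches above is connected; the table and data are illustrative, and with
# include_field_names=True the first yielded row should be the column names:
client.sql(
    'CREATE TABLE IF NOT EXISTS city (id INT PRIMARY KEY, name VARCHAR)'
)
client.sql(
    'INSERT INTO city (id, name) VALUES (?, ?)', query_args=[1, 'Moscow']
)
rows = client.sql('SELECT id, name FROM city', include_field_names=True)
print(next(rows))   # e.g. ['ID', 'NAME']
for row in rows:
    print(row)      # e.g. [1, 'Moscow']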
""" +from collections import OrderedDict import socket +from threading import Lock +from typing import Union from pyignite.constants import * from pyignite.exceptions import ( - HandshakeError, ParameterError, ReconnectError, SocketError, + HandshakeError, ParameterError, SocketError, connection_errors, ) +from pyignite.datatypes import Byte, Int, Short, String, UUIDObject +from pyignite.datatypes.internal import Struct +from pyignite.utils import DaemonicTimer -from pyignite.utils import is_iterable -from .handshake import HandshakeRequest, read_response +from .handshake import HandshakeRequest from .ssl import wrap @@ -60,18 +65,22 @@ class Connection: """ _socket = None - nodes = None + _failed = None + _in_use = None + + client = None host = None port = None timeout = None prefetch = None username = None password = None + ssl_params = {} + uuid = None @staticmethod - def _check_kwargs(kwargs): + def _check_ssl_params(params): expected_args = [ - 'timeout', 'use_ssl', 'ssl_version', 'ssl_ciphers', @@ -80,22 +89,24 @@ def _check_kwargs(kwargs): 'ssl_keyfile_password', 'ssl_certfile', 'ssl_ca_certfile', - 'username', - 'password', ] - for kw in kwargs: - if kw not in expected_args: + for param in params: + if param not in expected_args: raise ParameterError(( 'Unexpected parameter for connection initialization: `{}`' - ).format(kw)) + ).format(param)) - def __init__(self, prefetch: bytes=b'', **kwargs): + def __init__( + self, client: 'Client', prefetch: bytes = b'', timeout: int = None, + username: str = None, password: str = None, **ssl_params + ): """ Initialize connection. For the use of the SSL-related parameters see https://docs.python.org/3/library/ssl.html#ssl-certificates. + :param client: Ignite client object, :param prefetch: (optional) initialize the read-ahead data buffer. Empty by default, :param timeout: (optional) sets timeout (in seconds) for each socket @@ -131,47 +142,159 @@ def __init__(self, prefetch: bytes=b'', **kwargs): cluster, :param password: (optional) password to authenticate to Ignite cluster. """ + self.client = client self.prefetch = prefetch - self._check_kwargs(kwargs) - self.timeout = kwargs.pop('timeout', None) - self.username = kwargs.pop('username', None) - self.password = kwargs.pop('password', None) - if all([self.username, self.password, 'use_ssl' not in kwargs]): - kwargs['use_ssl'] = True - self.init_kwargs = kwargs - - read_response = read_response - _wrap = wrap + self.timeout = timeout + self.username = username + self.password = password + self._check_ssl_params(ssl_params) + if self.username and self.password and 'use_ssl' not in ssl_params: + ssl_params['use_ssl'] = True + self.ssl_params = ssl_params + self._failed = False + self._in_use = Lock() @property def socket(self) -> socket.socket: - """ - Network socket. - """ - if self._socket is None: - self._reconnect() + """ Network socket. """ return self._socket + @property + def closed(self) -> bool: + """ Tells if socket is closed. """ + return self._socket is None + + @property + def failed(self) -> bool: + """ Tells if connection is failed. """ + return self._failed + + @property + def alive(self) -> bool: + """ Tells if connection is up and no failure detected. 
""" + return not (self._failed or self.closed) + def __repr__(self) -> str: - if self.host and self.port: - return '{}:{}'.format(self.host, self.port) - else: - return '' + return '{}:{}'.format(self.host or '?', self.port or '?') + + _wrap = wrap + + def get_protocol_version(self): + """ + Returns the tuple of major, minor, and revision numbers of the used + thin protocol version, or None, if no connection to the Ignite cluster + was yet established. + """ + return self.client.protocol_version + + def _fail(self): + """ set client to failed state. """ + self._failed = True + self._in_use.release() + + def read_response(self) -> Union[dict, OrderedDict]: + """ + Processes server's response to the handshake request. + + :return: handshake data. + """ + response_start = Struct([ + ('length', Int), + ('op_code', Byte), + ]) + start_class, start_buffer = response_start.parse(self) + start = start_class.from_buffer_copy(start_buffer) + data = response_start.to_python(start) + response_end = None + if data['op_code'] == 0: + response_end = Struct([ + ('version_major', Short), + ('version_minor', Short), + ('version_patch', Short), + ('message', String), + ]) + elif self.get_protocol_version() >= (1, 4, 0): + response_end = Struct([ + ('node_uuid', UUIDObject), + ]) + if response_end: + end_class, end_buffer = response_end.parse(self) + end = end_class.from_buffer_copy(end_buffer) + data.update(response_end.to_python(end)) + return data + + def connect( + self, host: str = None, port: int = None + ) -> Union[dict, OrderedDict]: + """ + Connect to the given server node with protocol version fallback. - def _connect(self, host: str, port: int): + :param host: Ignite server node's host name or IP, + :param port: Ignite server node's port number. + """ + detecting_protocol = False + + # go non-blocking for faster reconnect + if not self._in_use.acquire(blocking=False): + raise ConnectionError('Connection is in use.') + + # choose highest version first + if self.client.protocol_version is None: + detecting_protocol = True + self.client.protocol_version = max(PROTOCOLS) + + try: + result = self._connect_version(host, port) + except HandshakeError as e: + if e.expected_version in PROTOCOLS: + self.client.protocol_version = e.expected_version + result = self._connect_version(host, port) + else: + raise e + except connection_errors: + # restore undefined protocol version + if detecting_protocol: + self.client.protocol_version = None + raise + + # connection is ready for end user + self.uuid = result.get('node_uuid', None) # version-specific (1.4+) + + self._failed = False + return result + + def _connect_version( + self, host: str = None, port: int = None, + ) -> Union[dict, OrderedDict]: """ - Actually connect socket. + Connect to the given server node using protocol version + defined on client. + + :param host: Ignite server node's host name or IP, + :param port: Ignite server node's port number. 
""" + + host = host or IGNITE_DEFAULT_HOST + port = port or IGNITE_DEFAULT_PORT + self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._socket.settimeout(self.timeout) self._socket = self._wrap(self.socket) self._socket.connect((host, port)) - hs_request = HandshakeRequest(self.username, self.password) + protocol_version = self.client.protocol_version + + hs_request = HandshakeRequest( + protocol_version, + self.username, + self.password + ) self.send(hs_request) hs_response = self.read_response() if hs_response['op_code'] == 0: - self.close() + # disconnect but keep in use + self.close(release=False) + error_text = 'Handshake error: {}'.format(hs_response['message']) # if handshake fails for any reason other than protocol mismatch # (i.e. authentication error), server version is 0.0.0 @@ -185,74 +308,78 @@ def _connect(self, host: str, port: int): '{version_major}.{version_minor}.{version_patch}. Client ' 'provides {client_major}.{client_minor}.{client_patch}.' ).format( - client_major=PROTOCOL_VERSION_MAJOR, - client_minor=PROTOCOL_VERSION_MINOR, - client_patch=PROTOCOL_VERSION_PATCH, + client_major=protocol_version[0], + client_minor=protocol_version[1], + client_patch=protocol_version[2], **hs_response ) - raise HandshakeError(error_text) + raise HandshakeError(( + hs_response['version_major'], + hs_response['version_minor'], + hs_response['version_patch'], + ), error_text) self.host, self.port = host, port + return hs_response - def connect(self, *args): + def reconnect(self, seq_no=0): """ - Connect to the server. Connection parameters may be either one node - (host and port), or list (or other iterable) of nodes. - - :param host: Ignite server host, - :param port: Ignite server port, - :param nodes: iterable of (host, port) tuples. + Tries to reconnect synchronously, then in background. """ - self.nodes = iter([]) - if len(args) == 0: - host, port = IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT - elif len(args) == 1 and is_iterable(args[0]): - self.nodes = iter(args[0]) - host, port = next(self.nodes) - elif ( - len(args) == 2 - and isinstance(args[0], str) - and isinstance(args[1], int) - ): - host, port = args - else: - raise ConnectionError('Connection parameters are not valid.') - self._connect(host, port) + # stop trying to reconnect + if seq_no >= len(RECONNECT_BACKOFF_SEQUENCE): + self._failed = False + + self._reconnect() + + if self.failed: + DaemonicTimer( + RECONNECT_BACKOFF_SEQUENCE[seq_no], + self.reconnect, + kwargs={'seq_no': seq_no + 1}, + ).start() def _reconnect(self): - """ - Restore the connection using the next node in `nodes` iterable. - """ - for host, port in self.nodes: - try: - self._connect(host, port) - return - except OSError: - pass - self.host = self.port = self.nodes = None - # exception chaining gives a misleading traceback here - raise ReconnectError('Can not reconnect: out of nodes') from None + # do not reconnect if connection is already working + # or was closed on purpose + if not self.failed: + return + + # return connection to initial state regardless of use lock + self.close(release=False) + try: + self._in_use.release() + except RuntimeError: + pass + + # connect and silence the connection errors + try: + self.connect(self.host, self.port) + except connection_errors: + pass def _transfer_params(self, to: 'Connection'): """ Transfer non-SSL parameters to target connection object. - :param target: connection object to transfer parameters to. + :param to: connection object to transfer parameters to. 
""" to.username = self.username to.password = self.password - to.nodes = self.nodes + to.client = self.client + to.host = self.host + to.port = self.port - def clone(self, prefetch: bytes=b'') -> 'Connection': + def clone(self, prefetch: bytes = b'') -> 'Connection': """ Clones this connection in its current state. :return: `Connection` object. """ - clone = self.__class__(**self.init_kwargs) + clone = self.__class__(self.client, **self.ssl_params) self._transfer_params(to=clone) - if self.port and self.host: - clone._connect(self.host, self.port) + if self.alive: + clone.connect(self.host, self.port) clone.prefetch = prefetch return clone @@ -263,6 +390,9 @@ def send(self, data: bytes, flags=None): :param data: bytes to send, :param flags: (optional) OS-specific flags. """ + if self.closed: + raise SocketError('Attempt to use closed connection.') + kwargs = {} if flags is not None: kwargs['flags'] = flags @@ -271,13 +401,18 @@ def send(self, data: bytes, flags=None): while total_bytes_sent < len(data): try: - bytes_sent = self.socket.send(data[total_bytes_sent:], **kwargs) - except OSError: - self._socket = self.host = self.port = None + bytes_sent = self.socket.send( + data[total_bytes_sent:], + **kwargs + ) + except connection_errors: + self._fail() + self.reconnect() raise if bytes_sent == 0: - self.socket.close() - raise SocketError('Socket connection broken.') + self._fail() + self.reconnect() + raise SocketError('Connection broken.') total_bytes_sent += bytes_sent def recv(self, buffersize, flags=None) -> bytes: @@ -288,14 +423,18 @@ def recv(self, buffersize, flags=None) -> bytes: :param flags: (optional) OS-specific flags, :return: data received. """ + if self.closed: + raise SocketError('Attempt to use closed connection.') + pref_size = len(self.prefetch) if buffersize > pref_size: result = self.prefetch self.prefetch = b'' try: result += self._recv(buffersize-pref_size, flags) - except (SocketError, OSError): - self._socket = self.host = self.port = None + except connection_errors: + self._fail() + self.reconnect() raise return result else: @@ -316,18 +455,28 @@ def _recv(self, buffersize, flags=None) -> bytes: while bytes_rcvd < buffersize: chunk = self.socket.recv(buffersize-bytes_rcvd, **kwargs) if chunk == b'': - self.socket.close() - raise SocketError('Socket connection broken.') + raise SocketError('Connection broken.') chunks.append(chunk) bytes_rcvd += len(chunk) return b''.join(chunks) - def close(self): + def close(self, release=True): """ - Mark socket closed. This is recommended but not required, since - sockets are automatically closed when they are garbage-collected. + Try to mark socket closed, then unlink it. This is recommended but + not required, since sockets are automatically closed when + garbage-collected. """ - self._socket.shutdown(socket.SHUT_RDWR) - self._socket.close() - self._socket = self.host = self.port = None + if release: + try: + self._in_use.release() + except RuntimeError: + pass + + if self._socket: + try: + self._socket.shutdown(socket.SHUT_RDWR) + self._socket.close() + except connection_errors: + pass + self._socket = None diff --git a/pyignite/connection/generators.py b/pyignite/connection/generators.py deleted file mode 100644 index d76db0e..0000000 --- a/pyignite/connection/generators.py +++ /dev/null @@ -1,48 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class RoundRobin: - """ - Round-robin generator for use with `Client.connect()`. Cycles a node - list until a maximum number of reconnects is reached (if set). - """ - - def __init__(self, nodes: list, max_reconnects: int=None): - """ - :param nodes: list of two-tuples of (host, port) format, - :param max_reconnects: (optional) maximum number of reconnect attempts. - defaults to None (cycle nodes infinitely). - """ - self.nodes = nodes - self.max_reconnects = max_reconnects - self.node_index = 0 - self.reconnects = 0 - - def __iter__(self) -> 'RoundRobin': - return self - - def __next__(self) -> tuple: - if self.max_reconnects is not None: - if self.reconnects >= self.max_reconnects: - raise StopIteration - else: - self.reconnects += 1 - - if self.node_index >= len(self.nodes): - self.node_index = 0 - node = self.nodes[self.node_index] - self.node_index += 1 - return node diff --git a/pyignite/connection/handshake.py b/pyignite/connection/handshake.py index 13d57fe..2e0264f 100644 --- a/pyignite/connection/handshake.py +++ b/pyignite/connection/handshake.py @@ -13,9 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Optional +from typing import Optional, Tuple -from pyignite.constants import * from pyignite.datatypes import Byte, Int, Short, String from pyignite.datatypes.internal import Struct @@ -27,9 +26,11 @@ class HandshakeRequest: handshake_struct = None username = None password = None + protocol_version = None def __init__( - self, username: Optional[str]=None, password: Optional[str]=None + self, protocol_version: Tuple[int, int, int], + username: Optional[str] = None, password: Optional[str] = None ): fields = [ ('length', Int), @@ -39,6 +40,7 @@ def __init__( ('version_patch', Short), ('client_code', Byte), ] + self.protocol_version = protocol_version if username and password: self.username = username self.password = password @@ -52,9 +54,9 @@ def __bytes__(self) -> bytes: handshake_data = { 'length': 8, 'op_code': OP_HANDSHAKE, - 'version_major': PROTOCOL_VERSION_MAJOR, - 'version_minor': PROTOCOL_VERSION_MINOR, - 'version_patch': PROTOCOL_VERSION_PATCH, + 'version_major': self.protocol_version[0], + 'version_minor': self.protocol_version[1], + 'version_patch': self.protocol_version[2], 'client_code': 2, # fixed value defined by protocol } if self.username and self.password: @@ -68,24 +70,3 @@ def __bytes__(self) -> bytes: len(self.password), ]) return self.handshake_struct.from_python(handshake_data) - - -def read_response(client): - response_start = Struct([ - ('length', Int), - ('op_code', Byte), - ]) - start_class, start_buffer = response_start.parse(client) - start = start_class.from_buffer_copy(start_buffer) - data = response_start.to_python(start) - if data['op_code'] == 0: - response_end = Struct([ - ('version_major', Short), - ('version_minor', Short), - ('version_patch', Short), - ('message', String), - ]) - end_class, 
end_buffer = response_end.parse(client) - end = end_class.from_buffer_copy(end_buffer) - data.update(response_end.to_python(end)) - return data diff --git a/pyignite/connection/ssl.py b/pyignite/connection/ssl.py index 044b103..9773860 100644 --- a/pyignite/connection/ssl.py +++ b/pyignite/connection/ssl.py @@ -19,24 +19,24 @@ from pyignite.constants import * -def wrap(client, _socket): +def wrap(conn: 'Connection', _socket): """ Wrap socket in SSL wrapper. """ - if client.init_kwargs.get('use_ssl', None): - keyfile = client.init_kwargs.get('ssl_keyfile', None) - certfile = client.init_kwargs.get('ssl_certfile', None) + if conn.ssl_params.get('use_ssl', None): + keyfile = conn.ssl_params.get('ssl_keyfile', None) + certfile = conn.ssl_params.get('ssl_certfile', None) if keyfile and not certfile: raise ValueError("certfile must be specified") - password = client.init_kwargs.get('ssl_keyfile_password', None) - ssl_version = client.init_kwargs.get('ssl_version', SSL_DEFAULT_VERSION) - ciphers = client.init_kwargs.get('ssl_ciphers', SSL_DEFAULT_CIPHERS) - cert_reqs = client.init_kwargs.get('ssl_cert_reqs', ssl.CERT_NONE) - ca_certs = client.init_kwargs.get('ssl_ca_certfile', None) + password = conn.ssl_params.get('ssl_keyfile_password', None) + ssl_version = conn.ssl_params.get('ssl_version', SSL_DEFAULT_VERSION) + ciphers = conn.ssl_params.get('ssl_ciphers', SSL_DEFAULT_CIPHERS) + cert_reqs = conn.ssl_params.get('ssl_cert_reqs', ssl.CERT_NONE) + ca_certs = conn.ssl_params.get('ssl_ca_certfile', None) context = SSLContext(ssl_version) context.verify_mode = cert_reqs - + if ca_certs: context.load_verify_locations(ca_certs) if certfile: diff --git a/pyignite/constants.py b/pyignite/constants.py index 78c9379..fc840d6 100644 --- a/pyignite/constants.py +++ b/pyignite/constants.py @@ -21,16 +21,23 @@ __all__ = [ - 'PROTOCOL_VERSION_MAJOR', 'PROTOCOL_VERSION_MINOR', - 'PROTOCOL_VERSION_PATCH', 'MAX_LONG', 'MIN_LONG', 'MAX_INT', 'MIN_INT', + 'PROTOCOLS', 'MAX_LONG', 'MIN_LONG', 'MAX_INT', 'MIN_INT', 'PROTOCOL_BYTE_ORDER', 'PROTOCOL_STRING_ENCODING', 'PROTOCOL_CHAR_ENCODING', 'SSL_DEFAULT_VERSION', 'SSL_DEFAULT_CIPHERS', 'FNV1_OFFSET_BASIS', 'FNV1_PRIME', 'IGNITE_DEFAULT_HOST', 'IGNITE_DEFAULT_PORT', + 'RHF_ERROR', 'RHF_TOPOLOGY_CHANGED', 'AFFINITY_DELAY', 'AFFINITY_RETRIES', + 'RECONNECT_BACKOFF_SEQUENCE', ] +PROTOCOLS = { + (1, 4, 0), + (1, 3, 0), + (1, 2, 0), +} + PROTOCOL_VERSION_MAJOR = 1 -PROTOCOL_VERSION_MINOR = 2 +PROTOCOL_VERSION_MINOR = 4 PROTOCOL_VERSION_PATCH = 0 MAX_LONG = 9223372036854775807 @@ -50,3 +57,12 @@ IGNITE_DEFAULT_HOST = 'localhost' IGNITE_DEFAULT_PORT = 10800 + +# response header flags +RHF_ERROR = 1 +RHF_TOPOLOGY_CHANGED = 2 + +AFFINITY_DELAY = 0.01 +AFFINITY_RETRIES = 32 + +RECONNECT_BACKOFF_SEQUENCE = [0, 1, 1, 2, 3, 5, 8, 13] diff --git a/pyignite/datatypes/base.py b/pyignite/datatypes/base.py index a0522c0..25b5b1e 100644 --- a/pyignite/datatypes/base.py +++ b/pyignite/datatypes/base.py @@ -13,10 +13,36 @@ # See the License for the specific language governing permissions and # limitations under the License. -from abc import ABC +class IgniteDataTypeProps: + """ + Add `type_name` and `type_id` properties for all classes and objects + of Ignite type hierarchy. + """ + @property + def type_name(self) -> str: + """ Binary object type name. """ + return getattr(self, '_type_name', None) + + @property + def type_id(self) -> int: + """ Binary object type ID. 
""" + from pyignite.utils import entity_id + + return getattr( + self, + '_type_id', + entity_id(getattr(self, '_type_name', None)) + ) + + +class IgniteDataTypeMeta(type, IgniteDataTypeProps): + """ + Class variant of Ignate data type properties. + """ + pass -class IgniteDataType(ABC): +class IgniteDataType(metaclass=IgniteDataTypeMeta): """ This is a base class for all Ignite data types, a.k.a. parser/constructor classes, both object and payload varieties. diff --git a/pyignite/datatypes/complex.py b/pyignite/datatypes/complex.py index 87e5130..d9ce36a 100644 --- a/pyignite/datatypes/complex.py +++ b/pyignite/datatypes/complex.py @@ -16,13 +16,15 @@ from collections import OrderedDict import ctypes import inspect +from typing import Iterable, Dict from pyignite.constants import * from pyignite.exceptions import ParseError -from pyignite.utils import entity_id, hashcode, is_hinted from .base import IgniteDataType from .internal import AnyDataObject, infer_from_python from .type_codes import * +from .type_ids import * +from .type_names import * __all__ = [ @@ -33,11 +35,21 @@ class ObjectArrayObject(IgniteDataType): """ - Array of objects of any type. Its Python representation is - tuple(type_id, iterable of any type). + Array of Ignite objects of any consistent type. Its Python representation + is tuple(type_id, iterable of any type). The only type ID that makes sense + in Python client is :py:attr:`~OBJECT`, that corresponds directly to + the root object type in Java type hierarchy (`java.lang.Object`). """ + OBJECT = -1 + + _type_name = NAME_OBJ_ARR + _type_id = TYPE_OBJ_ARR type_code = TC_OBJECT_ARRAY - type_or_id_name = 'type_id' + + @staticmethod + def hashcode(value: Iterable) -> int: + # Arrays are not supported as keys at the moment. + return 0 @classmethod def build_header(cls): @@ -86,7 +98,7 @@ def to_python(cls, ctype_object, *args, **kwargs): *args, **kwargs ) ) - return getattr(ctype_object, cls.type_or_id_name), result + return ctype_object.type_id, result @classmethod def from_python(cls, value): @@ -103,12 +115,12 @@ def from_python(cls, value): value = [value] length = 1 header.length = length - setattr(header, cls.type_or_id_name, type_or_id) - buffer = bytes(header) + header.type_id = type_or_id + buffer = bytearray(header) for x in value: buffer += infer_from_python(x) - return buffer + return bytes(buffer) class WrappedDataObject(IgniteDataType): @@ -167,19 +179,53 @@ def from_python(cls, value): raise ParseError('Send unwrapped data.') -class CollectionObject(ObjectArrayObject): +class CollectionObject(IgniteDataType): """ - Just like object array, but contains deserialization type hint instead of - type id. This hint is also useless in Python, because the list type along - covers all the use cases. - - Also represented as tuple(type_id, iterable of any type) in Python. + Similar to object array, but contains platform-agnostic deserialization + type hint instead of type ID. + + Represented as tuple(hint, iterable of any type) in Python. Hints are: + + * :py:attr:`~pyignite.datatypes.complex.CollectionObject.USER_SET` − + a set of unique Ignite thin data objects. The exact Java type of a set + is undefined, + * :py:attr:`~pyignite.datatypes.complex.CollectionObject.USER_COL` − + a collection of Ignite thin data objects. 
The exact Java type + of a collection is undefined, + * :py:attr:`~pyignite.datatypes.complex.CollectionObject.ARR_LIST` − + represents the `java.util.ArrayList` type, + * :py:attr:`~pyignite.datatypes.complex.CollectionObject.LINKED_LIST` − + represents the `java.util.LinkedList` type, + * :py:attr:`~pyignite.datatypes.complex.CollectionObject.HASH_SET`− + represents the `java.util.HashSet` type, + * :py:attr:`~pyignite.datatypes.complex.CollectionObject.LINKED_HASH_SET` − + represents the `java.util.LinkedHashSet` type, + * :py:attr:`~pyignite.datatypes.complex.CollectionObject.SINGLETON_LIST` − + represents the return type of the `java.util.Collection.singletonList` + method. + + It is safe to say that `USER_SET` (`set` in Python) and `USER_COL` (`list`) + can cover all the imaginable use cases from Python perspective. """ + USER_SET = -1 + USER_COL = 0 + ARR_LIST = 1 + LINKED_LIST = 2 + HASH_SET = 3 + LINKED_HASH_SET = 4 + SINGLETON_LIST = 5 + + _type_name = NAME_COL + _type_id = TYPE_COL type_code = TC_COLLECTION - type_or_id_name = 'type' pythonic = list default = [] + @staticmethod + def hashcode(value: Iterable) -> int: + # Collections are not supported as keys at the moment. + return 0 + @classmethod def build_header(cls): return type( @@ -195,6 +241,62 @@ def build_header(cls): } ) + @classmethod + def parse(cls, client: 'Client'): + header_class = cls.build_header() + buffer = client.recv(ctypes.sizeof(header_class)) + header = header_class.from_buffer_copy(buffer) + fields = [] + + for i in range(header.length): + c_type, buffer_fragment = AnyDataObject.parse(client) + buffer += buffer_fragment + fields.append(('element_{}'.format(i), c_type)) + + final_class = type( + cls.__name__, + (header_class,), + { + '_pack_': 1, + '_fields_': fields, + } + ) + return final_class, buffer + + @classmethod + def to_python(cls, ctype_object, *args, **kwargs): + result = [] + for i in range(ctype_object.length): + result.append( + AnyDataObject.to_python( + getattr(ctype_object, 'element_{}'.format(i)), + *args, **kwargs + ) + ) + return ctype_object.type, result + + @classmethod + def from_python(cls, value): + type_or_id, value = value + header_class = cls.build_header() + header = header_class() + header.type_code = int.from_bytes( + cls.type_code, + byteorder=PROTOCOL_BYTE_ORDER + ) + try: + length = len(value) + except TypeError: + value = [value] + length = 1 + header.length = length + header.type = type_or_id + buffer = bytearray(header) + + for x in value: + buffer += infer_from_python(x) + return bytes(buffer) + class Map(IgniteDataType): """ @@ -203,9 +305,16 @@ class Map(IgniteDataType): Ignite does not track the order of key-value pairs in its caches, hence the ordinary Python dict type, not the collections.OrderedDict. """ + _type_name = NAME_MAP + _type_id = TYPE_MAP HASH_MAP = 1 LINKED_HASH_MAP = 2 + @staticmethod + def hashcode(value: Dict) -> int: + # Maps are not supported as keys at the moment. + return 0 + @classmethod def build_header(cls): return type( @@ -271,22 +380,25 @@ def from_python(cls, value, type_id=None): ) if hasattr(header, 'type'): header.type = type_id - buffer = bytes(header) + buffer = bytearray(header) for k, v in value.items(): buffer += infer_from_python(k) buffer += infer_from_python(v) - return buffer + return bytes(buffer) class MapObject(Map): """ - This is a dictionary type. Type conversion hint can be a `HASH_MAP` - (ordinary dict) or `LINKED_HASH_MAP` (collections.OrderedDict). + This is a dictionary type. 
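In practice the tuple representations look like this on the caller's side; a sketch that assumes a node on localhost:10800 and a cache named 'test'.

```python
from pyignite import Client
from pyignite.datatypes import CollectionObject, MapObject

client = Client()
client.connect('127.0.0.1', 10800)
cache = client.get_or_create_cache('test')

# collections travel as (hint, iterable) tuples
cache.put(1, (CollectionObject.LINKED_LIST, [1, 2, 3]), value_hint=CollectionObject)

# maps travel as (type_id, dict) tuples
cache.put(2, (MapObject.HASH_MAP, {'a': 1, 'b': 2}), value_hint=MapObject)

print(cache.get(1))   # (2, [1, 2, 3]), LINKED_LIST comes back with its hint
print(cache.get(2))   # (1, {'a': 1, 'b': 2})
```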
+ + Represented as tuple(type_id, value). - Keys and values in map are independent data objects, but `count` - counts pairs. Very annoying. + Type ID can be a :py:attr:`~HASH_MAP` (corresponds to an ordinary `dict` + in Python) or a :py:attr:`~LINKED_HASH_MAP` (`collections.OrderedDict`). """ + _type_name = NAME_MAP + _type_id = TYPE_MAP type_code = TC_MAP pythonic = dict default = {} @@ -319,6 +431,7 @@ def from_python(cls, value): class BinaryObject(IgniteDataType): + _type_id = TYPE_BINARY_OBJ type_code = TC_COMPLEX_OBJECT USER_TYPE = 0x0001 @@ -328,6 +441,46 @@ class BinaryObject(IgniteDataType): OFFSET_TWO_BYTES = 0x0010 COMPACT_FOOTER = 0x0020 + @staticmethod + def find_client(): + """ + A nice hack. Extracts the nearest `Client` instance from the + call stack. + """ + from pyignite import Client + from pyignite.connection import Connection + + frame = None + try: + for rec in inspect.stack()[2:]: + frame = rec[0] + code = frame.f_code + for varname in code.co_varnames: + suspect = frame.f_locals[varname] + if isinstance(suspect, Client): + return suspect + if isinstance(suspect, Connection): + return suspect.client + finally: + del frame + + @staticmethod + def hashcode( + value: object, client: 'Client' = None, *args, **kwargs + ) -> int: + # binary objects's hashcode implementation is special in the sense + # that you need to fully serialize the object to calculate + # its hashcode + if value._hashcode is None: + + # …and for to serialize it you need a Client instance + if client is None: + client = BinaryObject.find_client() + + value._build(client) + + return value._hashcode + @classmethod def build_header(cls): return type( @@ -373,11 +526,12 @@ def schema_type(cls, flags: int): ) @staticmethod - def get_dataclass(client: 'Client', header) -> OrderedDict: + def get_dataclass(conn: 'Connection', header) -> OrderedDict: # get field names from outer space - temp_conn = client.clone() - result = temp_conn.query_binary_type(header.type_id, header.schema_id) - temp_conn.close() + result = conn.client.query_binary_type( + header.type_id, + header.schema_id + ) if not result: raise ParseError('Binary type is not registered') return result @@ -417,7 +571,7 @@ def parse(cls, client: 'Client'): return final_class, buffer @classmethod - def to_python(cls, ctype_object, client: 'Client'=None, *args, **kwargs): + def to_python(cls, ctype_object, client: 'Client' = None, *args, **kwargs): if not client: raise ParseError( @@ -443,84 +597,19 @@ def to_python(cls, ctype_object, client: 'Client'=None, *args, **kwargs): @classmethod def from_python(cls, value: object): - def find_client(): - """ - A nice hack. Extracts the nearest `Client` instance from the - call stack. 
- """ - from pyignite import Client - - frame = None - try: - for rec in inspect.stack()[2:]: - frame = rec[0] - code = frame.f_code - for varname in code.co_varnames: - suspect = frame.f_locals[varname] - if isinstance(suspect, Client): - return suspect - finally: - del frame - - compact_footer = True # this is actually used - client = find_client() - if client: + if getattr(value, '_buffer', None) is None: + client = cls.find_client() + # if no client can be found, the class of the `value` is discarded # and the new dataclass is automatically registered later on - client.register_binary_type(value.__class__) - compact_footer = client.compact_footer - else: - raise Warning( - 'Can not register binary type {}'.format(value.type_name) - ) + if client: + client.register_binary_type(value.__class__) + else: + raise Warning( + 'Can not register binary type {}'.format(value.type_name) + ) - # prepare header - header_class = cls.build_header() - header = header_class() - header.type_code = int.from_bytes( - cls.type_code, - byteorder=PROTOCOL_BYTE_ORDER - ) + # build binary representation + value._build(client) - header.flags = cls.USER_TYPE | cls.HAS_SCHEMA - if compact_footer: - header.flags |= cls.COMPACT_FOOTER - header.version = value.version - header.type_id = value.type_id - header.schema_id = value.schema_id - - # create fields and calculate offsets - field_buffer = b'' - offsets = [ctypes.sizeof(header_class)] - schema_items = list(value.schema.items()) - for field_name, field_type in schema_items: - partial_buffer = field_type.from_python( - getattr( - value, field_name, getattr(field_type, 'default', None) - ) - ) - offsets.append(max(offsets) + len(partial_buffer)) - field_buffer += partial_buffer - - offsets = offsets[:-1] - - # create footer - if max(offsets, default=0) < 255: - header.flags |= cls.OFFSET_ONE_BYTE - elif max(offsets) < 65535: - header.flags |= cls.OFFSET_TWO_BYTES - schema_class = cls.schema_type(header.flags) * len(offsets) - schema = schema_class() - if compact_footer: - for i, offset in enumerate(offsets): - schema[i] = offset - else: - for i, offset in enumerate(offsets): - schema[i].field_id = entity_id(schema_items[i][0]) - schema[i].offset = offset - # calculate size and hash code - header.schema_offset = ctypes.sizeof(header_class) + len(field_buffer) - header.length = header.schema_offset + ctypes.sizeof(schema_class) - header.hash_code = hashcode(field_buffer + bytes(schema)) - - return bytes(header) + field_buffer + bytes(schema) + return value._buffer diff --git a/pyignite/datatypes/internal.py b/pyignite/datatypes/internal.py index 844e0ef..9fd5d64 100644 --- a/pyignite/datatypes/internal.py +++ b/pyignite/datatypes/internal.py @@ -17,7 +17,7 @@ import ctypes import decimal from datetime import date, datetime, timedelta -from typing import Any, Tuple +from typing import Any, Tuple, Union, Callable import uuid import attr @@ -28,10 +28,13 @@ from .type_codes import * -__all__ = ['AnyDataArray', 'AnyDataObject', 'Struct', 'StructArray', 'tc_map'] +__all__ = [ + 'AnyDataArray', 'AnyDataObject', 'Struct', 'StructArray', 'tc_map', + 'infer_from_python', +] -def tc_map(key: bytes, _memo_map: dict={}): +def tc_map(key: bytes, _memo_map: dict = {}): """ Returns a default parser/generator class for the given type code. 
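The reworked `BinaryObject.from_python()`/`hashcode()` pair above is exercised through the usual complex-object path. A typical round trip looks like this; it is a sketch assuming a running node, with a made-up `Person` type.

```python
from collections import OrderedDict

from pyignite import Client, GenericObjectMeta
from pyignite.datatypes import IntObject, String

class Person(metaclass=GenericObjectMeta, schema=OrderedDict([
    ('id', IntObject),
    ('name', String),
])):
    pass

client = Client()
client.connect('127.0.0.1', 10800)

cache = client.get_or_create_cache('person')
cache.put(1, Person(id=1, name='Ivan'))   # serialization builds the buffer and hash code lazily
print(cache.get(1).name)                  # 'Ivan'
```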
@@ -108,6 +111,20 @@ def tc_map(key: bytes, _memo_map: dict={}): return _memo_map[key] +class Conditional: + + def __init__(self, predicate1: Callable[[any], bool], predicate2: Callable[[any], bool], var1, var2): + self.predicate1 = predicate1 + self.predicate2 = predicate2 + self.var1 = var1 + self.var2 = var2 + + def parse(self, client: 'Client', context): + return self.var1.parse(client) if self.predicate1(context) else self.var2.parse(client) + + def to_python(self, ctype_object, context, *args, **kwargs): + return self.var1.to_python(ctype_object, *args, **kwargs) if self.predicate2(context) else self.var2.to_python(ctype_object, *args, **kwargs) + @attr.s class StructArray: """ `counter_type` counter, followed by count*following structure. """ @@ -167,7 +184,7 @@ def from_python(self, value): header_class = self.build_header_class() header = header_class() header.length = length - buffer = bytes(header) + buffer = bytearray(header) for i, v in enumerate(value): for default_key, default_value in self.defaults.items(): @@ -175,7 +192,7 @@ def from_python(self, value): for name, el_class in self.following: buffer += el_class.from_python(v[name]) - return buffer + return bytes(buffer) @attr.s @@ -185,16 +202,22 @@ class Struct: dict_type = attr.ib(default=OrderedDict) defaults = attr.ib(type=dict, default={}) - def parse(self, client: 'Client') -> Tuple[type, bytes]: + def parse( + self, client: 'Client' + ) -> Tuple[ctypes.BigEndianStructure, bytes]: buffer = b'' fields = [] + values = {} for name, c_type in self.fields: - c_type, buffer_fragment = c_type.parse(client) + is_cond = isinstance(c_type, Conditional) + c_type, buffer_fragment = c_type.parse(client, values) if is_cond else c_type.parse(client) buffer += buffer_fragment fields.append((name, c_type)) + values[name] = buffer_fragment + data_class = type( 'Struct', (ctypes.LittleEndianStructure,), @@ -206,10 +229,17 @@ def parse(self, client: 'Client') -> Tuple[type, bytes]: return data_class, buffer - def to_python(self, ctype_object, *args, **kwargs) -> Any: + def to_python( + self, ctype_object, *args, **kwargs + ) -> Union[dict, OrderedDict]: result = self.dict_type() for name, c_type in self.fields: + is_cond = isinstance(c_type, Conditional) result[name] = c_type.to_python( + getattr(ctype_object, name), + result, + *args, **kwargs + ) if is_cond else c_type.to_python( getattr(ctype_object, name), *args, **kwargs ) @@ -296,7 +326,7 @@ def _init_python_map(cls): """ from pyignite.datatypes import ( LongObject, DoubleObject, String, BoolObject, Null, UUIDObject, - DateObject, TimeObject, DecimalObject, + DateObject, TimeObject, DecimalObject, ByteArrayObject, ) cls._python_map = { @@ -304,6 +334,7 @@ def _init_python_map(cls): float: DoubleObject, str: String, bytes: String, + bytearray: ByteArrayObject, bool: BoolObject, type(None): Null, uuid.UUID: UUIDObject, @@ -340,7 +371,7 @@ def _init_python_array_map(cls): @classmethod def map_python_type(cls, value): from pyignite.datatypes import ( - MapObject, ObjectArrayObject, BinaryObject, + MapObject, CollectionObject, BinaryObject, ) if cls._python_map is None: @@ -349,12 +380,12 @@ def map_python_type(cls, value): cls._init_python_array_map() value_type = type(value) - if is_iterable(value) and value_type is not str: + if is_iterable(value) and value_type not in (str, bytearray, bytes): value_subtype = cls.get_subtype(value) if value_subtype in cls._python_array_map: return cls._python_array_map[value_subtype] - # a little heuristics (order may be important) + # a little 
heuristics (order is important) if all([ value_subtype is None, len(value) == 2, @@ -369,7 +400,9 @@ def map_python_type(cls, value): isinstance(value[0], int), is_iterable(value[1]), ]): - return ObjectArrayObject + return CollectionObject + + # no default for ObjectArrayObject, sorry raise TypeError( 'Type `array of {}` is invalid'.format(value_subtype) @@ -465,8 +498,8 @@ def from_python(self, value): value = [value] length = 1 header.length = length - buffer = bytes(header) + buffer = bytearray(header) for x in value: buffer += infer_from_python(x) - return buffer + return bytes(buffer) diff --git a/pyignite/datatypes/key_value.py b/pyignite/datatypes/key_value.py index 0f21ac6..ee2ae7b 100644 --- a/pyignite/datatypes/key_value.py +++ b/pyignite/datatypes/key_value.py @@ -18,7 +18,9 @@ class PeekModes(ByteArray): - ALL = 0 - NEAR = 1 - PRIMARY = 2 - BACKUP = 3 + ALL = 1 + NEAR = 2 + PRIMARY = 4 + BACKUP = 8 + ONHEAP = 16 + OFFHEAP = 32 diff --git a/pyignite/datatypes/null_object.py b/pyignite/datatypes/null_object.py index a648e30..19b41c7 100644 --- a/pyignite/datatypes/null_object.py +++ b/pyignite/datatypes/null_object.py @@ -20,6 +20,7 @@ """ import ctypes +from typing import Any from .base import IgniteDataType from .type_codes import TC_NULL @@ -33,6 +34,11 @@ class Null(IgniteDataType): pythonic = type(None) _object_c_type = None + @staticmethod + def hashcode(value: Any) -> int: + # Null object can not be a cache key. + return 0 + @classmethod def build_c_type(cls): if cls._object_c_type is None: diff --git a/pyignite/datatypes/primitive.py b/pyignite/datatypes/primitive.py index d1e9f4e..23d070d 100644 --- a/pyignite/datatypes/primitive.py +++ b/pyignite/datatypes/primitive.py @@ -17,6 +17,8 @@ from pyignite.constants import * from .base import IgniteDataType +from .type_ids import * +from .type_names import * __all__ = [ @@ -38,7 +40,8 @@ class Primitive(IgniteDataType): - Char, - Bool. """ - + _type_name = None + _type_id = None c_type = None @classmethod @@ -55,30 +58,44 @@ def from_python(cls, value): class Byte(Primitive): + _type_name = NAME_BYTE + _type_id = TYPE_BYTE c_type = ctypes.c_byte class Short(Primitive): + _type_name = NAME_SHORT + _type_id = TYPE_SHORT c_type = ctypes.c_short class Int(Primitive): + _type_name = NAME_INT + _type_id = TYPE_INT c_type = ctypes.c_int class Long(Primitive): + _type_name = NAME_LONG + _type_id = TYPE_LONG c_type = ctypes.c_longlong class Float(Primitive): + _type_name = NAME_FLOAT + _type_id = TYPE_FLOAT c_type = ctypes.c_float class Double(Primitive): + _type_name = NAME_DOUBLE + _type_id = TYPE_DOUBLE c_type = ctypes.c_double class Char(Primitive): + _type_name = NAME_CHAR + _type_id = TYPE_CHAR c_type = ctypes.c_short @classmethod @@ -103,4 +120,6 @@ def from_python(cls, value): class Bool(Primitive): + _type_name = NAME_BOOLEAN + _type_id = TYPE_BOOLEAN c_type = ctypes.c_bool diff --git a/pyignite/datatypes/primitive_arrays.py b/pyignite/datatypes/primitive_arrays.py index 6a93191..bca4fd9 100644 --- a/pyignite/datatypes/primitive_arrays.py +++ b/pyignite/datatypes/primitive_arrays.py @@ -14,11 +14,14 @@ # limitations under the License. import ctypes +from typing import Any from pyignite.constants import * from .base import IgniteDataType from .primitive import * from .type_codes import * +from .type_ids import * +from .type_names import * __all__ = [ @@ -33,9 +36,16 @@ class PrimitiveArray(IgniteDataType): """ Base class for array of primitives. Payload-only. 
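The effect of the inference changes above can be seen by calling the internal helper directly; doing so is for illustration only and is not a public API.

```python
from pyignite.datatypes.internal import AnyDataObject
from pyignite.datatypes import (
    BoolObject, ByteArrayObject, CollectionObject, LongObject, MapObject, String,
)

# plain scalars
assert AnyDataObject.map_python_type(42) is LongObject
assert AnyDataObject.map_python_type('forty two') is String
assert AnyDataObject.map_python_type(True) is BoolObject

# bytearray now maps to a byte array (bytes still maps to String)
assert AnyDataObject.map_python_type(bytearray(b'\x00\x01')) is ByteArrayObject

# the (int, dict) and (int, iterable) tuples keep their special meaning
assert AnyDataObject.map_python_type((MapObject.HASH_MAP, {'a': 1})) is MapObject
assert AnyDataObject.map_python_type((CollectionObject.USER_COL, [1, 2])) is CollectionObject
```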
""" + _type_name = None + _type_id = None primitive_type = None type_code = None + @staticmethod + def hashcode(value: Any) -> int: + # Arrays are not supported as keys at the moment. + return 0 + @classmethod def build_header_class(cls): return type( @@ -87,49 +97,79 @@ def from_python(cls, value): ) length = len(value) header.length = length - buffer = bytes(header) + buffer = bytearray(header) for x in value: buffer += cls.primitive_type.from_python(x) - return buffer + return bytes(buffer) class ByteArray(PrimitiveArray): + _type_name = NAME_BYTE_ARR + _type_id = TYPE_BYTE_ARR primitive_type = Byte type_code = TC_BYTE_ARRAY + @classmethod + def to_python(cls, ctype_object, *args, **kwargs): + return bytearray(ctype_object.data) + + @classmethod + def from_python(cls, value): + header_class = cls.build_header_class() + header = header_class() + + # no need to iterate on bytes or bytearray + # to create ByteArray data buffer + header.length = len(value) + return bytes(bytearray(header) + bytearray(value)) + class ShortArray(PrimitiveArray): + _type_name = NAME_SHORT_ARR + _type_id = TYPE_SHORT_ARR primitive_type = Short type_code = TC_SHORT_ARRAY class IntArray(PrimitiveArray): + _type_name = NAME_INT_ARR + _type_id = TYPE_INT_ARR primitive_type = Int type_code = TC_INT_ARRAY class LongArray(PrimitiveArray): + _type_name = NAME_LONG_ARR + _type_id = TYPE_LONG_ARR primitive_type = Long type_code = TC_LONG_ARRAY class FloatArray(PrimitiveArray): + _type_name = NAME_FLOAT_ARR + _type_id = TYPE_FLOAT_ARR primitive_type = Float type_code = TC_FLOAT_ARRAY class DoubleArray(PrimitiveArray): + _type_name = NAME_DOUBLE_ARR + _type_id = TYPE_DOUBLE_ARR primitive_type = Double type_code = TC_DOUBLE_ARRAY class CharArray(PrimitiveArray): + _type_name = NAME_CHAR_ARR + _type_id = TYPE_CHAR_ARR primitive_type = Char type_code = TC_CHAR_ARRAY class BoolArray(PrimitiveArray): + _type_name = NAME_BOOLEAN_ARR + _type_id = TYPE_BOOLEAN_ARR primitive_type = Bool type_code = TC_BOOL_ARRAY @@ -138,6 +178,8 @@ class PrimitiveArrayObject(PrimitiveArray): """ Base class for primitive array object. Type code plus payload. """ + _type_name = None + _type_id = None pythonic = list default = [] @@ -157,36 +199,83 @@ def build_header_class(cls): class ByteArrayObject(PrimitiveArrayObject): + _type_name = NAME_BYTE_ARR + _type_id = TYPE_BYTE_ARR primitive_type = Byte type_code = TC_BYTE_ARRAY + @classmethod + def to_python(cls, ctype_object, *args, **kwargs): + return ByteArray.to_python(ctype_object, *args, **kwargs) + + @classmethod + def from_python(cls, value): + header_class = cls.build_header_class() + header = header_class() + header.type_code = int.from_bytes( + cls.type_code, + byteorder=PROTOCOL_BYTE_ORDER + ) + + # no need to iterate on bytes or bytearray + # to create ByteArrayObject data buffer + header.length = len(value) + try: + # `value` is a `bytearray` or a sequence of integer values + # in range 0 to 255 + value_buffer = bytearray(value) + except ValueError: + # `value` is a sequence of integers in range -128 to 127 + value_buffer = bytearray() + for ch in value: + if -128 <= ch <= 255: + value_buffer.append(ctypes.c_ubyte(ch).value) + else: + raise ValueError( + 'byte must be in range(-128, 256)!' 
+ ) from None + + return bytes(bytearray(header) + value_buffer) + class ShortArrayObject(PrimitiveArrayObject): + _type_name = NAME_SHORT_ARR + _type_id = TYPE_SHORT_ARR primitive_type = Short type_code = TC_SHORT_ARRAY class IntArrayObject(PrimitiveArrayObject): + _type_name = NAME_INT_ARR + _type_id = TYPE_INT_ARR primitive_type = Int type_code = TC_INT_ARRAY class LongArrayObject(PrimitiveArrayObject): + _type_name = NAME_LONG_ARR + _type_id = TYPE_LONG_ARR primitive_type = Long type_code = TC_LONG_ARRAY class FloatArrayObject(PrimitiveArrayObject): + _type_name = NAME_FLOAT_ARR + _type_id = TYPE_FLOAT_ARR primitive_type = Float type_code = TC_FLOAT_ARRAY class DoubleArrayObject(PrimitiveArrayObject): + _type_name = NAME_DOUBLE_ARR + _type_id = TYPE_DOUBLE_ARR primitive_type = Double type_code = TC_DOUBLE_ARRAY class CharArrayObject(PrimitiveArrayObject): + _type_name = NAME_CHAR_ARR + _type_id = TYPE_CHAR_ARR primitive_type = Char type_code = TC_CHAR_ARRAY @@ -204,5 +293,7 @@ def to_python(cls, ctype_object, *args, **kwargs): class BoolArrayObject(PrimitiveArrayObject): + _type_name = NAME_BOOLEAN_ARR + _type_id = TYPE_BOOLEAN_ARR primitive_type = Bool type_code = TC_BOOL_ARRAY diff --git a/pyignite/datatypes/primitive_objects.py b/pyignite/datatypes/primitive_objects.py index 105acee..0bd0ec6 100644 --- a/pyignite/datatypes/primitive_objects.py +++ b/pyignite/datatypes/primitive_objects.py @@ -16,8 +16,11 @@ import ctypes from pyignite.constants import * +from pyignite.utils import unsigned from .base import IgniteDataType from .type_codes import * +from .type_ids import * +from .type_names import * __all__ = [ @@ -33,10 +36,11 @@ class DataObject(IgniteDataType): Primitive data objects are built of primitive data prepended by the corresponding type code. 
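As a quick check of the conversion above, both the unsigned Python byte form and the signed Java-style form end up as the same wire payload:

```python
from pyignite.datatypes import ByteArrayObject

unsigned_form = ByteArrayObject.from_python(bytearray(b'\x01\xff'))
signed_form = ByteArrayObject.from_python([1, -1])   # -1 is re-encoded as 255

assert unsigned_form == signed_form
```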
""" - + _type_name = None + _type_id = None + _object_c_type = None c_type = None type_code = None - _object_c_type = None @classmethod def build_c_type(cls): @@ -77,46 +81,89 @@ def from_python(cls, value): class ByteObject(DataObject): + _type_name = NAME_BYTE + _type_id = TYPE_BYTE c_type = ctypes.c_byte type_code = TC_BYTE pythonic = int default = 0 + @staticmethod + def hashcode(value: int, *args, **kwargs) -> int: + return value + class ShortObject(DataObject): + _type_name = NAME_SHORT + _type_id = TYPE_SHORT c_type = ctypes.c_short type_code = TC_SHORT pythonic = int default = 0 + @staticmethod + def hashcode(value: int, *args, **kwargs) -> int: + return value + class IntObject(DataObject): + _type_name = NAME_INT + _type_id = TYPE_INT c_type = ctypes.c_int type_code = TC_INT pythonic = int default = 0 + @staticmethod + def hashcode(value: int, *args, **kwargs) -> int: + return value + class LongObject(DataObject): + _type_name = NAME_LONG + _type_id = TYPE_LONG c_type = ctypes.c_longlong type_code = TC_LONG pythonic = int default = 0 + @staticmethod + def hashcode(value: int, *args, **kwargs) -> int: + return value ^ (unsigned(value, ctypes.c_ulonglong) >> 32) + class FloatObject(DataObject): + _type_name = NAME_FLOAT + _type_id = TYPE_FLOAT c_type = ctypes.c_float type_code = TC_FLOAT pythonic = float default = 0.0 + @staticmethod + def hashcode(value: float, *args, **kwargs) -> int: + return ctypes.cast( + ctypes.pointer(ctypes.c_float(value)), + ctypes.POINTER(ctypes.c_int) + ).contents.value + class DoubleObject(DataObject): + _type_name = NAME_DOUBLE + _type_id = TYPE_DOUBLE c_type = ctypes.c_double type_code = TC_DOUBLE pythonic = float default = 0.0 + @staticmethod + def hashcode(value: float, *args, **kwargs) -> int: + bits = ctypes.cast( + ctypes.pointer(ctypes.c_double(value)), + ctypes.POINTER(ctypes.c_longlong) + ).contents.value + return (bits & 0xffffffff) ^ (unsigned(bits, ctypes.c_longlong) >> 32) + class CharObject(DataObject): """ @@ -125,11 +172,17 @@ class CharObject(DataObject): to/from UTF-8 to keep the coding hassle to minimum. Bear in mind though: decoded character may take 1..4 bytes in UTF-8. 
""" + _type_name = NAME_CHAR + _type_id = TYPE_CHAR c_type = ctypes.c_short type_code = TC_CHAR pythonic = str default = ' ' + @staticmethod + def hashcode(value: str, *args, **kwargs) -> int: + return ord(value) + @classmethod def to_python(cls, ctype_object, *args, **kwargs): return ctype_object.value.to_bytes( @@ -152,7 +205,13 @@ def from_python(cls, value): class BoolObject(DataObject): + _type_name = NAME_BOOLEAN + _type_id = TYPE_BOOLEAN c_type = ctypes.c_bool type_code = TC_BOOL pythonic = bool default = False + + @staticmethod + def hashcode(value: bool, *args, **kwargs) -> int: + return 1231 if value else 1237 diff --git a/pyignite/datatypes/standard.py b/pyignite/datatypes/standard.py index 8808da2..c65cae4 100644 --- a/pyignite/datatypes/standard.py +++ b/pyignite/datatypes/standard.py @@ -17,11 +17,15 @@ from datetime import date, datetime, time, timedelta import decimal from math import ceil +from typing import Any, Tuple import uuid from pyignite.constants import * +from pyignite.utils import datetime_hashcode, decimal_hashcode, hashcode from .base import IgniteDataType from .type_codes import * +from .type_ids import * +from .type_names import * from .null_object import Null @@ -41,6 +45,8 @@ class StandardObject(IgniteDataType): + _type_name = None + _type_id = None type_code = None @classmethod @@ -64,9 +70,15 @@ class String(IgniteDataType): Pascal-style string: `c_int` counter, followed by count*bytes. UTF-8-encoded, so that one character may take 1 to 4 bytes. """ + _type_name = NAME_STRING + _type_id = TYPE_STRING type_code = TC_STRING pythonic = str + @staticmethod + def hashcode(value: str, *args, **kwargs) -> int: + return hashcode(value) + @classmethod def build_c_type(cls, length: int): return type( @@ -127,10 +139,16 @@ def from_python(cls, value): class DecimalObject(IgniteDataType): + _type_name = NAME_DECIMAL + _type_id = TYPE_DECIMAL type_code = TC_DECIMAL pythonic = decimal.Decimal default = decimal.Decimal('0.00') + @staticmethod + def hashcode(value: decimal.Decimal, *args, **kwargs) -> int: + return decimal_hashcode(value) + @classmethod def build_c_header(cls): return type( @@ -251,11 +269,22 @@ class UUIDObject(StandardObject): and :py:meth:`~pyignite.datatypes.standard.UUIDObject.from_python` methods is changed for compatibility with `java.util.UUID`. """ - type_code = TC_UUID + _type_name = NAME_UUID + _type_id = TYPE_UUID _object_c_type = None + type_code = TC_UUID UUID_BYTE_ORDER = (7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8) + UUID_BYTE_ORDER = (7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8) + + @staticmethod + def hashcode(value: 'UUID', *args, **kwargs) -> int: + msb = value.int >> 64 + lsb = value.int & 0xffffffffffffffff + hilo = msb ^ lsb + return (hilo >> 32) ^ (hilo & 0xffffffff) + @classmethod def build_c_type(cls): if cls._object_c_type is None: @@ -308,10 +337,16 @@ class TimestampObject(StandardObject): `epoch` and `fraction` stored separately and represented as tuple(datetime.datetime, integer). """ + _type_name = NAME_TIMESTAMP + _type_id = TYPE_TIMESTAMP + _object_c_type = None type_code = TC_TIMESTAMP pythonic = tuple default = (datetime(1970, 1, 1), 0) - _object_c_type = None + + @staticmethod + def hashcode(value: Tuple[datetime, int], *args, **kwargs) -> int: + return datetime_hashcode(int(value[0].timestamp() * 1000)) @classmethod def build_c_type(cls): @@ -364,10 +399,16 @@ class DateObject(StandardObject): Represented as a naive datetime.datetime in Python. 
""" + _type_name = NAME_DATE + _type_id = TYPE_DATE + _object_c_type = None type_code = TC_DATE pythonic = datetime default = datetime(1970, 1, 1) - _object_c_type = None + + @staticmethod + def hashcode(value: datetime, *args, **kwargs) -> int: + return datetime_hashcode(int(value.timestamp() * 1000)) @classmethod def build_c_type(cls): @@ -416,10 +457,16 @@ class TimeObject(StandardObject): Represented as a datetime.timedelta in Python. """ + _type_name = NAME_TIME + _type_id = TYPE_TIME + _object_c_type = None type_code = TC_TIME pythonic = timedelta default = timedelta() - _object_c_type = None + + @staticmethod + def hashcode(value: timedelta, *args, **kwargs) -> int: + return datetime_hashcode(int(value.total_seconds() * 1000)) @classmethod def build_c_type(cls): @@ -468,8 +515,10 @@ class EnumObject(StandardObject): (using language-specific type serialization is a good way to kill the interoperability though), so it represented by tuple(int, int) in Python. """ - type_code = TC_ENUM + _type_name = 'Enum' + _type_id = TYPE_ENUM _object_c_type = None + type_code = TC_ENUM @classmethod def build_c_type(cls): @@ -518,6 +567,8 @@ class BinaryEnumObject(EnumObject): """ Another way of representing the enum type. Same, but different. """ + _type_name = 'Enum' + _type_id = TYPE_BINARY_ENUM type_code = TC_BINARY_ENUM @@ -525,9 +576,16 @@ class StandardArray(IgniteDataType): """ Base class for array of primitives. Payload-only. """ + _type_name = None + _type_id = None standard_type = None type_code = None + @staticmethod + def hashcode(value: Any) -> int: + # Arrays are not supported as keys at the moment. + return 0 + @classmethod def build_header_class(cls): return type( @@ -585,11 +643,11 @@ def from_python(cls, value): ) length = len(value) header.length = length - buffer = bytes(header) + buffer = bytearray(header) for x in value: buffer += cls.standard_type.from_python(x) - return buffer + return bytes(buffer) class StringArray(StandardArray): @@ -599,34 +657,50 @@ class StringArray(StandardArray): List(str) in Python. """ + _type_name = NAME_STRING_ARR + _type_id = TYPE_STRING_ARR standard_type = String class DecimalArray(StandardArray): + _type_name = NAME_DECIMAL_ARR + _type_id = TYPE_DECIMAL_ARR standard_type = DecimalObject class UUIDArray(StandardArray): + _type_name = NAME_UUID_ARR + _type_id = TYPE_UUID_ARR standard_type = UUIDObject class TimestampArray(StandardArray): + _type_name = NAME_TIMESTAMP_ARR + _type_id = TYPE_TIMESTAMP_ARR standard_type = TimestampObject class DateArray(StandardArray): + _type_name = NAME_DATE_ARR + _type_id = TYPE_DATE_ARR standard_type = DateObject class TimeArray(StandardArray): + _type_name = NAME_TIME_ARR + _type_id = TYPE_TIME_ARR standard_type = TimeObject class EnumArray(StandardArray): + _type_name = 'Enum[]' + _type_id = TYPE_ENUM_ARR standard_type = EnumObject class StandardArrayObject(StandardArray): + _type_name = None + _type_id = None pythonic = list default = [] @@ -647,18 +721,24 @@ def build_header_class(cls): class StringArrayObject(StandardArrayObject): """ List of strings. """ + _type_name = NAME_STRING_ARR + _type_id = TYPE_STRING_ARR standard_type = String type_code = TC_STRING_ARRAY class DecimalArrayObject(StandardArrayObject): """ List of decimal.Decimal objects. 
""" + _type_name = NAME_DECIMAL_ARR + _type_id = TYPE_DECIMAL_ARR standard_type = DecimalObject type_code = TC_DECIMAL_ARRAY class UUIDArrayObject(StandardArrayObject): - """ Translated into Python as a list(uuid.UUID)""" + """ Translated into Python as a list(uuid.UUID). """ + _type_name = NAME_UUID_ARR + _type_id = TYPE_UUID_ARR standard_type = UUIDObject type_code = TC_UUID_ARRAY @@ -667,18 +747,24 @@ class TimestampArrayObject(StandardArrayObject): """ Translated into Python as a list of (datetime.datetime, integer) tuples. """ + _type_name = NAME_TIMESTAMP_ARR + _type_id = TYPE_TIMESTAMP_ARR standard_type = TimestampObject type_code = TC_TIMESTAMP_ARRAY class DateArrayObject(StandardArrayObject): """ List of datetime.datetime type values. """ + _type_name = NAME_DATE_ARR + _type_id = TYPE_DATE_ARR standard_type = DateObject type_code = TC_DATE_ARRAY class TimeArrayObject(StandardArrayObject): """ List of datetime.timedelta type values. """ + _type_name = NAME_TIME_ARR + _type_id = TYPE_TIME_ARR standard_type = TimeObject type_code = TC_TIME_ARRAY @@ -688,6 +774,8 @@ class EnumArrayObject(StandardArrayObject): Array of (int, int) tuples, plus it holds a `type_id` in its header. The only `type_id` value of -1 (user type) works from Python perspective. """ + _type_name = 'Enum[]' + _type_id = TYPE_ENUM_ARR standard_type = EnumObject type_code = TC_ENUM_ARRAY @@ -719,11 +807,11 @@ def from_python(cls, value): length = len(value) header.length = length header.type_id = type_id - buffer = bytes(header) + buffer = bytearray(header) for x in value: buffer += cls.standard_type.from_python(x) - return buffer + return bytes(buffer) @classmethod def to_python(cls, ctype_object, *args, **kwargs): diff --git a/docker-compose.yml b/pyignite/datatypes/type_ids.py similarity index 54% rename from docker-compose.yml rename to pyignite/datatypes/type_ids.py index 2517d25..be2d9c3 100644 --- a/docker-compose.yml +++ b/pyignite/datatypes/type_ids.py @@ -13,22 +13,40 @@ # See the License for the specific language governing permissions and # limitations under the License. -services: - ignite: - image: apacheignite/ignite:latest - ports: - - 10800:10800 - restart: always - network_mode: host - - ignite-ssl: - image: apacheignite/ignite:latest - ports: - - 10800:10800 - restart: always - network_mode: host - volumes: - - ./tests/config:/config - environment: - CONFIG_URI: /config/ssl.xml - PYTHON_TEST_CONFIG_PATH: /config +TYPE_BYTE = 1 +TYPE_SHORT = 2 +TYPE_INT = 3 +TYPE_LONG = 4 +TYPE_FLOAT = 5 +TYPE_DOUBLE = 6 +TYPE_CHAR = 7 +TYPE_BOOLEAN = 8 +TYPE_STRING = 9 +TYPE_UUID = 10 +TYPE_DATE = 11 +TYPE_BYTE_ARR = 12 +TYPE_SHORT_ARR = 13 +TYPE_INT_ARR = 14 +TYPE_LONG_ARR = 15 +TYPE_FLOAT_ARR = 16 +TYPE_DOUBLE_ARR = 17 +TYPE_CHAR_ARR = 18 +TYPE_BOOLEAN_ARR = 19 +TYPE_STRING_ARR = 20 +TYPE_UUID_ARR = 21 +TYPE_DATE_ARR = 22 +TYPE_OBJ_ARR = 23 +TYPE_COL = 24 +TYPE_MAP = 25 +TYPE_BINARY_OBJ = 27 +TYPE_ENUM = 28 +TYPE_ENUM_ARR = 29 +TYPE_DECIMAL = 30 +TYPE_DECIMAL_ARR = 31 +TYPE_CLASS = 32 +TYPE_TIMESTAMP = 33 +TYPE_TIMESTAMP_ARR = 34 +TYPE_PROXY = 35 +TYPE_TIME = 36 +TYPE_TIME_ARR = 37 +TYPE_BINARY_ENUM = 38 diff --git a/pyignite/datatypes/type_names.py b/pyignite/datatypes/type_names.py new file mode 100644 index 0000000..08ce75d --- /dev/null +++ b/pyignite/datatypes/type_names.py @@ -0,0 +1,46 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +NAME_BYTE = 'java.lang.Byte' +NAME_SHORT = 'java.lang.Short' +NAME_INT = 'java.lang.Integer' +NAME_LONG = 'java.lang.Long' +NAME_FLOAT = 'java.lang.Float' +NAME_DOUBLE = 'java.land.Double' +NAME_CHAR = 'java.lang.Character' +NAME_BOOLEAN = 'java.lang.Boolean' +NAME_STRING = 'java.lang.String' +NAME_UUID = 'java.util.UUID' +NAME_DATE = 'java.util.Date' +NAME_BYTE_ARR = 'class [B' +NAME_SHORT_ARR = 'class [S' +NAME_INT_ARR = 'class [I' +NAME_LONG_ARR = 'class [J' +NAME_FLOAT_ARR = 'class [F' +NAME_DOUBLE_ARR = 'class [D' +NAME_CHAR_ARR = 'class [C' +NAME_BOOLEAN_ARR = 'class [Z' +NAME_STRING_ARR = 'class [Ljava.lang.String;' +NAME_UUID_ARR = 'class [Ljava.util.UUID;' +NAME_DATE_ARR = 'class [Ljava.util.Date;' +NAME_OBJ_ARR = 'class [Ljava.lang.Object;' +NAME_COL = 'java.util.Collection' +NAME_MAP = 'java.util.Map' +NAME_DECIMAL = 'java.math.BigDecimal' +NAME_DECIMAL_ARR = 'class [Ljava.math.BigDecimal;' +NAME_TIMESTAMP = 'java.sql.Timestamp' +NAME_TIMESTAMP_ARR = 'class [Ljava.sql.Timestamp;' +NAME_TIME = 'java.sql.Time' +NAME_TIME_ARR = 'class [Ljava.sql.Time;' diff --git a/pyignite/exceptions.py b/pyignite/exceptions.py index 2bc5996..1b41d32 100644 --- a/pyignite/exceptions.py +++ b/pyignite/exceptions.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Tuple from socket import error as SocketError @@ -30,7 +31,10 @@ class HandshakeError(SocketError): as defined in https://apacheignite.readme.io/docs/binary-client-protocol#section-handshake """ - pass + + def __init__(self, expected_version: Tuple[int, int, int], message: str): + self.expected_version = expected_version + self.message = message class ReconnectError(Exception): @@ -78,3 +82,6 @@ class SQLError(CacheError): An error in SQL query. """ pass + + +connection_errors = (IOError, OSError) diff --git a/pyignite/queries/__init__.py b/pyignite/queries/__init__.py index 2c2d254..3029f87 100644 --- a/pyignite/queries/__init__.py +++ b/pyignite/queries/__init__.py @@ -21,319 +21,4 @@ :mod:`pyignite.datatypes` binary parser/generator classes. 
""" -from collections import OrderedDict -import ctypes -from random import randint - -import attr - -from pyignite.api.result import APIResult -from pyignite.constants import * -from pyignite.datatypes import ( - AnyDataObject, Bool, Int, Long, String, StringArray, Struct, -) -from .op_codes import * - - -@attr.s -class Response: - following = attr.ib(type=list, factory=list) - _response_header = None - - def __attrs_post_init__(self): - # replace None with empty list - self.following = self.following or [] - - @classmethod - def build_header(cls): - if cls._response_header is None: - cls._response_header = type( - 'ResponseHeader', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('length', ctypes.c_int), - ('query_id', ctypes.c_longlong), - ('status_code', ctypes.c_int), - ], - }, - ) - return cls._response_header - - def parse(self, client: 'Client'): - header_class = self.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) - header = header_class.from_buffer_copy(buffer) - fields = [] - - if header.status_code == OP_SUCCESS: - for name, ignite_type in self.following: - c_type, buffer_fragment = ignite_type.parse(client) - buffer += buffer_fragment - fields.append((name, c_type)) - else: - c_type, buffer_fragment = String.parse(client) - buffer += buffer_fragment - fields.append(('error_message', c_type)) - - response_class = type( - 'Response', - (header_class,), - { - '_pack_': 1, - '_fields_': fields, - } - ) - return response_class, buffer - - def to_python(self, ctype_object, *args, **kwargs): - result = OrderedDict() - - for name, c_type in self.following: - result[name] = c_type.to_python( - getattr(ctype_object, name), - *args, **kwargs - ) - - return result if result else None - - -@attr.s -class SQLResponse(Response): - """ - The response class of SQL functions is special in the way the row-column - data is counted in it. Basically, Ignite thin client API is following a - “counter right before the counted objects” rule in most of its parts. - SQL ops are breaking this rule. 
- """ - include_field_names = attr.ib(type=bool, default=False) - has_cursor = attr.ib(type=bool, default=False) - - def fields_or_field_count(self): - if self.include_field_names: - return 'fields', StringArray - return 'field_count', Int - - def parse(self, client: 'Client'): - header_class = self.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) - header = header_class.from_buffer_copy(buffer) - fields = [] - - if header.status_code == OP_SUCCESS: - following = [ - self.fields_or_field_count(), - ('row_count', Int), - ] - if self.has_cursor: - following.insert(0, ('cursor', Long)) - body_struct = Struct(following) - body_class, body_buffer = body_struct.parse(client) - body = body_class.from_buffer_copy(body_buffer) - - if self.include_field_names: - field_count = body.fields.length - else: - field_count = body.field_count - - data_fields = [] - data_buffer = b'' - for i in range(body.row_count): - row_fields = [] - row_buffer = b'' - for j in range(field_count): - field_class, field_buffer = AnyDataObject.parse(client) - row_fields.append(('column_{}'.format(j), field_class)) - row_buffer += field_buffer - - row_class = type( - 'SQLResponseRow', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': row_fields, - } - ) - data_fields.append(('row_{}'.format(i), row_class)) - data_buffer += row_buffer - - data_class = type( - 'SQLResponseData', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': data_fields, - } - ) - fields += body_class._fields_ + [ - ('data', data_class), - ('more', ctypes.c_bool), - ] - buffer += body_buffer + data_buffer - else: - c_type, buffer_fragment = String.parse(client) - buffer += buffer_fragment - fields.append(('error_message', c_type)) - - final_class = type( - 'SQLResponse', - (header_class,), - { - '_pack_': 1, - '_fields_': fields, - } - ) - buffer += client.recv(ctypes.sizeof(final_class) - len(buffer)) - return final_class, buffer - - def to_python(self, ctype_object, *args, **kwargs): - if ctype_object.status_code == 0: - result = { - 'more': Bool.to_python( - ctype_object.more, *args, **kwargs - ), - 'data': [], - } - if hasattr(ctype_object, 'fields'): - result['fields'] = StringArray.to_python( - ctype_object.fields, *args, **kwargs - ) - else: - result['field_count'] = Int.to_python( - ctype_object.field_count, *args, **kwargs - ) - if hasattr(ctype_object, 'cursor'): - result['cursor'] = Long.to_python( - ctype_object.cursor, *args, **kwargs - ) - for row_item in ctype_object.data._fields_: - row_name = row_item[0] - row_object = getattr(ctype_object.data, row_name) - row = [] - for col_item in row_object._fields_: - col_name = col_item[0] - col_object = getattr(row_object, col_name) - row.append( - AnyDataObject.to_python(col_object, *args, **kwargs) - ) - result['data'].append(row) - return result - - -@attr.s -class Query: - op_code = attr.ib(type=int) - following = attr.ib(type=list, factory=list) - query_id = attr.ib(type=int, default=None) - _query_c_type = None - - @classmethod - def build_c_type(cls): - if cls._query_c_type is None: - cls._query_c_type = type( - cls.__name__, - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('length', ctypes.c_int), - ('op_code', ctypes.c_short), - ('query_id', ctypes.c_longlong), - ], - }, - ) - return cls._query_c_type - - def from_python(self, values: dict=None): - if values is None: - values = {} - buffer = b'' - - header_class = self.build_c_type() - header = header_class() - header.op_code = self.op_code - if self.query_id is 
None: - header.query_id = randint(MIN_LONG, MAX_LONG) - - for name, c_type in self.following: - buffer += c_type.from_python(values[name]) - - header.length = ( - len(buffer) - + ctypes.sizeof(header_class) - - ctypes.sizeof(ctypes.c_int) - ) - return header.query_id, bytes(header) + buffer - - def perform( - self, conn: 'Connection', query_params: dict=None, - response_config: list=None, - ) -> APIResult: - """ - Perform query and process result. - - :param conn: connection to Ignite server, - :param query_params: (optional) dict of named query parameters. - Defaults to no parameters, - :param response_config: (optional) response configuration − list of - (name, type_hint) tuples. Defaults to empty return value, - :return: instance of :class:`~pyignite.api.result.APIResult` with raw - value (may undergo further processing in API functions). - """ - _, send_buffer = self.from_python(query_params) - conn.send(send_buffer) - response_struct = Response(response_config) - response_ctype, recv_buffer = response_struct.parse(conn) - response = response_ctype.from_buffer_copy(recv_buffer) - result = APIResult(response) - if result.status == 0: - result.value = response_struct.to_python(response) - return result - - -class ConfigQuery(Query): - """ - This is a special query, used for creating caches with configuration. - """ - _query_c_type = None - - @classmethod - def build_c_type(cls): - if cls._query_c_type is None: - cls._query_c_type = type( - cls.__name__, - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('length', ctypes.c_int), - ('op_code', ctypes.c_short), - ('query_id', ctypes.c_longlong), - ('config_length', ctypes.c_int), - ], - }, - ) - return cls._query_c_type - - def from_python(self, values: dict = None): - if values is None: - values = {} - buffer = b'' - - header_class = self.build_c_type() - header = header_class() - header.op_code = self.op_code - if self.query_id is None: - header.query_id = randint(MIN_LONG, MAX_LONG) - - for name, c_type in self.following: - buffer += c_type.from_python(values[name]) - - header.length = ( - len(buffer) - + ctypes.sizeof(header_class) - - ctypes.sizeof(ctypes.c_int) - ) - header.config_length = header.length - ctypes.sizeof(header_class) - return header.query_id, bytes(header) + buffer +from .query import Query, ConfigQuery, get_response_class diff --git a/pyignite/queries/op_codes.py b/pyignite/queries/op_codes.py index 1396e83..7372713 100644 --- a/pyignite/queries/op_codes.py +++ b/pyignite/queries/op_codes.py @@ -43,6 +43,7 @@ OP_CACHE_REMOVE_KEYS = 1018 OP_CACHE_REMOVE_ALL = 1019 OP_CACHE_GET_SIZE = 1020 +OP_CACHE_LOCAL_PEEK = 1021 OP_CACHE_GET_NAMES = 1050 OP_CACHE_CREATE_WITH_NAME = 1051 @@ -51,6 +52,7 @@ OP_CACHE_GET_OR_CREATE_WITH_CONFIGURATION = 1054 OP_CACHE_GET_CONFIGURATION = 1055 OP_CACHE_DESTROY = 1056 +OP_CACHE_PARTITIONS = 1101 OP_QUERY_SCAN = 2000 OP_QUERY_SCAN_CURSOR_GET_PAGE = 2001 diff --git a/pyignite/queries/query.py b/pyignite/queries/query.py new file mode 100644 index 0000000..0e7cfa3 --- /dev/null +++ b/pyignite/queries/query.py @@ -0,0 +1,164 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import ctypes +from random import randint + +import attr + +from pyignite.api.result import APIResult +from pyignite.constants import * +from pyignite.queries import response + + +def get_response_class(obj: object, sql: bool = False): + """ + Response class factory. + + :param obj: cache, connection or client object, + :param sql: (optional) return normal (default) or SQL response class, + :return: response class. + """ + template = 'SQLResponse{}{}{}' if sql else 'Response{}{}{}' + return getattr(response, template.format(*obj.get_protocol_version())) + + +@attr.s +class Query: + op_code = attr.ib(type=int) + following = attr.ib(type=list, factory=list) + query_id = attr.ib(type=int, default=None) + _query_c_type = None + + @classmethod + def build_c_type(cls): + if cls._query_c_type is None: + cls._query_c_type = type( + cls.__name__, + (ctypes.LittleEndianStructure,), + { + '_pack_': 1, + '_fields_': [ + ('length', ctypes.c_int), + ('op_code', ctypes.c_short), + ('query_id', ctypes.c_longlong), + ], + }, + ) + return cls._query_c_type + + def from_python(self, values: dict = None): + if values is None: + values = {} + buffer = b'' + + header_class = self.build_c_type() + header = header_class() + header.op_code = self.op_code + if self.query_id is None: + header.query_id = randint(MIN_LONG, MAX_LONG) + + for name, c_type in self.following: + buffer += c_type.from_python(values[name]) + + header.length = ( + len(buffer) + + ctypes.sizeof(header_class) + - ctypes.sizeof(ctypes.c_int) + ) + return header.query_id, bytes(header) + buffer + + def perform( + self, conn: 'Connection', query_params: dict = None, + response_config: list = None, sql: bool = False, **kwargs, + ) -> APIResult: + """ + Perform query and process result. + + :param conn: connection to Ignite server, + :param query_params: (optional) dict of named query parameters. + Defaults to no parameters, + :param response_config: (optional) response configuration − list of + (name, type_hint) tuples. Defaults to empty return value, + :param sql: (optional) use normal (default) or SQL response class, + :return: instance of :class:`~pyignite.api.result.APIResult` with raw + value (may undergo further processing in API functions). + """ + _, send_buffer = self.from_python(query_params) + conn.send(send_buffer) + response_class = get_response_class(conn, sql) + response_struct = response_class(response_config, **kwargs) + response_ctype, recv_buffer = response_struct.parse(conn) + response = response_ctype.from_buffer_copy(recv_buffer) + + # this test depends on protocol version + if getattr(response, 'flags', False) & RHF_TOPOLOGY_CHANGED: + # update latest affinity version + conn.client.affinity_version = ( + response.affinity_version, response.affinity_minor + ) + + # build result + result = APIResult(response) + if result.status == 0: + result.value = response_struct.to_python(response) + return result + + +class ConfigQuery(Query): + """ + This is a special query, used for creating caches with configuration. 
+ """ + _query_c_type = None + + @classmethod + def build_c_type(cls): + if cls._query_c_type is None: + cls._query_c_type = type( + cls.__name__, + (ctypes.LittleEndianStructure,), + { + '_pack_': 1, + '_fields_': [ + ('length', ctypes.c_int), + ('op_code', ctypes.c_short), + ('query_id', ctypes.c_longlong), + ('config_length', ctypes.c_int), + ], + }, + ) + return cls._query_c_type + + def from_python(self, values: dict = None): + if values is None: + values = {} + buffer = b'' + + header_class = self.build_c_type() + header = header_class() + header.op_code = self.op_code + if self.query_id is None: + header.query_id = randint(MIN_LONG, MAX_LONG) + + for name, c_type in self.following: + buffer += c_type.from_python(values[name]) + + header.length = ( + len(buffer) + + ctypes.sizeof(header_class) + - ctypes.sizeof(ctypes.c_int) + ) + header.config_length = header.length - ctypes.sizeof(header_class) + return header.query_id, bytes(header) + buffer diff --git a/pyignite/queries/response.py b/pyignite/queries/response.py new file mode 100644 index 0000000..5fb4879 --- /dev/null +++ b/pyignite/queries/response.py @@ -0,0 +1,428 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from collections import OrderedDict +import ctypes + +import attr + +from pyignite.constants import * +from pyignite.datatypes import ( + AnyDataObject, Bool, Int, Long, String, StringArray, Struct, +) +from .op_codes import * + + +@attr.s +class Response140: + following = attr.ib(type=list, factory=list) + _response_header = None + + def __attrs_post_init__(self): + # replace None with empty list + self.following = self.following or [] + + @classmethod + def build_header(cls): + if cls._response_header is None: + cls._response_header = type( + 'ResponseHeader', + (ctypes.LittleEndianStructure,), + { + '_pack_': 1, + '_fields_': [ + ('length', ctypes.c_int), + ('query_id', ctypes.c_longlong), + ('flags', ctypes.c_short), + ], + }, + ) + return cls._response_header + + def parse(self, conn: 'Connection'): + header_class = self.build_header() + buffer = conn.recv(ctypes.sizeof(header_class)) + header = header_class.from_buffer_copy(buffer) + fields = [] + + if header.flags & RHF_TOPOLOGY_CHANGED: + fields = [ + ('affinity_version', ctypes.c_longlong), + ('affinity_minor', ctypes.c_int), + ] + + if header.flags & RHF_ERROR: + fields.append(('status_code', ctypes.c_int)) + buffer += conn.recv( + sum([ctypes.sizeof(field[1]) for field in fields]) + ) + msg_type, buffer_fragment = String.parse(conn) + buffer += buffer_fragment + fields.append(('error_message', msg_type)) + + else: + buffer += conn.recv( + sum([ctypes.sizeof(field[1]) for field in fields]) + ) + for name, ignite_type in self.following: + c_type, buffer_fragment = ignite_type.parse(conn) + buffer += buffer_fragment + fields.append((name, c_type)) + + response_class = type( + 'Response', + (header_class,), + { + '_pack_': 1, + '_fields_': fields, + } + ) + return response_class, buffer + + def to_python(self, ctype_object, *args, **kwargs): + result = OrderedDict() + + for name, c_type in self.following: + result[name] = c_type.to_python( + getattr(ctype_object, name), + *args, **kwargs + ) + + return result if result else None + + +@attr.s +class SQLResponse140(Response140): + """ + The response class of SQL functions is special in the way the row-column + data is counted in it. Basically, Ignite thin client API is following a + “counter right before the counted objects” rule in most of its parts. + SQL ops are breaking this rule. 
+ """ + include_field_names = attr.ib(type=bool, default=False) + has_cursor = attr.ib(type=bool, default=False) + + def fields_or_field_count(self): + if self.include_field_names: + return 'fields', StringArray + return 'field_count', Int + + def parse(self, conn: 'Connection'): + header_class = self.build_header() + buffer = conn.recv(ctypes.sizeof(header_class)) + header = header_class.from_buffer_copy(buffer) + fields = [] + + if header.flags & RHF_TOPOLOGY_CHANGED: + fields = [ + ('affinity_version', ctypes.c_longlong), + ('affinity_minor', ctypes.c_int), + ] + + if header.flags & RHF_ERROR: + fields.append(('status_code', ctypes.c_int)) + buffer += conn.recv( + sum([ctypes.sizeof(field[1]) for field in fields]) + ) + msg_type, buffer_fragment = String.parse(conn) + buffer += buffer_fragment + fields.append(('error_message', msg_type)) + else: + buffer += conn.recv( + sum([ctypes.sizeof(field[1]) for field in fields]) + ) + following = [ + self.fields_or_field_count(), + ('row_count', Int), + ] + if self.has_cursor: + following.insert(0, ('cursor', Long)) + body_struct = Struct(following) + body_class, body_buffer = body_struct.parse(conn) + body = body_class.from_buffer_copy(body_buffer) + + if self.include_field_names: + field_count = body.fields.length + else: + field_count = body.field_count + + data_fields = [] + data_buffer = b'' + for i in range(body.row_count): + row_fields = [] + row_buffer = b'' + for j in range(field_count): + field_class, field_buffer = AnyDataObject.parse(conn) + row_fields.append(('column_{}'.format(j), field_class)) + row_buffer += field_buffer + + row_class = type( + 'SQLResponseRow', + (ctypes.LittleEndianStructure,), + { + '_pack_': 1, + '_fields_': row_fields, + } + ) + data_fields.append(('row_{}'.format(i), row_class)) + data_buffer += row_buffer + + data_class = type( + 'SQLResponseData', + (ctypes.LittleEndianStructure,), + { + '_pack_': 1, + '_fields_': data_fields, + } + ) + fields += body_class._fields_ + [ + ('data', data_class), + ('more', ctypes.c_bool), + ] + buffer += body_buffer + data_buffer + + final_class = type( + 'SQLResponse', + (header_class,), + { + '_pack_': 1, + '_fields_': fields, + } + ) + buffer += conn.recv(ctypes.sizeof(final_class) - len(buffer)) + return final_class, buffer + + def to_python(self, ctype_object, *args, **kwargs): + if not hasattr(ctype_object, 'status_code'): + result = { + 'more': Bool.to_python( + ctype_object.more, *args, **kwargs + ), + 'data': [], + } + if hasattr(ctype_object, 'fields'): + result['fields'] = StringArray.to_python( + ctype_object.fields, *args, **kwargs + ) + else: + result['field_count'] = Int.to_python( + ctype_object.field_count, *args, **kwargs + ) + if hasattr(ctype_object, 'cursor'): + result['cursor'] = Long.to_python( + ctype_object.cursor, *args, **kwargs + ) + for row_item in ctype_object.data._fields_: + row_name = row_item[0] + row_object = getattr(ctype_object.data, row_name) + row = [] + for col_item in row_object._fields_: + col_name = col_item[0] + col_object = getattr(row_object, col_name) + row.append( + AnyDataObject.to_python(col_object, *args, **kwargs) + ) + result['data'].append(row) + return result + + +@attr.s +class Response130: + following = attr.ib(type=list, factory=list) + _response_header = None + + def __attrs_post_init__(self): + # replace None with empty list + self.following = self.following or [] + + @classmethod + def build_header(cls): + if cls._response_header is None: + cls._response_header = type( + 'ResponseHeader', + 
(ctypes.LittleEndianStructure,), + { + '_pack_': 1, + '_fields_': [ + ('length', ctypes.c_int), + ('query_id', ctypes.c_longlong), + ('status_code', ctypes.c_int), + ], + }, + ) + return cls._response_header + + def parse(self, client: 'Client'): + header_class = self.build_header() + buffer = client.recv(ctypes.sizeof(header_class)) + header = header_class.from_buffer_copy(buffer) + fields = [] + + if header.status_code == OP_SUCCESS: + for name, ignite_type in self.following: + c_type, buffer_fragment = ignite_type.parse(client) + buffer += buffer_fragment + fields.append((name, c_type)) + else: + c_type, buffer_fragment = String.parse(client) + buffer += buffer_fragment + fields.append(('error_message', c_type)) + + response_class = type( + 'Response', + (header_class,), + { + '_pack_': 1, + '_fields_': fields, + } + ) + return response_class, buffer + + def to_python(self, ctype_object, *args, **kwargs): + result = OrderedDict() + + for name, c_type in self.following: + result[name] = c_type.to_python( + getattr(ctype_object, name), + *args, **kwargs + ) + + return result if result else None + + +@attr.s +class SQLResponse130(Response130): + """ + The response class of SQL functions is special in the way the row-column + data is counted in it. Basically, Ignite thin client API is following a + “counter right before the counted objects” rule in most of its parts. + SQL ops are breaking this rule. + """ + include_field_names = attr.ib(type=bool, default=False) + has_cursor = attr.ib(type=bool, default=False) + + def fields_or_field_count(self): + if self.include_field_names: + return 'fields', StringArray + return 'field_count', Int + + def parse(self, client: 'Client'): + header_class = self.build_header() + buffer = client.recv(ctypes.sizeof(header_class)) + header = header_class.from_buffer_copy(buffer) + fields = [] + + if header.status_code == OP_SUCCESS: + following = [ + self.fields_or_field_count(), + ('row_count', Int), + ] + if self.has_cursor: + following.insert(0, ('cursor', Long)) + body_struct = Struct(following) + body_class, body_buffer = body_struct.parse(client) + body = body_class.from_buffer_copy(body_buffer) + + if self.include_field_names: + field_count = body.fields.length + else: + field_count = body.field_count + + data_fields = [] + data_buffer = b'' + for i in range(body.row_count): + row_fields = [] + row_buffer = b'' + for j in range(field_count): + field_class, field_buffer = AnyDataObject.parse(client) + row_fields.append(('column_{}'.format(j), field_class)) + row_buffer += field_buffer + + row_class = type( + 'SQLResponseRow', + (ctypes.LittleEndianStructure,), + { + '_pack_': 1, + '_fields_': row_fields, + } + ) + data_fields.append(('row_{}'.format(i), row_class)) + data_buffer += row_buffer + + data_class = type( + 'SQLResponseData', + (ctypes.LittleEndianStructure,), + { + '_pack_': 1, + '_fields_': data_fields, + } + ) + fields += body_class._fields_ + [ + ('data', data_class), + ('more', ctypes.c_bool), + ] + buffer += body_buffer + data_buffer + else: + c_type, buffer_fragment = String.parse(client) + buffer += buffer_fragment + fields.append(('error_message', c_type)) + + final_class = type( + 'SQLResponse', + (header_class,), + { + '_pack_': 1, + '_fields_': fields, + } + ) + buffer += client.recv(ctypes.sizeof(final_class) - len(buffer)) + return final_class, buffer + + def to_python(self, ctype_object, *args, **kwargs): + if ctype_object.status_code == 0: + result = { + 'more': Bool.to_python( + ctype_object.more, *args, **kwargs + ), + 'data': 
[], + } + if hasattr(ctype_object, 'fields'): + result['fields'] = StringArray.to_python( + ctype_object.fields, *args, **kwargs + ) + else: + result['field_count'] = Int.to_python( + ctype_object.field_count, *args, **kwargs + ) + if hasattr(ctype_object, 'cursor'): + result['cursor'] = Long.to_python( + ctype_object.cursor, *args, **kwargs + ) + for row_item in ctype_object.data._fields_: + row_name = row_item[0] + row_object = getattr(ctype_object.data, row_name) + row = [] + for col_item in row_object._fields_: + col_name = col_item[0] + col_object = getattr(row_object, col_name) + row.append( + AnyDataObject.to_python(col_object, *args, **kwargs) + ) + result['data'].append(row) + return result + + +Response120 = Response130 +SQLResponse120 = SQLResponse130 diff --git a/pyignite/utils.py b/pyignite/utils.py index 1d4298e..ca9725d 100644 --- a/pyignite/utils.py +++ b/pyignite/utils.py @@ -13,14 +13,27 @@ # See the License for the specific language governing permissions and # limitations under the License. +import ctypes +import decimal + from functools import wraps -from typing import Any, Type, Union +from threading import Event, Thread +from typing import Any, Callable, Optional, Type, Tuple, Union from pyignite.datatypes.base import IgniteDataType from .constants import * -def is_iterable(value): +LONG_MASK = 0xffffffff +DIGITS_PER_INT = 9 + + +def is_pow2(value: int) -> bool: + """ Check if value is power of two. """ + return value > 0 and ((value & (value - 1)) == 0) + + +def is_iterable(value: Any) -> bool: """ Check if value is iterable. """ try: iter(value) @@ -71,7 +84,7 @@ def int_overflow(value: int) -> int: return ((value ^ 0x80000000) & 0xffffffff) - 0x80000000 -def unwrap_binary(client: 'Client', wrapped: tuple): +def unwrap_binary(client: 'Client', wrapped: tuple) -> object: """ Unwrap wrapped BinaryObject and convert it to Python data. @@ -82,13 +95,15 @@ def unwrap_binary(client: 'Client', wrapped: tuple): from pyignite.datatypes.complex import BinaryObject blob, offset = wrapped - client_clone = client.clone(prefetch=blob) - client_clone.pos = offset - data_class, data_bytes = BinaryObject.parse(client_clone) - return BinaryObject.to_python( + conn_clone = client.random_node.clone(prefetch=blob) + conn_clone.pos = offset + data_class, data_bytes = BinaryObject.parse(conn_clone) + result = BinaryObject.to_python( data_class.from_buffer_copy(data_bytes), client, ) + conn_clone.close() + return result def hashcode(string: Union[str, bytes]) -> int: @@ -118,13 +133,15 @@ def cache_id(cache: Union[str, int]) -> int: return cache if type(cache) is int else hashcode(cache) -def entity_id(cache: Union[str, int]) -> int: +def entity_id(cache: Union[str, int]) -> Optional[int]: """ Create a type ID from type name or field ID from field name. :param cache: entity name or ID, :return: entity ID. """ + if cache is None: + return None return cache if type(cache) is int else hashcode(cache.lower()) @@ -153,6 +170,56 @@ def schema_id(schema: Union[int, dict]) -> int: return s_id +def decimal_hashcode(value: decimal.Decimal) -> int: + """ + This is a translation of `java.math.BigDecimal` class `hashCode()` method + to Python. + + :param value: pythonic decimal value, + :return: hashcode. 
+ """ + sign, digits, scale = value.normalize().as_tuple() + sign = -1 if sign else 1 + value = int(''.join([str(d) for d in digits])) + + if value < MAX_LONG: + # this is the case when Java BigDecimal digits are stored + # compactly, in the internal 64-bit integer field + int_hash = ( + (unsigned(value, ctypes.c_ulonglong) >> 32) * 31 + + (value & LONG_MASK) + ) & LONG_MASK + else: + # digits are not fit in the 64-bit long, so they get split internally + # to an array of values within 32-bit integer range each (it is really + # a part of `java.math.BigInteger` class internals) + magnitude = [] + order = 0 + while True: + elem = value >> order + if elem > 1: + magnitude.insert(0, ctypes.c_int(elem).value) + order += 32 + else: + break + + int_hash = 0 + for v in magnitude: + int_hash = (31 * int_hash + (v & LONG_MASK)) & LONG_MASK + + return ctypes.c_int(31 * int_hash * sign - scale).value + + +def datetime_hashcode(value: int) -> int: + """ + Calculates hashcode from UNIX epoch. + + :param value: UNIX time, + :return: Java hashcode. + """ + return (value & LONG_MASK) ^ (unsigned(value, ctypes.c_ulonglong) >> 32) + + def status_to_exception(exc: Type[Exception]): """ Converts erroneous status code with error message to an exception @@ -170,3 +237,62 @@ def ste_wrapper(*args, **kwargs): return result.value return ste_wrapper return ste_decorator + + +def get_field_by_id( + obj: 'GenericObjectMeta', field_id: int +) -> Tuple[Any, IgniteDataType]: + """ + Returns a complex object's field value, given the field's entity ID. + + :param obj: complex object, + :param field_id: field ID, + :return: complex object field's value and type. + """ + for fname, ftype in obj._schema.items(): + if entity_id(fname) == field_id: + return getattr(obj, fname, getattr(ftype, 'default')), ftype + + +def unsigned(value: int, c_type: ctypes._SimpleCData = ctypes.c_uint) -> int: + """ Convert signed integer value to unsigned. """ + return c_type(value).value + + +class DaemonicTimer(Thread): + """ + Same as normal `threading.Timer`, but do not delay the program exit. + """ + + def __init__(self, interval, function, args=None, kwargs=None): + Thread.__init__(self, daemon=True) + self.interval = interval + self.function = function + self.args = args if args is not None else [] + self.kwargs = kwargs if kwargs is not None else {} + self.finished = Event() + + def cancel(self): + """Stop the timer if it hasn't finished yet.""" + self.finished.set() + + def run(self): + self.finished.wait(self.interval) + if not self.finished.is_set(): + self.function(*self.args, **self.kwargs) + self.finished.set() + + +def capitalize(string: str) -> str: + """ + Capitalizing the string, assuming the first character is a letter. + Does not touch any other character, unlike the `string.capitalize()`. + """ + return string[:1].upper() + string[1:] + + +def process_delimiter(name: str, delimiter: str) -> str: + """ + Splits the name by delimiter, capitalize each part, merge. 
+ """ + return ''.join([capitalize(x) for x in name.split(delimiter)]) diff --git a/requirements/tests.txt b/requirements/tests.txt index c107c8b..327f501 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -3,3 +3,4 @@ pytest==3.6.1 pytest-cov==2.5.1 teamcity-messages==1.21 +psutil==5.6.5 diff --git a/tests/config/ignite-config-base.xml b/tests/config/ignite-config-base.xml new file mode 100644 index 0000000..7487618 --- /dev/null +++ b/tests/config/ignite-config-base.xml @@ -0,0 +1,78 @@ + + + + + + + + + + + + + + + + + + + + + + + 127.0.0.1:48500..48503 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/config/ssl.xml b/tests/config/ignite-config-ssl.xml similarity index 77% rename from tests/config/ssl.xml rename to tests/config/ignite-config-ssl.xml index 8d74cbb..827405c 100644 --- a/tests/config/ssl.xml +++ b/tests/config/ignite-config-ssl.xml @@ -22,21 +22,16 @@ xsi:schemaLocation=" http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd"> + - - - - - - - + - + + + @@ -44,9 +39,9 @@ - + - + diff --git a/tests/config/ignite-config.xml b/tests/config/ignite-config.xml new file mode 100644 index 0000000..09fba2c --- /dev/null +++ b/tests/config/ignite-config.xml @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + diff --git a/tests/config/log4j.xml b/tests/config/log4j.xml new file mode 100644 index 0000000..f5562d0 --- /dev/null +++ b/tests/config/log4j.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/conftest.py b/tests/conftest.py index 8ebd5b8..9974b16 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -21,10 +21,11 @@ from pyignite import Client from pyignite.constants import * -from pyignite.api import cache_create, cache_get_names, cache_destroy +from pyignite.api import cache_create, cache_destroy +from tests.util import _start_ignite, start_ignite_gen, get_request_grid_idx -class UseSSLParser(argparse.Action): +class BoolParser(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): values = True if values is None else bool(strtobool(values)) @@ -64,14 +65,111 @@ def __call__(self, parser, namespace, values, option_string=None): ) +@pytest.fixture(scope='session', autouse=True) +def server1(request): + yield from start_ignite_server_gen(1, request) + + +@pytest.fixture(scope='session', autouse=True) +def server2(request): + yield from start_ignite_server_gen(2, request) + + +@pytest.fixture(scope='session', autouse=True) +def server3(request): + yield from start_ignite_server_gen(3, request) + + +@pytest.fixture(scope='module') +def start_ignite_server(use_ssl): + def start(idx=1): + return _start_ignite(idx, use_ssl=use_ssl) + + return start + + +def start_ignite_server_gen(idx, request): + use_ssl = request.config.getoption("--use-ssl") + yield from start_ignite_gen(idx, use_ssl) + + @pytest.fixture(scope='module') def client( - ignite_host, ignite_port, timeout, use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, - ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, ssl_version, + node, timeout, partition_aware, use_ssl, ssl_keyfile, ssl_keyfile_password, + ssl_certfile, ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, ssl_version, + username, password, +): + yield from client0(node, timeout, partition_aware, use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, + ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, ssl_version, username, password) + + +@pytest.fixture(scope='module') +def 
client_partition_aware( + node, timeout, use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, + ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, ssl_version, username, + password +): + yield from client0(node, timeout, True, use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, ssl_ca_certfile, + ssl_cert_reqs, ssl_ciphers, ssl_version, username, password) + + +@pytest.fixture(scope='module') +def client_partition_aware_single_server( + node, timeout, use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, + ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, ssl_version, username, + password +): + node = node[:1] + yield from client(node, timeout, True, use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, ssl_ca_certfile, + ssl_cert_reqs, ssl_ciphers, ssl_version, username, password) + + +@pytest.fixture +def cache(client): + cache_name = 'my_bucket' + conn = client.random_node + + cache_create(conn, cache_name) + yield cache_name + cache_destroy(conn, cache_name) + + +@pytest.fixture(autouse=True) +def log_init(): + # Init log call timestamp + get_request_grid_idx() + + +@pytest.fixture(scope='module') +def start_client(use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, + ssl_version,username, password): + def start(**kwargs): + cli_kw = kwargs.copy() + cli_kw.update({ + 'use_ssl': use_ssl, + 'ssl_keyfile': ssl_keyfile, + 'ssl_keyfile_password': ssl_keyfile_password, + 'ssl_certfile': ssl_certfile, + 'ssl_ca_certfile': ssl_ca_certfile, + 'ssl_cert_reqs': ssl_cert_reqs, + 'ssl_ciphers': ssl_ciphers, + 'ssl_version': ssl_version, + 'username': username, + 'password': password + }) + return Client(**cli_kw) + + return start + + +def client0( + node, timeout, partition_aware, use_ssl, ssl_keyfile, ssl_keyfile_password, + ssl_certfile, ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, ssl_version, username, password, ): client = Client( timeout=timeout, + partition_aware=partition_aware, use_ssl=use_ssl, ssl_keyfile=ssl_keyfile, ssl_keyfile_password=ssl_keyfile_password, @@ -83,21 +181,16 @@ def client( username=username, password=password, ) - client.connect(ignite_host, ignite_port) + nodes = [] + for n in node: + host, port = n.split(':') + port = int(port) + nodes.append((host, port)) + client.connect(nodes) yield client - for cache_name in cache_get_names(client).value: - cache_destroy(client, cache_name) client.close() -@pytest.fixture -def cache(client): - cache_name = 'my_bucket' - cache_create(client, cache_name) - yield cache_name - cache_destroy(client, cache_name) - - @pytest.fixture def examples(request): return request.config.getoption("--examples") @@ -112,17 +205,13 @@ def run_examples(request, examples): def pytest_addoption(parser): parser.addoption( - '--ignite-host', + '--node', action='append', - default=[IGNITE_DEFAULT_HOST], - help='Ignite binary protocol test server host (default: localhost)' - ) - parser.addoption( - '--ignite-port', - action='append', - default=[IGNITE_DEFAULT_PORT], - type=int, - help='Ignite binary protocol test server port (default: 10800)' + default=None, + help=( + 'Ignite binary protocol test server connection string ' + '(default: "localhost:10801")' + ) ) parser.addoption( '--timeout', @@ -134,9 +223,16 @@ def pytest_addoption(parser): 'integer or float value. 
Default is None' ) ) + parser.addoption( + '--partition-aware', + action=BoolParser, + nargs='?', + default=False, + help='Turn on the best effort affinity feature' + ) parser.addoption( '--use-ssl', - action=UseSSLParser, + action=BoolParser, nargs='?', default=False, help='Use SSL encryption' @@ -214,9 +310,11 @@ def pytest_addoption(parser): def pytest_generate_tests(metafunc): session_parameters = { - 'ignite_host': IGNITE_DEFAULT_HOST, - 'ignite_port': IGNITE_DEFAULT_PORT, + 'node': ['{host}:{port}'.format(host='127.0.0.1', port=10801), + '{host}:{port}'.format(host='127.0.0.1', port=10802), + '{host}:{port}'.format(host='127.0.0.1', port=10803)], 'timeout': None, + 'partition_aware': False, 'use_ssl': False, 'ssl_keyfile': None, 'ssl_keyfile_password': None, @@ -232,9 +330,10 @@ def pytest_generate_tests(metafunc): for param_name in session_parameters: if param_name in metafunc.fixturenames: param = metafunc.config.getoption(param_name) + # TODO: This does not work for bool if param is None: param = session_parameters[param_name] - if type(param) is not list: + if param_name == 'node' or type(param) is not list: param = [param] metafunc.parametrize(param_name, param, scope='session') diff --git a/tests/test_affinity.py b/tests/test_affinity.py new file mode 100644 index 0000000..a55251b --- /dev/null +++ b/tests/test_affinity.py @@ -0,0 +1,229 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
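# A minimal sketch of the connection pattern the reworked conftest.py uses in
# client0(): Client now accepts a partition_aware flag, and connect() is given
# a list of (host, port) tuples built from the --node option strings. The
# addresses below are the test defaults from pytest_generate_tests, and the
# cache name is an example only.

from pyignite import Client

client = Client(partition_aware=True)
nodes = []
for n in ['127.0.0.1:10801', '127.0.0.1:10802', '127.0.0.1:10803']:
    host, port = n.split(':')
    nodes.append((host, int(port)))
client.connect(nodes)
try:
    cache = client.get_or_create_cache('example_cache')
    cache.put(1, 1)
    assert cache.get(1) == 1
finally:
    client.close()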
+ +from datetime import datetime, timedelta +import decimal +from uuid import UUID, uuid4 + +import pytest + +from pyignite import GenericObjectMeta +from pyignite.api import * +from pyignite.constants import * +from pyignite.datatypes import * +from pyignite.datatypes.cache_config import CacheMode +from pyignite.datatypes.prop_codes import * + + +def test_get_node_partitions(client_partition_aware): + + conn = client_partition_aware.random_node + + cache_1 = client_partition_aware.get_or_create_cache('test_cache_1') + cache_2 = client_partition_aware.get_or_create_cache({ + PROP_NAME: 'test_cache_2', + PROP_CACHE_KEY_CONFIGURATION: [ + { + 'type_name': ByteArray.type_name, + 'affinity_key_field_name': 'byte_affinity', + } + ], + }) + cache_3 = client_partition_aware.get_or_create_cache('test_cache_3') + cache_4 = client_partition_aware.get_or_create_cache('test_cache_4') + cache_5 = client_partition_aware.get_or_create_cache('test_cache_5') + + result = cache_get_node_partitions( + conn, + [cache_1.cache_id, cache_2.cache_id] + ) + assert result.status == 0, result.message + + +@pytest.mark.parametrize( + 'key, key_hint', [ + # integers + (42, None), + (43, ByteObject), + (-44, ByteObject), + (45, IntObject), + (-46, IntObject), + (47, ShortObject), + (-48, ShortObject), + (49, LongObject), + (MAX_INT-50, LongObject), + (MAX_INT+51, LongObject), + + # floating point + (5.2, None), + (5.354, FloatObject), + (-5.556, FloatObject), + (-57.58, DoubleObject), + + # boolean + (True, None), + (True, BoolObject), + (False, BoolObject), + + # char + ('A', CharObject), + ('Z', CharObject), + ('⅓', CharObject), + ('á', CharObject), + ('ы', CharObject), + ('カ', CharObject), + ('Ø', CharObject), + ('ß', CharObject), + + # string + ('This is a test string', None), + ('Кириллица', None), + ('Little Mary had a lamb', String), + + # UUID + (UUID('12345678123456789876543298765432'), None), + (UUID('74274274274274274274274274274274'), UUIDObject), + (uuid4(), None), + + # decimal (long internal representation in Java) + (decimal.Decimal('-234.567'), None), + (decimal.Decimal('200.0'), None), + (decimal.Decimal('123.456'), DecimalObject), + (decimal.Decimal('1.0'), None), + (decimal.Decimal('0.02'), None), + + # decimal (BigInteger internal representation in Java) + (decimal.Decimal('12345671234567123.45671234567'), None), + (decimal.Decimal('-845678456.7845678456784567845'), None), + + # date and time + (datetime(1980, 1, 1), None), + ((datetime(1980, 1, 1), 999), TimestampObject), + (timedelta(days=99), TimeObject), + + ], +) +def test_affinity(client_partition_aware, key, key_hint): + + cache_1 = client_partition_aware.get_or_create_cache({ + PROP_NAME: 'test_cache_1', + PROP_CACHE_MODE: CacheMode.PARTITIONED, + }) + value = 42 + cache_1.put(key, value, key_hint=key_hint) + + best_node = cache_1.get_best_node(key, key_hint=key_hint) + + for node in filter(lambda n: n.alive, client_partition_aware._nodes): + result = cache_local_peek( + node, cache_1.cache_id, key, key_hint=key_hint, + ) + if node is best_node: + assert result.value == value, ( + 'Affinity calculation error for {}'.format(key) + ) + else: + assert result.value is None, ( + 'Affinity calculation error for {}'.format(key) + ) + + cache_1.destroy() + + +def test_affinity_for_generic_object(client_partition_aware): + + cache_1 = client_partition_aware.get_or_create_cache({ + PROP_NAME: 'test_cache_1', + PROP_CACHE_MODE: CacheMode.PARTITIONED, + }) + + class KeyClass( + metaclass=GenericObjectMeta, + schema={ + 'NO': IntObject, + 'NAME': 
String, + }, + ): + pass + + key = KeyClass() + key.NO = 1 + key.NAME = 'test_string' + + cache_1.put(key, 42, key_hint=BinaryObject) + + best_node = cache_1.get_best_node(key, key_hint=BinaryObject) + + for node in filter(lambda n: n.alive, client_partition_aware._nodes): + result = cache_local_peek( + node, cache_1.cache_id, key, key_hint=BinaryObject, + ) + if node is best_node: + assert result.value == 42, ( + 'Affinity calculation error for {}'.format(key) + ) + else: + assert result.value is None, ( + 'Affinity calculation error for {}'.format(key) + ) + + cache_1.destroy() + + +def test_affinity_for_generic_object_without_type_hints(client_partition_aware): + + if not client_partition_aware.partition_awareness_supported_by_protocol: + pytest.skip( + 'Best effort affinity is not supported by the protocol {}.'.format( + client_partition_aware.protocol_version + ) + ) + + cache_1 = client_partition_aware.get_or_create_cache({ + PROP_NAME: 'test_cache_1', + PROP_CACHE_MODE: CacheMode.PARTITIONED, + }) + + class KeyClass( + metaclass=GenericObjectMeta, + schema={ + 'NO': IntObject, + 'NAME': String, + }, + ): + pass + + key = KeyClass() + key.NO = 2 + key.NAME = 'another_test_string' + + cache_1.put(key, 42) + + best_node = cache_1.get_best_node(key) + + for node in filter(lambda n: n.alive, client_partition_aware._nodes): + result = cache_local_peek( + node, cache_1.cache_id, key + ) + if node is best_node: + assert result.value == 42, ( + 'Affinity calculation error for {}'.format(key) + ) + else: + assert result.value is None, ( + 'Affinity calculation error for {}'.format(key) + ) + + cache_1.destroy() diff --git a/tests/test_affinity_bad_servers.py b/tests/test_affinity_bad_servers.py new file mode 100644 index 0000000..dce09de --- /dev/null +++ b/tests/test_affinity_bad_servers.py @@ -0,0 +1,63 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from pyignite.exceptions import ReconnectError +from tests.util import * + + +def test_client_with_multiple_bad_servers(start_client): + client = start_client(partition_aware=True) + with pytest.raises(ReconnectError) as e_info: + client.connect([("127.0.0.1", 10900), ("127.0.0.1", 10901)]) + assert str(e_info.value) == "Can not connect." 
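# A minimal sketch of the failover behaviour the tests below exercise: when a
# node dies, the first operation on the stale connection raises one of the
# connection errors this patch groups in pyignite/exceptions.py, and a plain
# retry succeeds once the node is reachable again. put_with_retry is a
# hypothetical helper written for illustration, not part of the client API.

from pyignite.exceptions import connection_errors


def put_with_retry(cache, key, value, attempts=2):
    for attempt in range(attempts):
        try:
            cache.put(key, value)
            return
        except connection_errors:
            # give up only when the last attempt still cannot reach a node
            if attempt == attempts - 1:
                raise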
+ + +def test_client_with_failed_server(request, start_ignite_server, start_client): + srv = start_ignite_server(4) + try: + client = start_client() + client.connect([("127.0.0.1", 10804)]) + cache = client.get_or_create_cache(request.node.name) + cache.put(1, 1) + kill_process_tree(srv.pid) + with pytest.raises(ConnectionResetError): + cache.get(1) + finally: + kill_process_tree(srv.pid) + + +def test_client_with_recovered_server(request, start_ignite_server, start_client): + srv = start_ignite_server(4) + try: + client = start_client() + client.connect([("127.0.0.1", 10804)]) + cache = client.get_or_create_cache(request.node.name) + cache.put(1, 1) + + # Kill and restart server + kill_process_tree(srv.pid) + srv = start_ignite_server(4) + + # First request fails + with pytest.raises(Exception): + cache.put(1, 2) + + # Retry succeeds + cache.put(1, 2) + assert cache.get(1) == 2 + finally: + kill_process_tree(srv.pid) diff --git a/tests/test_affinity_request_routing.py b/tests/test_affinity_request_routing.py new file mode 100644 index 0000000..eb46ab6 --- /dev/null +++ b/tests/test_affinity_request_routing.py @@ -0,0 +1,179 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
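# A minimal sketch of the hashing helpers this patch adds to pyignite/utils.py
# and wires into the key types (DateObject.hashcode, TimeObject.hashcode and
# the decimal handling). Keys are reduced to Java-compatible hash codes, which
# is what the request-routing tests in this file rely on when they predict the
# primary node for a key. The concrete inputs below are examples only.

import decimal
from datetime import datetime

from pyignite.utils import datetime_hashcode, decimal_hashcode, hashcode

name_hash = hashcode('test_cache_1')  # cache ids use this Java-style string hash
decimal_hash = decimal_hashcode(decimal.Decimal('123.456'))
date_hash = datetime_hashcode(int(datetime(1980, 1, 1).timestamp() * 1000))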
+ +from collections import OrderedDict + +import pytest + +from pyignite import * +from pyignite.datatypes import * +from pyignite.datatypes.cache_config import CacheMode +from pyignite.datatypes.prop_codes import * +from tests.util import * + + +@pytest.mark.parametrize("key,grid_idx", [(1, 3), (2, 1), (3, 1), (4, 3), (5, 1), (6, 3), (11, 2), (13, 2), (19, 2)]) +@pytest.mark.parametrize("backups", [0, 1, 2, 3]) +def test_cache_operation_on_primitive_key_routes_request_to_primary_node( + request, key, grid_idx, backups, client_partition_aware): + + cache = client_partition_aware.get_or_create_cache({ + PROP_NAME: request.node.name + str(backups), + PROP_BACKUPS_NUMBER: backups, + }) + + # Warm up affinity map + cache.put(key, key) + get_request_grid_idx() + + # Test + cache.get(key) + assert get_request_grid_idx() == grid_idx + + cache.put(key, key) + assert get_request_grid_idx("Put") == grid_idx + + cache.replace(key, key + 1) + assert get_request_grid_idx("Replace") == grid_idx + + cache.clear_key(key) + assert get_request_grid_idx("ClearKey") == grid_idx + + cache.contains_key(key) + assert get_request_grid_idx("ContainsKey") == grid_idx + + cache.get_and_put(key, 3) + assert get_request_grid_idx("GetAndPut") == grid_idx + + cache.get_and_put_if_absent(key, 4) + assert get_request_grid_idx("GetAndPutIfAbsent") == grid_idx + + cache.put_if_absent(key, 5) + assert get_request_grid_idx("PutIfAbsent") == grid_idx + + cache.get_and_remove(key) + assert get_request_grid_idx("GetAndRemove") == grid_idx + + cache.get_and_replace(key, 6) + assert get_request_grid_idx("GetAndReplace") == grid_idx + + cache.remove_key(key) + assert get_request_grid_idx("RemoveKey") == grid_idx + + cache.remove_if_equals(key, -1) + assert get_request_grid_idx("RemoveIfEquals") == grid_idx + + cache.replace(key, -1) + assert get_request_grid_idx("Replace") == grid_idx + + cache.replace_if_equals(key, 10, -10) + assert get_request_grid_idx("ReplaceIfEquals") == grid_idx + + +@pytest.mark.skip(reason="Custom key objects are not supported yet") +def test_cache_operation_on_complex_key_routes_request_to_primary_node(): + pass + + +@pytest.mark.parametrize("key,grid_idx", [(1, 2), (2, 1), (3, 1), (4, 2), (5, 2), (6, 3)]) +@pytest.mark.skip(reason="Custom key objects are not supported yet") +def test_cache_operation_on_custom_affinity_key_routes_request_to_primary_node( + request, client_partition_aware, key, grid_idx): + class AffinityTestType1( + metaclass=GenericObjectMeta, + type_name='AffinityTestType1', + schema=OrderedDict([ + ('test_str', String), + ('test_int', LongObject) + ]) + ): + pass + + cache_config = { + PROP_NAME: request.node.name, + PROP_CACHE_KEY_CONFIGURATION: [ + { + 'type_name': 'AffinityTestType1', + 'affinity_key_field_name': 'test_int', + }, + ], + } + cache = client_partition_aware.create_cache(cache_config) + + # noinspection PyArgumentList + key_obj = AffinityTestType1( + test_str="abc", + test_int=key + ) + + cache.put(key_obj, 1) + cache.put(key_obj, 2) + + assert get_request_grid_idx("Put") == grid_idx + + +@pytest.mark.skip("https://issues.apache.org/jira/browse/IGNITE-13967") +def test_cache_operation_routed_to_new_cluster_node(request, start_ignite_server, start_client): + client = start_client(partition_aware=True) + client.connect([("127.0.0.1", 10801), ("127.0.0.1", 10802), ("127.0.0.1", 10803), ("127.0.0.1", 10804)]) + cache = client.get_or_create_cache(request.node.name) + key = 12 + cache.put(key, key) + cache.put(key, key) + assert get_request_grid_idx("Put") == 3 + + srv = 
start_ignite_server(4) + try: + # Wait for rebalance and partition map exchange + def check_grid_idx(): + cache.get(key) + return get_request_grid_idx() == 4 + wait_for_condition(check_grid_idx) + + # Response is correct and comes from the new node + res = cache.get_and_remove(key) + assert res == key + assert get_request_grid_idx("GetAndRemove") == 4 + finally: + kill_process_tree(srv.pid) + + +def test_unsupported_affinity_cache_operation_routed_to_random_node(client_partition_aware): + verify_random_node(client_partition_aware.get_cache("custom-affinity")) + + +def test_replicated_cache_operation_routed_to_random_node(request, client_partition_aware): + cache = client_partition_aware.get_or_create_cache({ + PROP_NAME: request.node.name, + PROP_CACHE_MODE: CacheMode.REPLICATED, + }) + + verify_random_node(cache) + + +def verify_random_node(cache): + key = 1 + cache.put(key, key) + + idx1 = get_request_grid_idx("Put") + idx2 = idx1 + + # Try 10 times - random node may end up being the same + for _ in range(1, 10): + cache.put(key, key) + idx2 = get_request_grid_idx("Put") + if idx2 != idx1: + break + assert idx1 != idx2 diff --git a/tests/test_affinity_single_connection.py b/tests/test_affinity_single_connection.py new file mode 100644 index 0000000..c40393c --- /dev/null +++ b/tests/test_affinity_single_connection.py @@ -0,0 +1,102 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
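# A minimal sketch of the two cache configurations the routing tests above are
# built around: a REPLICATED cache, whose requests go to a random node, and a
# cache with a custom affinity key field, which the routing tests currently
# skip because custom key objects are not supported yet. The property codes,
# CacheMode and the key configuration dictionary mirror the test imports in
# this patch; the cache names and the single node address are examples only.

from pyignite import Client
from pyignite.datatypes.cache_config import CacheMode
from pyignite.datatypes.prop_codes import (
    PROP_CACHE_KEY_CONFIGURATION, PROP_CACHE_MODE, PROP_NAME,
)

client = Client(partition_aware=True)
client.connect([('127.0.0.1', 10801)])

replicated_cache = client.get_or_create_cache({
    PROP_NAME: 'replicated_example',
    PROP_CACHE_MODE: CacheMode.REPLICATED,
})

custom_affinity_cache = client.get_or_create_cache({
    PROP_NAME: 'custom_affinity_example',
    PROP_CACHE_KEY_CONFIGURATION: [{
        'type_name': 'AffinityTestType1',
        'affinity_key_field_name': 'test_int',
    }],
})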
+ +import pytest + +from tests.util import get_request_grid_idx + + +def test_all_cache_operations_with_partition_aware_client_on_single_server(request, client_partition_aware_single_server): + cache = client_partition_aware_single_server.get_or_create_cache(request.node.name) + key = 1 + key2 = 2 + + # Put/Get + cache.put(key, key) + assert cache.get(key) == key + + # Replace + res = cache.replace(key, key2) + assert res + assert cache.get(key) == key2 + + # Clear + cache.put(key2, key2) + cache.clear_key(key2) + assert cache.get(key2) is None + + # ContainsKey + assert cache.contains_key(key) + assert not cache.contains_key(key2) + + # GetAndPut + cache.put(key, key) + res = cache.get_and_put(key, key2) + assert res == key + assert cache.get(key) == key2 + + # GetAndPutIfAbsent + cache.clear_key(key) + res = cache.get_and_put_if_absent(key, key) + res2 = cache.get_and_put_if_absent(key, key2) + assert res is None + assert res2 == key + assert cache.get(key) == key + + # PutIfAbsent + cache.clear_key(key) + res = cache.put_if_absent(key, key) + res2 = cache.put_if_absent(key, key2) + assert res + assert not res2 + assert cache.get(key) == key + + # GetAndRemove + cache.put(key, key) + res = cache.get_and_remove(key) + assert res == key + assert cache.get(key) is None + + # GetAndReplace + cache.put(key, key) + res = cache.get_and_replace(key, key2) + assert res == key + assert cache.get(key) == key2 + + # RemoveKey + cache.put(key, key) + cache.remove_key(key) + assert cache.get(key) is None + + # RemoveIfEquals + cache.put(key, key) + res = cache.remove_if_equals(key, key2) + res2 = cache.remove_if_equals(key, key) + assert not res + assert res2 + assert cache.get(key) is None + + # Replace + cache.put(key, key) + cache.replace(key, key2) + assert cache.get(key) == key2 + + # ReplaceIfEquals + cache.put(key, key) + res = cache.replace_if_equals(key, key2, key2) + res2 = cache.replace_if_equals(key, key, key2) + assert not res + assert res2 + assert cache.get(key) == key2 diff --git a/tests/test_binary.py b/tests/test_binary.py index 29ccf68..1c051f0 100644 --- a/tests/test_binary.py +++ b/tests/test_binary.py @@ -278,3 +278,29 @@ class MyBinaryTypeV2( assert not hasattr(result, 'test_bool') migrate_cache.destroy() + + +def test_complex_object_names(client): + """ + Test the ability to work with Complex types, which names contains symbols + not suitable for use in Python identifiers. 
+ """ + type_name = 'Non.Pythonic#type-name$' + key = 'key' + data = 'test' + + class NonPythonicallyNamedType( + metaclass=GenericObjectMeta, + type_name=type_name, + schema=OrderedDict([ + ('field', String), + ]) + ): + pass + + cache = client.get_or_create_cache('test_name_cache') + cache.put(key, NonPythonicallyNamedType(field=data)) + + obj = cache.get(key) + assert obj.type_name == type_name, 'Complex type name mismatch' + assert obj.field == data, 'Complex object data failure' diff --git a/tests/test_cache_class.py b/tests/test_cache_class.py index 22865be..1df0d44 100644 --- a/tests/test_cache_class.py +++ b/tests/test_cache_class.py @@ -23,7 +23,7 @@ BoolObject, DecimalObject, FloatObject, IntObject, String, ) from pyignite.datatypes.prop_codes import * -from pyignite.exceptions import CacheError +from pyignite.exceptions import CacheError, ParameterError def test_cache_create(client): @@ -178,7 +178,7 @@ def test_get_binary_type(client): @pytest.mark.parametrize('page_size', range(1, 17, 5)) -def test_cache_scan(client, page_size): +def test_cache_scan(request, client, page_size): test_data = { 1: 'This is a test', 2: 'One more test', @@ -197,7 +197,7 @@ def test_cache_scan(client, page_size): 15: 'sollicitudin iaculis', } - cache = client.get_or_create_cache('my_oop_cache') + cache = client.get_or_create_cache(request.node.name) cache.put_all(test_data) gen = cache.scan(page_size=page_size) @@ -219,3 +219,18 @@ def test_get_and_put_if_absent(client): cache.put('my_key', 43) value = cache.get_and_put_if_absent('my_key', 42) assert value is 43 + + +def test_cache_get_when_cache_does_not_exist(client): + cache = client.get_cache('missing-cache') + with pytest.raises(CacheError) as e_info: + cache.put(1, 1) + assert str(e_info.value) == "Cache does not exist [cacheId= 1665146971]" + + +def test_cache_create_with_none_name(client): + with pytest.raises(ParameterError) as e_info: + client.create_cache(None) + assert str(e_info.value) == "You should supply at least cache name" + + diff --git a/tests/test_cache_config.py b/tests/test_cache_config.py index 2f01618..b708b0c 100644 --- a/tests/test_cache_config.py +++ b/tests/test_cache_config.py @@ -19,10 +19,12 @@ def test_get_configuration(client): - result = cache_get_or_create(client, 'my_unique_cache') + conn = client.random_node + + result = cache_get_or_create(conn, 'my_unique_cache') assert result.status == 0 - result = cache_get_configuration(client, 'my_unique_cache') + result = cache_get_configuration(conn, 'my_unique_cache') assert result.status == 0 assert result.value[PROP_NAME] == 'my_unique_cache' @@ -30,8 +32,9 @@ def test_get_configuration(client): def test_create_with_config(client): cache_name = 'my_very_unique_name' + conn = client.random_node - result = cache_create_with_config(client, { + result = cache_create_with_config(conn, { PROP_NAME: cache_name, PROP_CACHE_KEY_CONFIGURATION: [ { @@ -42,10 +45,10 @@ def test_create_with_config(client): }) assert result.status == 0 - result = cache_get_names(client) + result = cache_get_names(conn) assert cache_name in result.value - result = cache_create_with_config(client, { + result = cache_create_with_config(conn, { PROP_NAME: cache_name, }) assert result.status != 0 @@ -54,8 +57,9 @@ def test_create_with_config(client): def test_get_or_create_with_config(client): cache_name = 'my_very_unique_name' + conn = client.random_node - result = cache_get_or_create_with_config(client, { + result = cache_get_or_create_with_config(conn, { PROP_NAME: cache_name, 
PROP_CACHE_KEY_CONFIGURATION: [ { @@ -66,10 +70,10 @@ def test_get_or_create_with_config(client): }) assert result.status == 0 - result = cache_get_names(client) + result = cache_get_names(conn) assert cache_name in result.value - result = cache_get_or_create_with_config(client, { + result = cache_get_or_create_with_config(conn, { PROP_NAME: cache_name, }) assert result.status == 0 diff --git a/tests/test_datatypes.py b/tests/test_datatypes.py index b68ba8c..ae66c38 100644 --- a/tests/test_datatypes.py +++ b/tests/test_datatypes.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from collections import OrderedDict +import ctypes from datetime import datetime, timedelta import decimal import pytest @@ -20,6 +22,7 @@ from pyignite.api.key_value import cache_get, cache_put from pyignite.datatypes import * +from pyignite.utils import unsigned @pytest.mark.parametrize( @@ -47,7 +50,9 @@ # arrays of integers ([1, 2, 3, 5], None), - ([1, 2, 3, 5], ByteArrayObject), + (b'buzz', ByteArrayObject), + (bytearray([7, 8, 8, 11]), None), + (bytearray([7, 8, 8, 11]), ByteArrayObject), ([1, 2, 3, 5], ShortArrayObject), ([1, 2, 3, 5], IntArrayObject), @@ -114,26 +119,56 @@ ((-1, [(6001, 1), (6002, 2), (6003, 3)]), BinaryEnumArrayObject), # object array - ((-1, [1, 2, decimal.Decimal('3')]), None), + ((ObjectArrayObject.OBJECT, [1, 2, decimal.Decimal('3')]), ObjectArrayObject), # collection - ((3, [1, 2, 3]), CollectionObject), + ((CollectionObject.LINKED_LIST, [1, 2, 3]), None), # map - ((1, {'key': 4, 5: 6.0}), None), - ((2, {'key': 4, 5: 6.0}), None), + ((MapObject.HASH_MAP, {'key': 4, 5: 6.0}), None), + ((MapObject.LINKED_HASH_MAP, OrderedDict([('key', 4), (5, 6.0)])), None), ] ) def test_put_get_data(client, cache, value, value_hint): - result = cache_put(client, cache, 'my_key', value, value_hint=value_hint) + conn = client.random_node + + result = cache_put(conn, cache, 'my_key', value, value_hint=value_hint) assert result.status == 0 - result = cache_get(client, cache, 'my_key') + result = cache_get(conn, cache, 'my_key') assert result.status == 0 assert result.value == value +@pytest.mark.parametrize( + 'value', + [ + [1, 2, 3, 5], + (7, 8, 13, 18), + (-128, -1, 0, 1, 127, 255), + ] +) +def test_bytearray_from_list_or_tuple(client, cache, value): + """ + ByteArrayObject's pythonic type is `bytearray`, but it should also accept + lists or tuples as a content. 
+ """ + + conn = client.random_node + + result = cache_put( + conn, cache, 'my_key', value, value_hint=ByteArrayObject + ) + assert result.status == 0 + + result = cache_get(conn, cache, 'my_key') + assert result.status == 0 + assert result.value == bytearray([ + unsigned(ch, ctypes.c_ubyte) for ch in value + ]) + + @pytest.mark.parametrize( 'uuid_string', [ diff --git a/tests/test_get_names.py b/tests/test_get_names.py index 0e50f3d..2d6c0bc 100644 --- a/tests/test_get_names.py +++ b/tests/test_get_names.py @@ -18,11 +18,13 @@ def test_get_names(client): + conn = client.random_node + bucket_names = ['my_bucket', 'my_bucket_2', 'my_bucket_3'] for name in bucket_names: - cache_create(client, name) + cache_create(conn, name) - result = cache_get_names(client) + result = cache_get_names(conn) assert result.status == 0 assert type(result.value) == list assert len(result.value) >= len(bucket_names) diff --git a/tests/test_handshake.py b/tests/test_handshake.py deleted file mode 100644 index d655d94..0000000 --- a/tests/test_handshake.py +++ /dev/null @@ -1,64 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import socket - -from pyignite import Client -from pyignite.connection.handshake import HandshakeRequest, read_response - - -def test_handshake( - monkeypatch, - ignite_host, ignite_port, use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, - ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, ssl_version, - username, password, -): - client = Client( - use_ssl=use_ssl, - ssl_keyfile=ssl_keyfile, - ssl_keyfile_password=ssl_keyfile_password, - ssl_certfile=ssl_certfile, - ssl_ca_certfile=ssl_ca_certfile, - ssl_cert_reqs=ssl_cert_reqs, - ssl_ciphers=ssl_ciphers, - ssl_version=ssl_version, - username=username, - password=password, - ) - client._socket = client._wrap( - socket.socket(socket.AF_INET, socket.SOCK_STREAM) - ) - client.socket.connect((ignite_host, ignite_port)) - hs_request = HandshakeRequest(username, password) - client.send(hs_request) - hs_response = read_response(client) - assert hs_response['op_code'] != 0 - - client.close() - - # intentionally pass wrong protocol version - from pyignite.connection import handshake - monkeypatch.setattr(handshake, 'PROTOCOL_VERSION_MAJOR', 10) - - client._socket = client._wrap( - socket.socket(socket.AF_INET, socket.SOCK_STREAM) - ) - client.socket.connect((ignite_host, ignite_port)) - hs_request = HandshakeRequest(username, password) - client.send(hs_request) - hs_response = read_response(client) - assert hs_response['op_code'] == 0 - - client.close() diff --git a/tests/test_key_value.py b/tests/test_key_value.py index 6b4fb0e..a7edce1 100644 --- a/tests/test_key_value.py +++ b/tests/test_key_value.py @@ -23,30 +23,36 @@ def test_put_get(client, cache): - result = cache_put(client, cache, 'my_key', 5) + conn = client.random_node + + result = cache_put(conn, cache, 'my_key', 5) assert result.status == 0 - result = cache_get(client, cache, 'my_key') + result = cache_get(conn, cache, 'my_key') assert result.status == 0 assert result.value == 5 def test_get_all(client, cache): - result = cache_get_all(client, cache, ['key_1', 2, (3, IntObject)]) + conn = client.random_node + + result = cache_get_all(conn, cache, ['key_1', 2, (3, IntObject)]) assert result.status == 0 assert result.value == {} - cache_put(client, cache, 'key_1', 4) - cache_put(client, cache, 3, 18, key_hint=IntObject) + cache_put(conn, cache, 'key_1', 4) + cache_put(conn, cache, 3, 18, key_hint=IntObject) - result = cache_get_all(client, cache, ['key_1', 2, (3, IntObject)]) + result = cache_get_all(conn, cache, ['key_1', 2, (3, IntObject)]) assert result.status == 0 assert result.value == {'key_1': 4, 3: 18} def test_put_all(client, cache): + conn = client.random_node + test_dict = { 1: 2, 'key_1': 4, @@ -54,10 +60,10 @@ def test_put_all(client, cache): } test_keys = ['key_1', 1, 3] - result = cache_put_all(client, cache, test_dict) + result = cache_put_all(conn, cache, test_dict) assert result.status == 0 - result = cache_get_all(client, cache, test_keys) + result = cache_get_all(conn, cache, test_keys) assert result.status == 0 assert len(test_dict) == 3 @@ -67,266 +73,300 @@ def test_put_all(client, cache): def test_contains_key(client, cache): - cache_put(client, cache, 'test_key', 42) + conn = client.random_node - result = cache_contains_key(client, cache, 'test_key') + cache_put(conn, cache, 'test_key', 42) + + result = cache_contains_key(conn, cache, 'test_key') assert result.value is True - result = cache_contains_key(client, cache, 'non-existant-key') + result = cache_contains_key(conn, cache, 'non-existant-key') assert result.value is False def 
test_contains_keys(client, cache): - cache_put(client, cache, 5, 6) - cache_put(client, cache, 'test_key', 42) + conn = client.random_node + + cache_put(conn, cache, 5, 6) + cache_put(conn, cache, 'test_key', 42) - result = cache_contains_keys(client, cache, [5, 'test_key']) + result = cache_contains_keys(conn, cache, [5, 'test_key']) assert result.value is True - result = cache_contains_keys(client, cache, [5, 'non-existent-key']) + result = cache_contains_keys(conn, cache, [5, 'non-existent-key']) assert result.value is False def test_get_and_put(client, cache): - result = cache_get_and_put(client, cache, 'test_key', 42) + conn = client.random_node + + result = cache_get_and_put(conn, cache, 'test_key', 42) assert result.status == 0 assert result.value is None - result = cache_get(client, cache, 'test_key') + result = cache_get(conn, cache, 'test_key') assert result.status == 0 assert result.value is 42 - result = cache_get_and_put(client, cache, 'test_key', 1234) + result = cache_get_and_put(conn, cache, 'test_key', 1234) assert result.status == 0 assert result.value == 42 def test_get_and_replace(client, cache): - result = cache_get_and_replace(client, cache, 'test_key', 42) + conn = client.random_node + + result = cache_get_and_replace(conn, cache, 'test_key', 42) assert result.status == 0 assert result.value is None - result = cache_get(client, cache, 'test_key') + result = cache_get(conn, cache, 'test_key') assert result.status == 0 assert result.value is None - cache_put(client, cache, 'test_key', 42) + cache_put(conn, cache, 'test_key', 42) - result = cache_get_and_replace(client, cache, 'test_key', 1234) + result = cache_get_and_replace(conn, cache, 'test_key', 1234) assert result.status == 0 assert result.value == 42 def test_get_and_remove(client, cache): - result = cache_get_and_remove(client, cache, 'test_key') + conn = client.random_node + + result = cache_get_and_remove(conn, cache, 'test_key') assert result.status == 0 assert result.value is None - cache_put(client, cache, 'test_key', 42) + cache_put(conn, cache, 'test_key', 42) - result = cache_get_and_remove(client, cache, 'test_key') + result = cache_get_and_remove(conn, cache, 'test_key') assert result.status == 0 assert result.value == 42 def test_put_if_absent(client, cache): - result = cache_put_if_absent(client, cache, 'test_key', 42) + conn = client.random_node + + result = cache_put_if_absent(conn, cache, 'test_key', 42) assert result.status == 0 assert result.value is True - result = cache_put_if_absent(client, cache, 'test_key', 1234) + result = cache_put_if_absent(conn, cache, 'test_key', 1234) assert result.status == 0 assert result.value is False def test_get_and_put_if_absent(client, cache): - result = cache_get_and_put_if_absent(client, cache, 'test_key', 42) + conn = client.random_node + + result = cache_get_and_put_if_absent(conn, cache, 'test_key', 42) assert result.status == 0 assert result.value is None - result = cache_get_and_put_if_absent(client, cache, 'test_key', 1234) + result = cache_get_and_put_if_absent(conn, cache, 'test_key', 1234) assert result.status == 0 assert result.value == 42 - result = cache_get_and_put_if_absent(client, cache, 'test_key', 5678) + result = cache_get_and_put_if_absent(conn, cache, 'test_key', 5678) assert result.status == 0 assert result.value == 42 def test_replace(client, cache): - result = cache_replace(client, cache, 'test_key', 42) + conn = client.random_node + + result = cache_replace(conn, cache, 'test_key', 42) assert result.status == 0 assert result.value is 
False - cache_put(client, cache, 'test_key', 1234) + cache_put(conn, cache, 'test_key', 1234) - result = cache_replace(client, cache, 'test_key', 42) + result = cache_replace(conn, cache, 'test_key', 42) assert result.status == 0 assert result.value is True - result = cache_get(client, cache, 'test_key') + result = cache_get(conn, cache, 'test_key') assert result.status == 0 assert result.value == 42 def test_replace_if_equals(client, cache): - result = cache_replace_if_equals(client, cache, 'my_test', 42, 1234) + conn = client.random_node + + result = cache_replace_if_equals(conn, cache, 'my_test', 42, 1234) assert result.status == 0 assert result.value is False - cache_put(client, cache, 'my_test', 42) + cache_put(conn, cache, 'my_test', 42) - result = cache_replace_if_equals(client, cache, 'my_test', 42, 1234) + result = cache_replace_if_equals(conn, cache, 'my_test', 42, 1234) assert result.status == 0 assert result.value is True - result = cache_get(client, cache, 'my_test') + result = cache_get(conn, cache, 'my_test') assert result.status == 0 assert result.value == 1234 def test_clear(client, cache): - result = cache_put(client, cache, 'my_test', 42) + conn = client.random_node + + result = cache_put(conn, cache, 'my_test', 42) assert result.status == 0 - result = cache_clear(client, cache) + result = cache_clear(conn, cache) assert result.status == 0 - result = cache_get(client, cache, 'my_test') + result = cache_get(conn, cache, 'my_test') assert result.status == 0 assert result.value is None def test_clear_key(client, cache): - result = cache_put(client, cache, 'my_test', 42) + conn = client.random_node + + result = cache_put(conn, cache, 'my_test', 42) assert result.status == 0 - result = cache_put(client, cache, 'another_test', 24) + result = cache_put(conn, cache, 'another_test', 24) assert result.status == 0 - result = cache_clear_key(client, cache, 'my_test') + result = cache_clear_key(conn, cache, 'my_test') assert result.status == 0 - result = cache_get(client, cache, 'my_test') + result = cache_get(conn, cache, 'my_test') assert result.status == 0 assert result.value is None - result = cache_get(client, cache, 'another_test') + result = cache_get(conn, cache, 'another_test') assert result.status == 0 assert result.value == 24 def test_clear_keys(client, cache): - result = cache_put(client, cache, 'my_test_key', 42) + conn = client.random_node + + result = cache_put(conn, cache, 'my_test_key', 42) assert result.status == 0 - result = cache_put(client, cache, 'another_test', 24) + result = cache_put(conn, cache, 'another_test', 24) assert result.status == 0 - result = cache_clear_keys(client, cache, [ + result = cache_clear_keys(conn, cache, [ 'my_test_key', 'nonexistent_key', ]) assert result.status == 0 - result = cache_get(client, cache, 'my_test_key') + result = cache_get(conn, cache, 'my_test_key') assert result.status == 0 assert result.value is None - result = cache_get(client, cache, 'another_test') + result = cache_get(conn, cache, 'another_test') assert result.status == 0 assert result.value == 24 def test_remove_key(client, cache): - result = cache_put(client, cache, 'my_test_key', 42) + conn = client.random_node + + result = cache_put(conn, cache, 'my_test_key', 42) assert result.status == 0 - result = cache_remove_key(client, cache, 'my_test_key') + result = cache_remove_key(conn, cache, 'my_test_key') assert result.status == 0 assert result.value is True - result = cache_remove_key(client, cache, 'non_existent_key') + result = cache_remove_key(conn, cache, 
'non_existent_key') assert result.status == 0 assert result.value is False def test_remove_if_equals(client, cache): - result = cache_put(client, cache, 'my_test', 42) + conn = client.random_node + + result = cache_put(conn, cache, 'my_test', 42) assert result.status == 0 - result = cache_remove_if_equals(client, cache, 'my_test', 1234) + result = cache_remove_if_equals(conn, cache, 'my_test', 1234) assert result.status == 0 assert result.value is False - result = cache_remove_if_equals(client, cache, 'my_test', 42) + result = cache_remove_if_equals(conn, cache, 'my_test', 42) assert result.status == 0 assert result.value is True - result = cache_get(client, cache, 'my_test') + result = cache_get(conn, cache, 'my_test') assert result.status == 0 assert result.value is None def test_remove_keys(client, cache): - result = cache_put(client, cache, 'my_test', 42) + conn = client.random_node + + result = cache_put(conn, cache, 'my_test', 42) assert result.status == 0 - result = cache_put(client, cache, 'another_test', 24) + result = cache_put(conn, cache, 'another_test', 24) assert result.status == 0 - result = cache_remove_keys(client, cache, ['my_test', 'non_existent']) + result = cache_remove_keys(conn, cache, ['my_test', 'non_existent']) assert result.status == 0 - result = cache_get(client, cache, 'my_test') + result = cache_get(conn, cache, 'my_test') assert result.status == 0 assert result.value is None - result = cache_get(client, cache, 'another_test') + result = cache_get(conn, cache, 'another_test') assert result.status == 0 assert result.value == 24 def test_remove_all(client, cache): - result = cache_put(client, cache, 'my_test', 42) + conn = client.random_node + + result = cache_put(conn, cache, 'my_test', 42) assert result.status == 0 - result = cache_put(client, cache, 'another_test', 24) + result = cache_put(conn, cache, 'another_test', 24) assert result.status == 0 - result = cache_remove_all(client, cache) + result = cache_remove_all(conn, cache) assert result.status == 0 - result = cache_get(client, cache, 'my_test') + result = cache_get(conn, cache, 'my_test') assert result.status == 0 assert result.value is None - result = cache_get(client, cache, 'another_test') + result = cache_get(conn, cache, 'another_test') assert result.status == 0 assert result.value is None def test_cache_get_size(client, cache): - result = cache_put(client, cache, 'my_test', 42) + conn = client.random_node + + result = cache_put(conn, cache, 'my_test', 42) assert result.status == 0 - result = cache_get_size(client, cache) + result = cache_get_size(conn, cache) assert result.status == 0 assert result.value == 1 diff --git a/tests/test_scan.py b/tests/test_scan.py index 77e9613..2f0e056 100644 --- a/tests/test_scan.py +++ b/tests/test_scan.py @@ -20,47 +20,49 @@ def test_scan(client, cache): + conn = client.random_node page_size = 10 - result = cache_put_all(client, cache, { + result = cache_put_all(conn, cache, { 'key_{}'.format(v): v for v in range(page_size * 2) }) assert result.status == 0 - result = scan(client, cache, page_size) + result = scan(conn, cache, page_size) assert result.status == 0 assert len(result.value['data']) == page_size assert result.value['more'] is True cursor = result.value['cursor'] - result = scan_cursor_get_page(client, cursor) + result = scan_cursor_get_page(conn, cursor) assert result.status == 0 assert len(result.value['data']) == page_size assert result.value['more'] is False - result = scan_cursor_get_page(client, cursor) + result = scan_cursor_get_page(conn, 
cursor) assert result.status != 0 def test_close_resource(client, cache): + conn = client.random_node page_size = 10 - result = cache_put_all(client, cache, { + result = cache_put_all(conn, cache, { 'key_{}'.format(v): v for v in range(page_size * 2) }) assert result.status == 0 - result = scan(client, cache, page_size) + result = scan(conn, cache, page_size) assert result.status == 0 assert len(result.value['data']) == page_size assert result.value['more'] is True cursor = result.value['cursor'] - result = resource_close(client, cursor) + result = resource_close(conn, cursor) assert result.status == 0 - result = scan_cursor_get_page(client, cursor) + result = scan_cursor_get_page(conn, cursor) assert result.status != 0 diff --git a/tests/test_sql.py b/tests/test_sql.py index d983a20..87383d3 100644 --- a/tests/test_sql.py +++ b/tests/test_sql.py @@ -47,11 +47,13 @@ def test_sql(client): + conn = client.random_node + # cleanup client.sql(drop_query) result = sql_fields( - client, + conn, 'PUBLIC', create_query, page_size, @@ -62,7 +64,7 @@ def test_sql(client): for i, data_line in enumerate(initial_data, start=1): fname, lname, grade = data_line result = sql_fields( - client, + conn, 'PUBLIC', insert_query, page_size, @@ -71,12 +73,12 @@ def test_sql(client): ) assert result.status == 0, result.message - result = cache_get_configuration(client, 'SQL_PUBLIC_STUDENT') + result = cache_get_configuration(conn, 'SQL_PUBLIC_STUDENT') assert result.status == 0, result.message binary_type_name = result.value[PROP_QUERY_ENTITIES][0]['value_type_name'] result = sql( - client, + conn, 'SQL_PUBLIC_STUDENT', binary_type_name, 'TRUE', @@ -93,7 +95,7 @@ def test_sql(client): cursor = result.value['cursor'] while result.value['more']: - result = sql_cursor_get_page(client, cursor) + result = sql_cursor_get_page(conn, cursor) assert result.status == 0, result.message for wrapped_object in result.value['data'].values(): @@ -101,17 +103,19 @@ def test_sql(client): assert data.type_id == entity_id(binary_type_name) # repeat cleanup - result = sql_fields(client, 'PUBLIC', drop_query, page_size) + result = sql_fields(conn, 'PUBLIC', drop_query, page_size) assert result.status == 0 def test_sql_fields(client): + conn = client.random_node + # cleanup client.sql(drop_query) result = sql_fields( - client, + conn, 'PUBLIC', create_query, page_size, @@ -122,7 +126,7 @@ def test_sql_fields(client): for i, data_line in enumerate(initial_data, start=1): fname, lname, grade = data_line result = sql_fields( - client, + conn, 'PUBLIC', insert_query, page_size, @@ -132,7 +136,7 @@ def test_sql_fields(client): assert result.status == 0, result.message result = sql_fields( - client, + conn, 'PUBLIC', select_query, page_size, @@ -144,13 +148,13 @@ def test_sql_fields(client): cursor = result.value['cursor'] - result = sql_fields_cursor_get_page(client, cursor, field_count=4) + result = sql_fields_cursor_get_page(conn, cursor, field_count=4) assert result.status == 0 assert len(result.value['data']) == len(initial_data) - page_size assert result.value['more'] is False # repeat cleanup - result = sql_fields(client, 'PUBLIC', drop_query, page_size) + result = sql_fields(conn, 'PUBLIC', drop_query, page_size) assert result.status == 0 diff --git a/tests/util.py b/tests/util.py new file mode 100644 index 0000000..1d6acd6 --- /dev/null +++ b/tests/util.py @@ -0,0 +1,179 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import glob +import os +import psutil +import re +import signal +import subprocess +import time + + +def wait_for_condition(condition, interval=0.1, timeout=10, error=None): + start = time.time() + res = condition() + + while not res and time.time() - start < timeout: + time.sleep(interval) + res = condition() + + if res: + return True + + if error is not None: + raise Exception(error) + + return False + + +def is_windows(): + return os.name == "nt" + + +def get_test_dir(): + return os.path.dirname(os.path.realpath(__file__)) + + +def get_ignite_dirs(): + ignite_home = os.getenv("IGNITE_HOME") + if ignite_home is not None: + yield ignite_home + + proj_dir = os.path.abspath(os.path.join(get_test_dir(), "..", "..")) + yield os.path.join(proj_dir, "ignite") + yield os.path.join(proj_dir, "incubator_ignite") + + +def get_ignite_runner(): + ext = ".bat" if is_windows() else ".sh" + for ignite_dir in get_ignite_dirs(): + runner = os.path.join(ignite_dir, "bin", "ignite" + ext) + print("Probing Ignite runner at '{0}'...".format(runner)) + if os.path.exists(runner): + return runner + + raise Exception(f"Ignite not found. 
IGNITE_HOME {os.getenv('IGNITE_HOME')}") + + +def get_ignite_config_path(use_ssl=False): + if use_ssl: + file_name = "ignite-config-ssl.xml" + else: + file_name = "ignite-config.xml" + + return os.path.join(get_test_dir(), "config", file_name) + + +def check_server_started(idx=1): + log_file = os.path.join(get_test_dir(), "logs", f"ignite-log-{idx}.txt") + if not os.path.exists(log_file): + return False + + pattern = re.compile('^Topology snapshot.*') + + with open(log_file) as f: + for line in f.readlines(): + if pattern.match(line): + return True + + return False + + +def kill_process_tree(pid): + if is_windows(): + subprocess.call(['taskkill', '/F', '/T', '/PID', str(pid)]) + else: + children = psutil.Process(pid).children(recursive=True) + for child in children: + os.kill(child.pid, signal.SIGKILL) + os.kill(pid, signal.SIGKILL) + + +def _start_ignite(idx=1, debug=False, use_ssl=False): + clear_logs(idx) + + runner = get_ignite_runner() + + env = os.environ.copy() + env['IGNITE_INSTANCE_INDEX'] = str(idx) + env['IGNITE_CLIENT_PORT'] = str(10800 + idx) + + if debug: + env["JVM_OPTS"] = "-Djava.net.preferIPv4Stack=true -Xdebug -Xnoagent -Djava.compiler=NONE " \ + "-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005 " + + ignite_cmd = [runner, get_ignite_config_path(use_ssl)] + print("Starting Ignite server node:", ignite_cmd) + + srv = subprocess.Popen(ignite_cmd, env=env, cwd=get_test_dir()) + + started = wait_for_condition(lambda: check_server_started(idx), timeout=30) + if started: + return srv + + kill_process_tree(srv.pid) + raise Exception("Failed to start Ignite: timeout while trying to connect") + + +def start_ignite_gen(idx=1, use_ssl=False): + srv = _start_ignite(idx, use_ssl=use_ssl) + yield srv + kill_process_tree(srv.pid) + + +def get_log_files(idx=1): + logs_pattern = os.path.join(get_test_dir(), "logs", "ignite-log-{0}*.txt".format(idx)) + return glob.glob(logs_pattern) + + +def clear_logs(idx=1): + for f in get_log_files(idx): + os.remove(f) + + +def read_log_file(file, idx): + i = -1 + with open(file) as f: + lines = f.readlines() + for line in lines: + i += 1 + + if i < read_log_file.last_line[idx]: + continue + + if i > read_log_file.last_line[idx]: + read_log_file.last_line[idx] = i + + # Example: Client request received [reqId=1, addr=/127.0.0.1:51694, + # req=org.apache.ignite.internal.processors.platform.client.cache.ClientCachePutRequest@1f33101e] + res = re.match("Client request received .*?req=org.apache.ignite.internal.processors." 
+ "platform.client.cache.ClientCache([a-zA-Z]+)Request@", line) + + if res is not None: + yield res.group(1) + + +def get_request_grid_idx(message="Get"): + res = -1 + for i in range(1, 5): + for log_file in get_log_files(i): + for log in read_log_file(log_file, i): + if log == message: + res = i # Do not exit early to advance all log positions + return res + + +read_log_file.last_line = [0, 0, 0, 0, 0] \ No newline at end of file diff --git a/tox.ini b/tox.ini index 6e70234..69db226 100644 --- a/tox.ini +++ b/tox.ini @@ -15,23 +15,22 @@ [tox] skipsdist = True -envlist = py{36,37,38}-{no-ssl,ssl,ssl-password}-docker +envlist = py{36,37,38}-{no-ssl,ssl,ssl-password} [travis] python = - 3.6: py36-{no-ssl,ssl,ssl-password}-docker - 3.7: py37-{no-ssl,ssl,ssl-password}-docker - 3.8: py38-{no-ssl,ssl,ssl-password}-docker + 3.6: py36-{no-ssl,ssl,ssl-password} + 3.7: py37-{no-ssl,ssl,ssl-password} + 3.8: py38-{no-ssl,ssl,ssl-password} [testenv] -passenv = TEAMCITY_VERSION +passenv = TEAMCITY_VERSION IGNITE_HOME envdir = {homedir}/.virtualenvs/pyignite-{envname} deps = -r ./requirements/install.txt -r ./requirements/tests.txt recreate = True usedevelop = True -allowlist_externals = docker-compose commands = pytest {env:PYTESTARGS:} {posargs} @@ -41,45 +40,17 @@ setenv: [ssl] setenv: - PYTEST_ADDOPTS = --examples --use-ssl=True --ssl-certfile={toxinidir}/tests/config/ssl/client_full.pem + PYTEST_ADDOPTS = --examples --use-ssl=True --ssl-certfile={toxinidir}/tests/config/ssl/client_full.pem --ssl-version=TLSV1_2 [ssl-password] setenv: - PYTEST_ADDOPTS = --examples --use-ssl=True --ssl-certfile={toxinidir}/tests/config/ssl/client_with_pass_full.pem --ssl-keyfile-password=654321 - -[docker] -commands_pre = - docker-compose down - docker-compose up -d ignite -commands_post = - docker-compose down - -[docker-ssl] -commands_pre = - docker-compose down - docker-compose up -d ignite-ssl -commands_post = {[docker]commands_post} + PYTEST_ADDOPTS = --examples --use-ssl=True --ssl-certfile={toxinidir}/tests/config/ssl/client_with_pass_full.pem --ssl-keyfile-password=654321 --ssl-version=TLSV1_2 [testenv:py{36,37,38}-no-ssl] setenv: {[no-ssl]setenv} -[testenv:py{36,37,38}-no-ssl-docker] -commands_pre = {[docker]commands_pre} -setenv: {[no-ssl]setenv} -commands_post = {[docker]commands_post} - [testenv:py{36,37,38}-ssl] setenv: {[ssl]setenv} -[testenv:py{36,37,38}-ssl-docker] -commands_pre = {[docker-ssl]commands_pre} -setenv: {[ssl]setenv} -commands_post = {[docker]commands_post} - [testenv:py{36,37,38}-ssl-password] setenv: {[ssl-password]setenv} - -[testenv:py{36,37,38}-ssl-password-docker] -commands_pre = {[docker-ssl]commands_pre} -setenv: {[ssl-password]setenv} -commands_post = {[docker]commands_post} From 644de99a898ad840247081ec94ffcc2725a19c04 Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Tue, 26 Jan 2021 11:13:05 +0300 Subject: [PATCH 03/62] IGNITE-14056: Fixed README and documentation This closes #3 --- README.md | 78 +++++--- docs/datatypes/parsers.rst | 176 +++++++++--------- docs/examples.rst | 2 +- .../source/pyignite.connection.generators.rst | 7 - docs/source/pyignite.connection.rst | 1 - requirements/docs.txt | 3 +- 6 files changed, 140 insertions(+), 127 deletions(-) delete mode 100644 docs/source/pyignite.connection.generators.rst diff --git a/README.md b/README.md index 26b9a6a..24f7b4e 100644 --- a/README.md +++ b/README.md @@ -9,16 +9,16 @@ Apache Ignite thin (binary protocol) client, written in Python 3. 
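For orientation, a minimal round trip with the client; the cache name here is illustrative, and an Ignite node is assumed to be listening on localhost:10800 (the same assumption the examples and tests make):

```python
from pyignite import Client

# Connect to a locally running Ignite node over the thin client port
client = Client()
client.connect('127.0.0.1', 10800)

# Create (or open) a cache and round-trip a single value
cache = client.get_or_create_cache('hello-cache')
cache.put(1, 'Hello, Ignite!')
print(cache.get(1))  # -> 'Hello, Ignite!'

client.close()
```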
## Installation -#### *for end user* +### *for end user* If you only want to use the `pyignite` module in your project, do: -``` +```bash $ pip install pyignite ``` -#### *for developer* +### *for developer* If you want to run tests, examples or build documentation, clone the whole repository: -``` +```bash $ git clone git@github.com:apache/ignite-python-thin-client.git $ pip install -e . ``` @@ -30,32 +30,47 @@ in the `pip` manual. Then run through the contents of `requirements` folder to install the additional requirements into your working Python environment using -``` +```bash $ pip install -r requirements/.txt ``` You may also want to consult the `setuptools` manual about using `setup.py`. +### Updating from older version + +To upgrade an existing package, use the following command: +```bash +pip install --upgrade pyignite +``` + +To install the latest version of a package: +```bash +pip install pyignite +``` + +To install a specific version: +```bash +pip install pyignite==0.4.0 +``` + ## Documentation [The package documentation](https://apache-ignite-binary-protocol-client.readthedocs.io) is available at *RTD* for your convenience. If you want to build the documentation from source, do the developer -installation as described above, then run the following commands: -``` -$ cd ignite/modules/platforms/python +installation as described above, then run the following commands from the +client's root directory: +```bash $ pip install -r requirements/docs.txt $ cd docs $ make html ``` -Then open `ignite/modules/platforms/python/docs/generated/html/index.html` -in your browser. +Then open `docs/generated/html/index.html` in your browser. ## Examples -Some examples of using pyignite are provided in -`ignite/modules/platforms/python/examples` folder. They are extensively -commented in the +Some examples of using pyignite are provided in `examples` folder. They are +extensively commented in the “[Examples of usage](https://apache-ignite-binary-protocol-client.readthedocs.io/en/latest/examples.html)” section of the documentation. @@ -63,30 +78,35 @@ This code implies that it is run in the environment with `pyignite` package installed, and Apache Ignite node is running on localhost:10800. ## Testing -*NB!* All tests require Apache Ignite node running on localhost:10800. For the convenience, `docker-compose.yml` is present. -So installing `docker` and `docker-compose` is recommended. Also, it is recommended installing `pyignite` in development -mode. You can do that using following command: -``` -$ pip install -e . -``` -### Run without ssl +*NB!* It is recommended installing `pyignite` in development mode. +Refer to [this section](#for-developer) for instructions. 
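Once `IGNITE_HOME` is set up as described below, the same test matrix can also be driven through `tox`, which installs the requirements into its own environments; for example, to run only the Python 3.8 environment without SSL (the environment name comes from the `envlist` in `tox.ini`):
```bash
$ pip install tox
$ tox -e py38-no-ssl
```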
+ +Do not forget to install test requirements: +```bash +$ pip install -r requirements/install.txt -r requirements/tests.txt ``` -$ docker-compose down && docker-compose up -d ignite + +Also, you'll need to have a binary release of Ignite with lib4j2 enabled and +`IGNITE_HOME` properly set: +```bash +$ cd +$ export IGNITE_HOME=$(pwd) +$ cp -r $IGNITE_HOME/libs/optional/ignite-log4j2 $IGNITE_HOME/libs/ +``` +### Run basic tests +```bash $ pytest ``` ### Run with examples -``` -$ docker-compose down && docker-compose up -d ignite -$ pytest --examples +```bash +$ pytest --examples ``` ### Run with ssl and not encrypted key -``` -$ docker-compose down && docker-compose up -d ignite -$ pytest --use-ssl=True --ssl-certfile=./tests/config/ssl/client_full.pem +```bash +$ pytest --use-ssl=True --ssl-certfile=./tests/ssl/client_full.pem ``` ### Run with ssl and password-protected key -``` -$ docker-compose down && docker-compose up -d ignite +```bash $ pytest --use-ssl=True --ssl-certfile=./tests/config/ssl/client_with_pass_full.pem --ssl-keyfile-password=654321 ``` diff --git a/docs/datatypes/parsers.rst b/docs/datatypes/parsers.rst index 71f9aac..92329cc 100644 --- a/docs/datatypes/parsers.rst +++ b/docs/datatypes/parsers.rst @@ -47,94 +47,94 @@ However, in some rare cases of type ambiguity, as well as for the needs of interoperability, you may have to sneak one or the other class, along with your data, in to some API function as a *type conversion hint*. -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|`type_code`|Apache Ignite |Python type |Parser/constructor | -| |docs reference |or class |class | -+===========+====================+===============================+==================================================================+ -|*Primitive data types* | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x01 |Byte_ |int |:class:`~pyignite.datatypes.primitive_objects.ByteObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x02 |Short_ |int |:class:`~pyignite.datatypes.primitive_objects.ShortObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x03 |Int_ |int |:class:`~pyignite.datatypes.primitive_objects.IntObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x04 |Long_ |int |:class:`~pyignite.datatypes.primitive_objects.LongObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x05 |Float_ |float |:class:`~pyignite.datatypes.primitive_objects.FloatObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x06 |Double_ |float |:class:`~pyignite.datatypes.primitive_objects.DoubleObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x07 |Char_ |str |:class:`~pyignite.datatypes.primitive_objects.CharObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x08 |Bool_ |bool 
|:class:`~pyignite.datatypes.primitive_objects.BoolObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x65 |Null_ |NoneType |:class:`~pyignite.datatypes.null_object.Null` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|*Standard objects* | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x09 |String_ |Str |:class:`~pyignite.datatypes.standard.String` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x0a |UUID_ |uuid.UUID |:class:`~pyignite.datatypes.standard.UUIDObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x21 |Timestamp_ |tuple |:class:`~pyignite.datatypes.standard.TimestampObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x0b |Date_ |datetime.datetime |:class:`~pyignite.datatypes.standard.DateObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x24 |Time_ |datetime.timedelta |:class:`~pyignite.datatypes.standard.TimeObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x1e |Decimal_ |decimal.Decimal |:class:`~pyignite.datatypes.standard.DecimalObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x1c |Enum_ |tuple |:class:`~pyignite.datatypes.standard.EnumObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x67 |`Binary enum`_ |tuple |:class:`~pyignite.datatypes.standard.BinaryEnumObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|*Arrays of primitives* | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x0c |`Byte array`_ |iterable/bytearray |:class:`~pyignite.datatypes.primitive_arrays.ByteArrayObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x0d |`Short array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.ShortArrayObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x0e |`Int array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.IntArrayObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x0f |`Long array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.LongArrayObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x10 |`Float array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.FloatArrayObject` | 
-+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x11 |`Double array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.DoubleArrayObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x12 |`Char array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.CharArrayObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x13 |`Bool array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.BoolArrayObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|*Arrays of standard objects* | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x14 |`String array`_ |iterable/list |:class:`~pyignite.datatypes.standard.StringArrayObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x15 |`UUID array`_ |iterable/list |:class:`~pyignite.datatypes.standard.UUIDArrayObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x22 |`Timestamp array`_ |iterable/list |:class:`~pyignite.datatypes.standard.TimestampArrayObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x16 |`Date array`_ |iterable/list |:class:`~pyignite.datatypes.standard.DateArrayObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x23 |`Time array`_ |iterable/list |:class:`~pyignite.datatypes.standard.TimeArrayObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x1f |`Decimal array`_ |iterable/list |:class:`~pyignite.datatypes.standard.DecimalArrayObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|*Object collections, special types, and complex object* | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x17 |`Object array`_ |tuple[int, iterable/list] |:class:`~pyignite.datatypes.complex.ObjectArrayObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x18 |`Collection`_ |tuple[int, iterable/list] |:class:`~pyignite.datatypes.complex.CollectionObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x19 |`Map`_ |tuple[int, dict/OrderedDict] |:class:`~pyignite.datatypes.complex.MapObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x1d |`Enum array`_ |iterable/list |:class:`~pyignite.datatypes.standard.EnumArrayObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ 
-|0x67 |`Complex object`_ |object |:class:`~pyignite.datatypes.complex.BinaryObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ -|0x1b |`Wrapped data`_ |tuple[int, bytes] |:class:`~pyignite.datatypes.complex.WrappedDataObject` | -+-----------+--------------------+-------------------------------+------------------------------------------------------------------+ ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +| `type_code` |Apache Ignite |Python type |Parser/constructor | +| |docs reference |or class |class | ++=============+====================+===============================+==================================================================+ +|*Primitive data types* | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x01 |Byte_ |int |:class:`~pyignite.datatypes.primitive_objects.ByteObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x02 |Short_ |int |:class:`~pyignite.datatypes.primitive_objects.ShortObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x03 |Int_ |int |:class:`~pyignite.datatypes.primitive_objects.IntObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x04 |Long_ |int |:class:`~pyignite.datatypes.primitive_objects.LongObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x05 |Float_ |float |:class:`~pyignite.datatypes.primitive_objects.FloatObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x06 |Double_ |float |:class:`~pyignite.datatypes.primitive_objects.DoubleObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x07 |Char_ |str |:class:`~pyignite.datatypes.primitive_objects.CharObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x08 |Bool_ |bool |:class:`~pyignite.datatypes.primitive_objects.BoolObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x65 |Null_ |NoneType |:class:`~pyignite.datatypes.null_object.Null` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|*Standard objects* | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x09 |String_ |Str |:class:`~pyignite.datatypes.standard.String` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0a |UUID_ |uuid.UUID |:class:`~pyignite.datatypes.standard.UUIDObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x21 |Timestamp_ 
|tuple |:class:`~pyignite.datatypes.standard.TimestampObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0b |Date_ |datetime.datetime |:class:`~pyignite.datatypes.standard.DateObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x24 |Time_ |datetime.timedelta |:class:`~pyignite.datatypes.standard.TimeObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x1e |Decimal_ |decimal.Decimal |:class:`~pyignite.datatypes.standard.DecimalObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x1c |Enum_ |tuple |:class:`~pyignite.datatypes.standard.EnumObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x67 |`Binary enum`_ |tuple |:class:`~pyignite.datatypes.standard.BinaryEnumObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|*Arrays of primitives* | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0c |`Byte array`_ |iterable/bytearray |:class:`~pyignite.datatypes.primitive_arrays.ByteArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0d |`Short array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.ShortArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0e |`Int array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.IntArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x0f |`Long array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.LongArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x10 |`Float array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.FloatArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x11 |`Double array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.DoubleArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x12 |`Char array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.CharArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x13 |`Bool array`_ |iterable/list |:class:`~pyignite.datatypes.primitive_arrays.BoolArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|*Arrays of standard objects* | 
++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x14 |`String array`_ |iterable/list |:class:`~pyignite.datatypes.standard.StringArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x15 |`UUID array`_ |iterable/list |:class:`~pyignite.datatypes.standard.UUIDArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x22 |`Timestamp array`_ |iterable/list |:class:`~pyignite.datatypes.standard.TimestampArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x16 |`Date array`_ |iterable/list |:class:`~pyignite.datatypes.standard.DateArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x23 |`Time array`_ |iterable/list |:class:`~pyignite.datatypes.standard.TimeArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x1f |`Decimal array`_ |iterable/list |:class:`~pyignite.datatypes.standard.DecimalArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|*Object collections, special types, and complex object* | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x17 |`Object array`_ |tuple[int, iterable/list] |:class:`~pyignite.datatypes.complex.ObjectArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x18 |`Collection`_ |tuple[int, iterable/list] |:class:`~pyignite.datatypes.complex.CollectionObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x19 |`Map`_ |tuple[int, dict/OrderedDict] |:class:`~pyignite.datatypes.complex.MapObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x1d |`Enum array`_ |iterable/list |:class:`~pyignite.datatypes.standard.EnumArrayObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x67 |`Complex object`_ |object |:class:`~pyignite.datatypes.complex.BinaryObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ +|0x1b |`Wrapped data`_ |tuple[int, bytes] |:class:`~pyignite.datatypes.complex.WrappedDataObject` | ++-------------+--------------------+-------------------------------+------------------------------------------------------------------+ .. _Byte: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-byte .. _Short: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-short diff --git a/docs/examples.rst b/docs/examples.rst index 39deef3..4b8c7e3 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -212,7 +212,7 @@ Fill tables with data. 
:language: python :lines: 43-50, 63-66, 78-81, 211-218 -Data samples are taken from `Ignite GitHub repository`_. +Data samples are taken from `PyIgnite GitHub repository`_. That concludes the preparation of data. Now let us answer some questions. diff --git a/docs/source/pyignite.connection.generators.rst b/docs/source/pyignite.connection.generators.rst deleted file mode 100644 index daecda3..0000000 --- a/docs/source/pyignite.connection.generators.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.connection.generators module -===================================== - -.. automodule:: pyignite.connection.generators - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.connection.rst b/docs/source/pyignite.connection.rst index 92b07a7..f1acd2b 100644 --- a/docs/source/pyignite.connection.rst +++ b/docs/source/pyignite.connection.rst @@ -11,7 +11,6 @@ Submodules .. toctree:: - pyignite.connection.generators pyignite.connection.handshake pyignite.connection.ssl diff --git a/requirements/docs.txt b/requirements/docs.txt index 75ab231..962f07f 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -2,5 +2,6 @@ # (look up the prebuilt docs in `docs/generated`) -r install.txt +wheel==0.36.2 Sphinx==1.7.5 -sphinxcontrib-fulltoc==1.2.0 +sphinxcontrib-fulltoc==1.2.0 \ No newline at end of file From 4ca871dee575521cba5fc5f1a3df7b9fd839badb Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Tue, 26 Jan 2021 13:28:50 +0300 Subject: [PATCH 04/62] IGNITE-14058: Bool arrays now return as bool array, not int array This closes #4 --- pyignite/datatypes/primitive_arrays.py | 9 +++++++++ tests/test_datatypes.py | 7 +++++++ 2 files changed, 16 insertions(+) diff --git a/pyignite/datatypes/primitive_arrays.py b/pyignite/datatypes/primitive_arrays.py index bca4fd9..3763b96 100644 --- a/pyignite/datatypes/primitive_arrays.py +++ b/pyignite/datatypes/primitive_arrays.py @@ -297,3 +297,12 @@ class BoolArrayObject(PrimitiveArrayObject): _type_id = TYPE_BOOLEAN_ARR primitive_type = Bool type_code = TC_BOOL_ARRAY + + @classmethod + def to_python(cls, ctype_object, *args, **kwargs): + if not ctype_object: + return None + result = [False] * ctype_object.length + for i in range(ctype_object.length): + result[i] = ctype_object.data[i] != 0 + return result diff --git a/tests/test_datatypes.py b/tests/test_datatypes.py index ae66c38..83e9a60 100644 --- a/tests/test_datatypes.py +++ b/tests/test_datatypes.py @@ -65,6 +65,9 @@ # array of bool ([True, False, True], None), + ([True, False], BoolArrayObject), + ([False, True], BoolArrayObject), + ([True, False, True, False], BoolArrayObject), # string ('Little Mary had a lamb', None), @@ -140,6 +143,10 @@ def test_put_get_data(client, cache, value, value_hint): assert result.status == 0 assert result.value == value + if isinstance(result.value, list): + for res, val in zip(result.value, value): + assert type(res) == type(val) + @pytest.mark.parametrize( 'value', From c746329f9148e84ab15268a9d15c0587f9b9e19a Mon Sep 17 00:00:00 2001 From: Ivan Dashchinskiy Date: Wed, 27 Jan 2021 17:44:35 +0300 Subject: [PATCH 05/62] IGNITE-14072 Refactor, remove duplicates and optimize Response and SQLResponse This closes #8 --- pyignite/api/binary.py | 9 +- pyignite/datatypes/complex.py | 2 +- pyignite/queries/__init__.py | 2 +- pyignite/queries/query.py | 80 +++---- pyignite/queries/response.py | 384 ++++++++-------------------------- 5 files changed, 130 insertions(+), 347 deletions(-) diff --git a/pyignite/api/binary.py b/pyignite/api/binary.py index 
97f9fbd..1d63b49 100644 --- a/pyignite/api/binary.py +++ b/pyignite/api/binary.py @@ -20,10 +20,11 @@ body_struct, enum_struct, schema_struct, binary_fields_struct, ) from pyignite.datatypes import String, Int, Bool -from pyignite.queries import Query, get_response_class +from pyignite.queries import Query from pyignite.queries.op_codes import * from pyignite.utils import int_overflow, entity_id from .result import APIResult +from ..queries.response import Response def get_binary_type( @@ -53,9 +54,9 @@ def get_binary_type( }) connection.send(send_buffer) - response_head_struct = get_response_class(connection)([ - ('type_exists', Bool), - ]) + response_head_struct = Response(protocol_version=connection.get_protocol_version(), + following=[('type_exists', Bool)]) + response_head_type, recv_buffer = response_head_struct.parse(connection) response_head = response_head_type.from_buffer_copy(recv_buffer) response_parts = [] diff --git a/pyignite/datatypes/complex.py b/pyignite/datatypes/complex.py index d9ce36a..ad2a770 100644 --- a/pyignite/datatypes/complex.py +++ b/pyignite/datatypes/complex.py @@ -456,7 +456,7 @@ def find_client(): frame = rec[0] code = frame.f_code for varname in code.co_varnames: - suspect = frame.f_locals[varname] + suspect = frame.f_locals.get(varname) if isinstance(suspect, Client): return suspect if isinstance(suspect, Connection): diff --git a/pyignite/queries/__init__.py b/pyignite/queries/__init__.py index 3029f87..d558125 100644 --- a/pyignite/queries/__init__.py +++ b/pyignite/queries/__init__.py @@ -21,4 +21,4 @@ :mod:`pyignite.datatypes` binary parser/generator classes. """ -from .query import Query, ConfigQuery, get_response_class +from .query import Query, ConfigQuery diff --git a/pyignite/queries/query.py b/pyignite/queries/query.py index 0e7cfa3..69b6fa2 100644 --- a/pyignite/queries/query.py +++ b/pyignite/queries/query.py @@ -13,26 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +import attr import ctypes from random import randint -import attr - from pyignite.api.result import APIResult -from pyignite.constants import * -from pyignite.queries import response - - -def get_response_class(obj: object, sql: bool = False): - """ - Response class factory. - - :param obj: cache, connection or client object, - :param sql: (optional) return normal (default) or SQL response class, - :return: response class. 
- """ - template = 'SQLResponse{}{}{}' if sql else 'Response{}{}{}' - return getattr(response, template.format(*obj.get_protocol_version())) +from pyignite.connection import Connection +from pyignite.constants import MIN_LONG, MAX_LONG, RHF_TOPOLOGY_CHANGED +from pyignite.queries.response import Response, SQLResponse @attr.s @@ -59,11 +47,7 @@ def build_c_type(cls): ) return cls._query_c_type - def from_python(self, values: dict = None): - if values is None: - values = {} - buffer = b'' - + def _build_header(self, buffer: bytearray, values: dict): header_class = self.build_c_type() header = header_class() header.op_code = self.op_code @@ -74,14 +58,23 @@ def from_python(self, values: dict = None): buffer += c_type.from_python(values[name]) header.length = ( - len(buffer) - + ctypes.sizeof(header_class) - - ctypes.sizeof(ctypes.c_int) + len(buffer) + + ctypes.sizeof(header_class) + - ctypes.sizeof(ctypes.c_int) ) - return header.query_id, bytes(header) + buffer + + return header + + def from_python(self, values: dict = None): + if values is None: + values = {} + buffer = bytearray() + header = self._build_header(buffer, values) + buffer[:0] = bytes(header) + return header.query_id, bytes(buffer) def perform( - self, conn: 'Connection', query_params: dict = None, + self, conn: Connection, query_params: dict = None, response_config: list = None, sql: bool = False, **kwargs, ) -> APIResult: """ @@ -98,8 +91,14 @@ def perform( """ _, send_buffer = self.from_python(query_params) conn.send(send_buffer) - response_class = get_response_class(conn, sql) - response_struct = response_class(response_config, **kwargs) + + if sql: + response_struct = SQLResponse(protocol_version=conn.get_protocol_version(), + following=response_config, **kwargs) + else: + response_struct = Response(protocol_version=conn.get_protocol_version(), + following=response_config) + response_ctype, recv_buffer = response_struct.parse(conn) response = response_ctype.from_buffer_copy(recv_buffer) @@ -141,24 +140,7 @@ def build_c_type(cls): ) return cls._query_c_type - def from_python(self, values: dict = None): - if values is None: - values = {} - buffer = b'' - - header_class = self.build_c_type() - header = header_class() - header.op_code = self.op_code - if self.query_id is None: - header.query_id = randint(MIN_LONG, MAX_LONG) - - for name, c_type in self.following: - buffer += c_type.from_python(values[name]) - - header.length = ( - len(buffer) - + ctypes.sizeof(header_class) - - ctypes.sizeof(ctypes.c_int) - ) - header.config_length = header.length - ctypes.sizeof(header_class) - return header.query_id, bytes(header) + buffer + def _build_header(self, buffer: bytearray, values: dict): + header = super()._build_header(buffer, values) + header.config_length = header.length - ctypes.sizeof(type(header)) + return header diff --git a/pyignite/queries/response.py b/pyignite/queries/response.py index 5fb4879..6003959 100644 --- a/pyignite/queries/response.py +++ b/pyignite/queries/response.py @@ -13,74 +13,83 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import attr from collections import OrderedDict import ctypes -import attr - -from pyignite.constants import * -from pyignite.datatypes import ( - AnyDataObject, Bool, Int, Long, String, StringArray, Struct, -) -from .op_codes import * +from pyignite.constants import RHF_TOPOLOGY_CHANGED, RHF_ERROR +from pyignite.connection import Connection +from pyignite.datatypes import AnyDataObject, Bool, Int, Long, String, StringArray, Struct +from pyignite.queries.op_codes import OP_SUCCESS @attr.s -class Response140: +class Response: following = attr.ib(type=list, factory=list) + protocol_version = attr.ib(type=tuple, factory=tuple) _response_header = None def __attrs_post_init__(self): # replace None with empty list self.following = self.following or [] - @classmethod - def build_header(cls): - if cls._response_header is None: - cls._response_header = type( + def build_header(self): + if self._response_header is None: + fields = [ + ('length', ctypes.c_int), + ('query_id', ctypes.c_longlong), + ] + + if self.protocol_version and self.protocol_version >= (1, 4, 0): + fields.append(('flags', ctypes.c_short)) + else: + fields.append(('status_code', ctypes.c_int),) + + self._response_header = type( 'ResponseHeader', (ctypes.LittleEndianStructure,), { '_pack_': 1, - '_fields_': [ - ('length', ctypes.c_int), - ('query_id', ctypes.c_longlong), - ('flags', ctypes.c_short), - ], + '_fields_': fields, }, ) - return cls._response_header + return self._response_header - def parse(self, conn: 'Connection'): + def parse(self, conn: Connection): header_class = self.build_header() - buffer = conn.recv(ctypes.sizeof(header_class)) + buffer = bytearray(conn.recv(ctypes.sizeof(header_class))) header = header_class.from_buffer_copy(buffer) fields = [] - if header.flags & RHF_TOPOLOGY_CHANGED: - fields = [ - ('affinity_version', ctypes.c_longlong), - ('affinity_minor', ctypes.c_int), - ] + has_error = False + if self.protocol_version and self.protocol_version >= (1, 4, 0): + if header.flags & RHF_TOPOLOGY_CHANGED: + fields = [ + ('affinity_version', ctypes.c_longlong), + ('affinity_minor', ctypes.c_int), + ] + + if header.flags & RHF_ERROR: + fields.append(('status_code', ctypes.c_int)) + has_error = True + else: + has_error = header.status_code != OP_SUCCESS - if header.flags & RHF_ERROR: - fields.append(('status_code', ctypes.c_int)) + if fields: buffer += conn.recv( - sum([ctypes.sizeof(field[1]) for field in fields]) + sum([ctypes.sizeof(c_type) for _, c_type in fields]) ) + + if has_error: msg_type, buffer_fragment = String.parse(conn) buffer += buffer_fragment fields.append(('error_message', msg_type)) - else: - buffer += conn.recv( - sum([ctypes.sizeof(field[1]) for field in fields]) - ) - for name, ignite_type in self.following: - c_type, buffer_fragment = ignite_type.parse(conn) - buffer += buffer_fragment - fields.append((name, c_type)) + self._parse_success(conn, buffer, fields) + return self._create_parse_result(conn, header_class, fields, buffer) + + def _create_parse_result(self, conn: Connection, header_class, fields: list, buffer: bytearray): response_class = type( 'Response', (header_class,), @@ -89,7 +98,13 @@ def parse(self, conn: 'Connection'): '_fields_': fields, } ) - return response_class, buffer + return response_class, bytes(buffer) + + def _parse_success(self, conn: Connection, buffer: bytearray, fields: list): + for name, ignite_type in self.following: + c_type, buffer_fragment = ignite_type.parse(conn) + buffer += buffer_fragment + fields.append((name, c_type)) def to_python(self, 
ctype_object, *args, **kwargs): result = OrderedDict() @@ -104,7 +119,7 @@ def to_python(self, ctype_object, *args, **kwargs): @attr.s -class SQLResponse140(Response140): +class SQLResponse(Response): """ The response class of SQL functions is special in the way the row-column data is counted in it. Basically, Ignite thin client API is following a @@ -119,266 +134,55 @@ def fields_or_field_count(self): return 'fields', StringArray return 'field_count', Int - def parse(self, conn: 'Connection'): - header_class = self.build_header() - buffer = conn.recv(ctypes.sizeof(header_class)) - header = header_class.from_buffer_copy(buffer) - fields = [] - - if header.flags & RHF_TOPOLOGY_CHANGED: - fields = [ - ('affinity_version', ctypes.c_longlong), - ('affinity_minor', ctypes.c_int), - ] + def _parse_success(self, conn: Connection, buffer: bytearray, fields: list): + following = [ + self.fields_or_field_count(), + ('row_count', Int), + ] + if self.has_cursor: + following.insert(0, ('cursor', Long)) + body_struct = Struct(following) + body_class, body_buffer = body_struct.parse(conn) + body = body_class.from_buffer_copy(body_buffer) + buffer += body_buffer - if header.flags & RHF_ERROR: - fields.append(('status_code', ctypes.c_int)) - buffer += conn.recv( - sum([ctypes.sizeof(field[1]) for field in fields]) - ) - msg_type, buffer_fragment = String.parse(conn) - buffer += buffer_fragment - fields.append(('error_message', msg_type)) + if self.include_field_names: + field_count = body.fields.length else: - buffer += conn.recv( - sum([ctypes.sizeof(field[1]) for field in fields]) - ) - following = [ - self.fields_or_field_count(), - ('row_count', Int), - ] - if self.has_cursor: - following.insert(0, ('cursor', Long)) - body_struct = Struct(following) - body_class, body_buffer = body_struct.parse(conn) - body = body_class.from_buffer_copy(body_buffer) - - if self.include_field_names: - field_count = body.fields.length - else: - field_count = body.field_count - - data_fields = [] - data_buffer = b'' - for i in range(body.row_count): - row_fields = [] - row_buffer = b'' - for j in range(field_count): - field_class, field_buffer = AnyDataObject.parse(conn) - row_fields.append(('column_{}'.format(j), field_class)) - row_buffer += field_buffer - - row_class = type( - 'SQLResponseRow', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': row_fields, - } - ) - data_fields.append(('row_{}'.format(i), row_class)) - data_buffer += row_buffer - - data_class = type( - 'SQLResponseData', + field_count = body.field_count + + data_fields = [] + for i in range(body.row_count): + row_fields = [] + for j in range(field_count): + field_class, field_buffer = AnyDataObject.parse(conn) + row_fields.append(('column_{}'.format(j), field_class)) + buffer += field_buffer + + row_class = type( + 'SQLResponseRow', (ctypes.LittleEndianStructure,), { '_pack_': 1, - '_fields_': data_fields, + '_fields_': row_fields, } ) - fields += body_class._fields_ + [ - ('data', data_class), - ('more', ctypes.c_bool), - ] - buffer += body_buffer + data_buffer + data_fields.append(('row_{}'.format(i), row_class)) - final_class = type( - 'SQLResponse', - (header_class,), + data_class = type( + 'SQLResponseData', + (ctypes.LittleEndianStructure,), { '_pack_': 1, - '_fields_': fields, + '_fields_': data_fields, } ) - buffer += conn.recv(ctypes.sizeof(final_class) - len(buffer)) - return final_class, buffer - - def to_python(self, ctype_object, *args, **kwargs): - if not hasattr(ctype_object, 'status_code'): - result = { - 'more': 
Bool.to_python( - ctype_object.more, *args, **kwargs - ), - 'data': [], - } - if hasattr(ctype_object, 'fields'): - result['fields'] = StringArray.to_python( - ctype_object.fields, *args, **kwargs - ) - else: - result['field_count'] = Int.to_python( - ctype_object.field_count, *args, **kwargs - ) - if hasattr(ctype_object, 'cursor'): - result['cursor'] = Long.to_python( - ctype_object.cursor, *args, **kwargs - ) - for row_item in ctype_object.data._fields_: - row_name = row_item[0] - row_object = getattr(ctype_object.data, row_name) - row = [] - for col_item in row_object._fields_: - col_name = col_item[0] - col_object = getattr(row_object, col_name) - row.append( - AnyDataObject.to_python(col_object, *args, **kwargs) - ) - result['data'].append(row) - return result - - -@attr.s -class Response130: - following = attr.ib(type=list, factory=list) - _response_header = None - - def __attrs_post_init__(self): - # replace None with empty list - self.following = self.following or [] - - @classmethod - def build_header(cls): - if cls._response_header is None: - cls._response_header = type( - 'ResponseHeader', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('length', ctypes.c_int), - ('query_id', ctypes.c_longlong), - ('status_code', ctypes.c_int), - ], - }, - ) - return cls._response_header - - def parse(self, client: 'Client'): - header_class = self.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) - header = header_class.from_buffer_copy(buffer) - fields = [] - - if header.status_code == OP_SUCCESS: - for name, ignite_type in self.following: - c_type, buffer_fragment = ignite_type.parse(client) - buffer += buffer_fragment - fields.append((name, c_type)) - else: - c_type, buffer_fragment = String.parse(client) - buffer += buffer_fragment - fields.append(('error_message', c_type)) - - response_class = type( - 'Response', - (header_class,), - { - '_pack_': 1, - '_fields_': fields, - } - ) - return response_class, buffer - - def to_python(self, ctype_object, *args, **kwargs): - result = OrderedDict() - - for name, c_type in self.following: - result[name] = c_type.to_python( - getattr(ctype_object, name), - *args, **kwargs - ) - - return result if result else None - - -@attr.s -class SQLResponse130(Response130): - """ - The response class of SQL functions is special in the way the row-column - data is counted in it. Basically, Ignite thin client API is following a - “counter right before the counted objects” rule in most of its parts. - SQL ops are breaking this rule. 
- """ - include_field_names = attr.ib(type=bool, default=False) - has_cursor = attr.ib(type=bool, default=False) - - def fields_or_field_count(self): - if self.include_field_names: - return 'fields', StringArray - return 'field_count', Int - - def parse(self, client: 'Client'): - header_class = self.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) - header = header_class.from_buffer_copy(buffer) - fields = [] - - if header.status_code == OP_SUCCESS: - following = [ - self.fields_or_field_count(), - ('row_count', Int), - ] - if self.has_cursor: - following.insert(0, ('cursor', Long)) - body_struct = Struct(following) - body_class, body_buffer = body_struct.parse(client) - body = body_class.from_buffer_copy(body_buffer) - - if self.include_field_names: - field_count = body.fields.length - else: - field_count = body.field_count - - data_fields = [] - data_buffer = b'' - for i in range(body.row_count): - row_fields = [] - row_buffer = b'' - for j in range(field_count): - field_class, field_buffer = AnyDataObject.parse(client) - row_fields.append(('column_{}'.format(j), field_class)) - row_buffer += field_buffer - - row_class = type( - 'SQLResponseRow', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': row_fields, - } - ) - data_fields.append(('row_{}'.format(i), row_class)) - data_buffer += row_buffer - - data_class = type( - 'SQLResponseData', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': data_fields, - } - ) - fields += body_class._fields_ + [ - ('data', data_class), - ('more', ctypes.c_bool), - ] - buffer += body_buffer + data_buffer - else: - c_type, buffer_fragment = String.parse(client) - buffer += buffer_fragment - fields.append(('error_message', c_type)) + fields += body_class._fields_ + [ + ('data', data_class), + ('more', ctypes.c_bool), + ] + def _create_parse_result(self, conn: Connection, header_class, fields: list, buffer: bytearray): final_class = type( 'SQLResponse', (header_class,), @@ -387,11 +191,11 @@ def parse(self, client: 'Client'): '_fields_': fields, } ) - buffer += client.recv(ctypes.sizeof(final_class) - len(buffer)) - return final_class, buffer + buffer += conn.recv(ctypes.sizeof(final_class) - len(buffer)) + return final_class, bytes(buffer) def to_python(self, ctype_object, *args, **kwargs): - if ctype_object.status_code == 0: + if getattr(ctype_object, 'status_code', 0) == 0: result = { 'more': Bool.to_python( ctype_object.more, *args, **kwargs @@ -422,7 +226,3 @@ def to_python(self, ctype_object, *args, **kwargs): ) result['data'].append(row) return result - - -Response120 = Response130 -SQLResponse120 = SQLResponse130 From 18d32bb76a030f6aea6ed66dc3e403d096a42023 Mon Sep 17 00:00:00 2001 From: Pavel Tupitsyn Date: Wed, 27 Jan 2021 23:59:05 +0300 Subject: [PATCH 06/62] IGNITE-14057 Support big-endian systems Fix primitives decoding on big-endian architectures. 
This closes #7 --- pyignite/api/binary.py | 2 +- pyignite/datatypes/internal.py | 2 +- pyignite/datatypes/primitive.py | 44 +++++++++++++++++++++---- pyignite/datatypes/primitive_objects.py | 7 +++- pyignite/queries/response.py | 2 +- tests/test_binary.py | 2 +- 6 files changed, 47 insertions(+), 12 deletions(-) diff --git a/pyignite/api/binary.py b/pyignite/api/binary.py index 1d63b49..722001a 100644 --- a/pyignite/api/binary.py +++ b/pyignite/api/binary.py @@ -86,7 +86,7 @@ def get_binary_type( if result.status != 0: return result result.value = { - 'type_exists': response.type_exists + 'type_exists': Bool.to_python(response.type_exists) } if hasattr(response, 'body'): result.value.update(body_struct.to_python(response.body)) diff --git a/pyignite/datatypes/internal.py b/pyignite/datatypes/internal.py index 9fd5d64..9f23ec6 100644 --- a/pyignite/datatypes/internal.py +++ b/pyignite/datatypes/internal.py @@ -204,7 +204,7 @@ class Struct: def parse( self, client: 'Client' - ) -> Tuple[ctypes.BigEndianStructure, bytes]: + ) -> Tuple[ctypes.LittleEndianStructure, bytes]: buffer = b'' fields = [] values = {} diff --git a/pyignite/datatypes/primitive.py b/pyignite/datatypes/primitive.py index 23d070d..d549fda 100644 --- a/pyignite/datatypes/primitive.py +++ b/pyignite/datatypes/primitive.py @@ -14,6 +14,8 @@ # limitations under the License. import ctypes +import struct +import sys from pyignite.constants import * from .base import IgniteDataType @@ -48,13 +50,9 @@ class Primitive(IgniteDataType): def parse(cls, client: 'Client'): return cls.c_type, client.recv(ctypes.sizeof(cls.c_type)) - @staticmethod - def to_python(ctype_object, *args, **kwargs): - return ctype_object - @classmethod - def from_python(cls, value): - return bytes(cls.c_type(value)) + def to_python(cls, ctype_object, *args, **kwargs): + return ctype_object class Byte(Primitive): @@ -62,36 +60,60 @@ class Byte(Primitive): _type_id = TYPE_BYTE c_type = ctypes.c_byte + @classmethod + def from_python(cls, value): + return struct.pack(" int: return 1231 if value else 1237 + + @classmethod + def to_python(cls, ctype_object, *args, **kwargs): + return ctype_object.value != 0 + diff --git a/pyignite/queries/response.py b/pyignite/queries/response.py index 6003959..05a519a 100644 --- a/pyignite/queries/response.py +++ b/pyignite/queries/response.py @@ -179,7 +179,7 @@ def _parse_success(self, conn: Connection, buffer: bytearray, fields: list): ) fields += body_class._fields_ + [ ('data', data_class), - ('more', ctypes.c_bool), + ('more', ctypes.c_byte), ] def _create_parse_result(self, conn: Connection, header_class, fields: list, buffer: bytearray): diff --git a/tests/test_binary.py b/tests/test_binary.py index 1c051f0..5190a6a 100644 --- a/tests/test_binary.py +++ b/tests/test_binary.py @@ -225,7 +225,7 @@ class OuterType( def test_add_schema_to_binary_object(client): - migrate_cache = client.create_cache('migrate_binary') + migrate_cache = client.get_or_create_cache('migrate_binary') class MyBinaryType( metaclass=GenericObjectMeta, From b13d43b59e170fc33b2b8fc39c770ec348a7c70c Mon Sep 17 00:00:00 2001 From: Ilya Kasnacheev Date: Thu, 28 Jan 2021 01:03:39 +0300 Subject: [PATCH 07/62] IGNITE-14075: Fix hash code calculation for composite keys Co-authored-by: Aleksandr Shapkin This closes #9 --- .gitignore | 1 + pyignite/binary.py | 2 +- pyignite/utils.py | 2 +- tests/test_cache_composite_key_class_sql.py | 123 ++++++++++++++++++++ 4 files changed, 126 insertions(+), 2 deletions(-) create mode 100644 
tests/test_cache_composite_key_class_sql.py diff --git a/.gitignore b/.gitignore index a779771..7372921 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ .idea +.vscode .eggs .pytest_cache .tox diff --git a/pyignite/binary.py b/pyignite/binary.py index 99f2f02..5d76c1b 100644 --- a/pyignite/binary.py +++ b/pyignite/binary.py @@ -165,7 +165,7 @@ def _build(self, client: 'Client' = None) -> int: + len(field_buffer) ) header.length = header.schema_offset + ctypes.sizeof(schema_class) - header.hash_code = hashcode(field_buffer + bytes(schema)) + header.hash_code = hashcode(field_buffer) # reuse the results self._buffer = bytes(header) + field_buffer + bytes(schema) diff --git a/pyignite/utils.py b/pyignite/utils.py index ca9725d..ebe5501 100644 --- a/pyignite/utils.py +++ b/pyignite/utils.py @@ -113,7 +113,7 @@ def hashcode(string: Union[str, bytes]) -> int: :param string: UTF-8-encoded string identifier of binary buffer, :return: hash code. """ - result = 0 + result = 1 if isinstance(string, (bytes, bytearray)) else 0 for char in string: try: char = ord(char) diff --git a/tests/test_cache_composite_key_class_sql.py b/tests/test_cache_composite_key_class_sql.py new file mode 100644 index 0000000..2f1705f --- /dev/null +++ b/tests/test_cache_composite_key_class_sql.py @@ -0,0 +1,123 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import OrderedDict + +from pyignite import GenericObjectMeta +from pyignite.datatypes import ( + IntObject, String +) + + +class StudentKey( + metaclass=GenericObjectMeta, + type_name='test.model.StudentKey', + schema=OrderedDict([ + ('ID', IntObject), + ('DEPT', String) + ]) + ): + pass + + +class Student( + metaclass=GenericObjectMeta, + type_name='test.model.Student', + schema=OrderedDict([ + ('NAME', String), + ]) + ): + pass + + +create_query = '''CREATE TABLE StudentTable ( + id INT(11), + dept VARCHAR, + name CHAR(24), + PRIMARY KEY (id, dept)) + WITH "CACHE_NAME=StudentCache, KEY_TYPE=test.model.StudentKey, VALUE_TYPE=test.model.Student"''' + +insert_query = '''INSERT INTO StudentTable (id, dept, name) VALUES (?, ?, ?)''' + +select_query = 'SELECT _KEY, id, dept, name FROM StudentTable' + +drop_query = 'DROP TABLE StudentTable IF EXISTS' + + +def test_cache_get_with_composite_key_finds_sql_value(client): + """ + Should query a record with composite key and calculate + internal hashcode correctly. + """ + + client.sql(drop_query) + + # Create table. + result = client.sql(create_query) + assert next(result)[0] == 0 + + student_key = StudentKey(1, 'Acct') + student_val = Student('John') + + # Put new Strudent with StudentKey. + result = client.sql(insert_query, query_args=[student_key.ID, student_key.DEPT, student_val.NAME]) + assert next(result)[0] == 1 + + # Cache get finds the same value. 
+ studentCache = client.get_cache('StudentCache') + val = studentCache.get(student_key) + assert val is not None + assert val.NAME == student_val.NAME + + query_result = list(client.sql(select_query, include_field_names=True)) + + validate_query_result(student_key, student_val, query_result) + + +def test_python_sql_finds_inserted_value_with_composite_key(client): + """ + Insert a record with a composite key and query it with SELECT SQL. + """ + + client.sql(drop_query) + + # Create table. + result = client.sql(create_query) + assert next(result)[0] == 0 + + student_key = StudentKey(2, 'Business') + student_val = Student('Abe') + + # Put new value using cache. + studentCache = client.get_cache('StudentCache') + studentCache.put(student_key, student_val) + + # Find the value using SQL. + query_result = list(client.sql(select_query, include_field_names=True)) + + validate_query_result(student_key, student_val, query_result) + + +def validate_query_result(student_key, student_val, query_result): + ''' + Compare query result with expected key and value. + ''' + assert len(query_result) == 2 + sql_row = dict(zip(query_result[0], query_result[1])) + + assert sql_row["_KEY"][0] == student_key._buffer + assert sql_row['ID'] == student_key.ID + assert sql_row['DEPT'] == student_key.DEPT + assert sql_row['NAME'] == student_val.NAME From e0c22ef3aef39ea8a42ddb6b4495b7bcaa479417 Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Tue, 2 Feb 2021 12:16:43 +0300 Subject: [PATCH 08/62] IGNITE-14059: Fix hashing of complex objects This closes #5 --- pyignite/utils.py | 35 ++++++++++++++++++++++-------- tests/test_binary.py | 51 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 9 deletions(-) diff --git a/pyignite/utils.py b/pyignite/utils.py index ebe5501..ce00d53 100644 --- a/pyignite/utils.py +++ b/pyignite/utils.py @@ -106,20 +106,37 @@ def unwrap_binary(client: 'Client', wrapped: tuple) -> object: return result -def hashcode(string: Union[str, bytes]) -> int: +def hashcode(data: Union[str, bytes]) -> int: """ Calculate hash code used for identifying objects in Ignite binary API. - :param string: UTF-8-encoded string identifier of binary buffer, + :param data: UTF-8-encoded string identifier of binary buffer or byte array :return: hash code. """ - result = 1 if isinstance(string, (bytes, bytearray)) else 0 - for char in string: - try: - char = ord(char) - except TypeError: - pass - result = int_overflow(31 * result + char) + if isinstance(data, str): + """ + For strings we iterate over code point which are of the int type + and can take up to 4 bytes and can only be positive. + """ + result = 0 + for char in data: + try: + char_val = ord(char) + result = int_overflow(31 * result + char_val) + except TypeError: + pass + else: + """ + For byte array we iterate over bytes which only take 1 byte. But + according to protocol, bytes during hashing should be treated as signed + integer numbers 8 bits long. On other hand elements in Python's `bytes` + are unsigned. For this reason we use ctypes.c_byte() to make them + signed. 
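[Editor's note, illustrative only -- not part of this patch.] A tiny standalone check of the signed-byte rule described above: Java hashes each byte as a signed 8-bit value, and ctypes.c_byte performs exactly that reinterpretation of Python's unsigned byte values:

    import ctypes

    assert ctypes.c_byte(0xFF).value == -1    # unsigned 255 is reinterpreted as -1
    assert ctypes.c_byte(0x7F).value == 127   # values below 128 are unchanged
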
+ """ + result = 1 + for byte in data: + byte = ctypes.c_byte(byte).value + result = int_overflow(31 * result + byte) return result diff --git a/tests/test_binary.py b/tests/test_binary.py index 5190a6a..4c45afb 100644 --- a/tests/test_binary.py +++ b/tests/test_binary.py @@ -304,3 +304,54 @@ class NonPythonicallyNamedType( obj = cache.get(key) assert obj.type_name == type_name, 'Complex type name mismatch' assert obj.field == data, 'Complex object data failure' + + +def test_complex_object_hash(client): + """ + Test that Python client correctly calculates hash of the binary + object that contains negative bytes. + """ + class Internal( + metaclass=GenericObjectMeta, + type_name='Internal', + schema=OrderedDict([ + ('id', IntObject), + ('str', String), + ]) + ): + pass + + class TestObject( + metaclass=GenericObjectMeta, + type_name='TestObject', + schema=OrderedDict([ + ('id', IntObject), + ('str', String), + ('internal', BinaryObject), + ]) + ): + pass + + obj_ascii = TestObject() + obj_ascii.id = 1 + obj_ascii.str = 'test_string' + + obj_ascii.internal = Internal() + obj_ascii.internal.id = 2 + obj_ascii.internal.str = 'lorem ipsum' + + hash_ascii = BinaryObject.hashcode(obj_ascii, client=client) + + assert hash_ascii == -1314567146, 'Invalid hashcode value for object with ASCII strings' + + obj_utf8 = TestObject() + obj_utf8.id = 1 + obj_utf8.str = 'юникод' + + obj_utf8.internal = Internal() + obj_utf8.internal.id = 2 + obj_utf8.internal.str = 'ユニコード' + + hash_utf8 = BinaryObject.hashcode(obj_utf8, client=client) + + assert hash_utf8 == -1945378474, 'Invalid hashcode value for object with UTF-8 strings' From 2c0ecb2cba76ba5c70ea062e46d04dcc164e3d03 Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Tue, 2 Feb 2021 16:36:45 +0300 Subject: [PATCH 09/62] IGNITE-13863: Fix Null reading and writing This closes #6 --- pyignite/datatypes/complex.py | 69 +++++++++++++++++++++---- pyignite/datatypes/internal.py | 5 +- pyignite/datatypes/primitive_arrays.py | 33 ++++++++++-- pyignite/datatypes/primitive_objects.py | 23 +++++++-- pyignite/datatypes/standard.py | 28 +++++++--- tests/test_binary.py | 46 +++++++++++++++-- 6 files changed, 172 insertions(+), 32 deletions(-) diff --git a/pyignite/datatypes/complex.py b/pyignite/datatypes/complex.py index ad2a770..6860583 100644 --- a/pyignite/datatypes/complex.py +++ b/pyignite/datatypes/complex.py @@ -20,11 +20,13 @@ from pyignite.constants import * from pyignite.exceptions import ParseError + from .base import IgniteDataType from .internal import AnyDataObject, infer_from_python from .type_codes import * from .type_ids import * from .type_names import * +from .null_object import Null __all__ = [ @@ -68,8 +70,13 @@ def build_header(cls): @classmethod def parse(cls, client: 'Client'): + tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) + + if tc_type == TC_NULL: + return Null.build_c_type(), tc_type + header_class = cls.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) + buffer = tc_type + client.recv(ctypes.sizeof(header_class) - len(tc_type)) header = header_class.from_buffer_copy(buffer) fields = [] @@ -91,7 +98,10 @@ def parse(cls, client: 'Client'): @classmethod def to_python(cls, ctype_object, *args, **kwargs): result = [] - for i in range(ctype_object.length): + length = getattr(ctype_object, "length", None) + if length is None: + return None + for i in range(length): result.append( AnyDataObject.to_python( getattr(ctype_object, 'element_{}'.format(i)), @@ -102,6 +112,9 @@ def to_python(cls, ctype_object, *args, **kwargs): 
@classmethod def from_python(cls, value): + if value is None: + return Null.from_python() + type_or_id, value = value header_class = cls.build_header() header = header_class() @@ -150,8 +163,13 @@ def build_header(cls): @classmethod def parse(cls, client: 'Client'): + tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) + + if tc_type == TC_NULL: + return Null.build_c_type(), tc_type + header_class = cls.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) + buffer = tc_type + client.recv(ctypes.sizeof(header_class) - len(tc_type)) header = header_class.from_buffer_copy(buffer) final_class = type( @@ -243,8 +261,13 @@ def build_header(cls): @classmethod def parse(cls, client: 'Client'): + tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) + + if tc_type == TC_NULL: + return Null.build_c_type(), tc_type + header_class = cls.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) + buffer = tc_type + client.recv(ctypes.sizeof(header_class) - len(tc_type)) header = header_class.from_buffer_copy(buffer) fields = [] @@ -266,7 +289,10 @@ def parse(cls, client: 'Client'): @classmethod def to_python(cls, ctype_object, *args, **kwargs): result = [] - for i in range(ctype_object.length): + length = getattr(ctype_object, "length", None) + if length is None: + return None + for i in range(length): result.append( AnyDataObject.to_python( getattr(ctype_object, 'element_{}'.format(i)), @@ -277,6 +303,9 @@ def to_python(cls, ctype_object, *args, **kwargs): @classmethod def from_python(cls, value): + if value is None: + return Null.from_python() + type_or_id, value = value header_class = cls.build_header() header = header_class() @@ -330,8 +359,13 @@ def build_header(cls): @classmethod def parse(cls, client: 'Client'): + tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) + + if tc_type == TC_NULL: + return Null.build_c_type(), tc_type + header_class = cls.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) + buffer = tc_type + client.recv(ctypes.sizeof(header_class) - len(tc_type)) header = header_class.from_buffer_copy(buffer) fields = [] @@ -420,12 +454,18 @@ def build_header(cls): @classmethod def to_python(cls, ctype_object, *args, **kwargs): - return ctype_object.type, super().to_python( + obj_type = getattr(ctype_object, "type", None) + if obj_type is None: + return None + return obj_type, super().to_python( ctype_object, *args, **kwargs ) @classmethod def from_python(cls, value): + if value is None: + return Null.from_python() + type_id, value = value return super().from_python(value, type_id) @@ -539,9 +579,13 @@ def get_dataclass(conn: 'Connection', header) -> OrderedDict: @classmethod def parse(cls, client: 'Client'): from pyignite.datatypes import Struct + tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) + + if tc_type == TC_NULL: + return Null.build_c_type(), tc_type header_class = cls.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) + buffer = tc_type + client.recv(ctypes.sizeof(header_class) - len(tc_type)) header = header_class.from_buffer_copy(buffer) # ignore full schema, always retrieve fields' types and order @@ -572,14 +616,17 @@ def parse(cls, client: 'Client'): @classmethod def to_python(cls, ctype_object, client: 'Client' = None, *args, **kwargs): + type_id = getattr(ctype_object, "type_id", None) + if type_id is None: + return None if not client: raise ParseError( - 'Can not query binary type {}'.format(ctype_object.type_id) + 'Can not query binary type {}'.format(type_id) ) data_class = client.query_binary_type( - 
ctype_object.type_id, + type_id, ctype_object.schema_id ) result = data_class() @@ -596,6 +643,8 @@ def to_python(cls, ctype_object, client: 'Client' = None, *args, **kwargs): @classmethod def from_python(cls, value: object): + if value is None: + return Null.from_python() if getattr(value, '_buffer', None) is None: client = cls.find_client() diff --git a/pyignite/datatypes/internal.py b/pyignite/datatypes/internal.py index 9f23ec6..23b9cc4 100644 --- a/pyignite/datatypes/internal.py +++ b/pyignite/datatypes/internal.py @@ -479,7 +479,10 @@ def parse(self, client: 'Client'): @classmethod def to_python(cls, ctype_object, *args, **kwargs): result = [] - for i in range(ctype_object.length): + length = getattr(ctype_object, "length", None) + if length is None: + return None + for i in range(length): result.append( super().to_python( getattr(ctype_object, 'element_{}'.format(i)), diff --git a/pyignite/datatypes/primitive_arrays.py b/pyignite/datatypes/primitive_arrays.py index 3763b96..1b41728 100644 --- a/pyignite/datatypes/primitive_arrays.py +++ b/pyignite/datatypes/primitive_arrays.py @@ -17,6 +17,7 @@ from typing import Any from pyignite.constants import * +from . import Null from .base import IgniteDataType from .primitive import * from .type_codes import * @@ -61,8 +62,13 @@ def build_header_class(cls): @classmethod def parse(cls, client: 'Client'): + tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) + + if tc_type == TC_NULL: + return Null.build_c_type(), tc_type + header_class = cls.build_header_class() - buffer = client.recv(ctypes.sizeof(header_class)) + buffer = tc_type + client.recv(ctypes.sizeof(header_class) - len(tc_type)) header = header_class.from_buffer_copy(buffer) final_class = type( cls.__name__, @@ -82,12 +88,18 @@ def parse(cls, client: 'Client'): @classmethod def to_python(cls, ctype_object, *args, **kwargs): result = [] - for i in range(ctype_object.length): + length = getattr(ctype_object, "length", None) + if length is None: + return None + for i in range(length): result.append(ctype_object.data[i]) return result @classmethod def from_python(cls, value): + if value is None: + return Null.from_python() + header_class = cls.build_header_class() header = header_class() if hasattr(header, 'type_code'): @@ -112,7 +124,10 @@ class ByteArray(PrimitiveArray): @classmethod def to_python(cls, ctype_object, *args, **kwargs): - return bytearray(ctype_object.data) + data = getattr(ctype_object, "data", None) + if data is None: + return None + return bytearray(data) @classmethod def from_python(cls, value): @@ -210,6 +225,9 @@ def to_python(cls, ctype_object, *args, **kwargs): @classmethod def from_python(cls, value): + if value is None: + return Null.from_python() + header_class = cls.build_header_class() header = header_class() header.type_code = int.from_bytes( @@ -282,6 +300,8 @@ class CharArrayObject(PrimitiveArrayObject): @classmethod def to_python(cls, ctype_object, *args, **kwargs): values = super().to_python(ctype_object, *args, **kwargs) + if values is None: + return None return [ v.to_bytes( ctypes.sizeof(cls.primitive_type.c_type), @@ -302,7 +322,10 @@ class BoolArrayObject(PrimitiveArrayObject): def to_python(cls, ctype_object, *args, **kwargs): if not ctype_object: return None - result = [False] * ctype_object.length - for i in range(ctype_object.length): + length = getattr(ctype_object, "length", None) + if length is None: + return None + result = [False] * length + for i in range(length): result[i] = ctype_object.data[i] != 0 return result diff --git 
a/pyignite/datatypes/primitive_objects.py b/pyignite/datatypes/primitive_objects.py index 033ac9e..53f12d2 100644 --- a/pyignite/datatypes/primitive_objects.py +++ b/pyignite/datatypes/primitive_objects.py @@ -17,10 +17,12 @@ from pyignite.constants import * from pyignite.utils import unsigned + from .base import IgniteDataType from .type_codes import * from .type_ids import * from .type_names import * +from .null_object import Null __all__ = [ @@ -60,16 +62,21 @@ def build_c_type(cls): @classmethod def parse(cls, client: 'Client'): + tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) + if tc_type == TC_NULL: + return Null.build_c_type(), tc_type data_type = cls.build_c_type() - buffer = client.recv(ctypes.sizeof(data_type)) + buffer = tc_type + client.recv(ctypes.sizeof(data_type) - len(tc_type)) return data_type, buffer @staticmethod def to_python(ctype_object, *args, **kwargs): - return ctype_object.value + return getattr(ctype_object, "value", None) @classmethod def from_python(cls, value): + if value is None: + return Null.from_python() data_type = cls.build_c_type() data_object = data_type() data_object.type_code = int.from_bytes( @@ -185,13 +192,18 @@ def hashcode(value: str, *args, **kwargs) -> int: @classmethod def to_python(cls, ctype_object, *args, **kwargs): - return ctype_object.value.to_bytes( + value = getattr(ctype_object, "value", None) + if value is None: + return None + return value.to_bytes( ctypes.sizeof(cls.c_type), byteorder=PROTOCOL_BYTE_ORDER ).decode(PROTOCOL_CHAR_ENCODING) @classmethod def from_python(cls, value): + if value is None: + return Null.from_python() if type(value) is str: value = value.encode(PROTOCOL_CHAR_ENCODING) # assuming either a bytes or an integer @@ -218,5 +230,8 @@ def hashcode(value: bool, *args, **kwargs) -> int: @classmethod def to_python(cls, ctype_object, *args, **kwargs): - return ctype_object.value != 0 + value = getattr(ctype_object, "value", None) + if value is None: + return None + return value != 0 diff --git a/pyignite/datatypes/standard.py b/pyignite/datatypes/standard.py index c65cae4..0f16735 100644 --- a/pyignite/datatypes/standard.py +++ b/pyignite/datatypes/standard.py @@ -276,8 +276,6 @@ class UUIDObject(StandardObject): UUID_BYTE_ORDER = (7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8) - UUID_BYTE_ORDER = (7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8) - @staticmethod def hashcode(value: 'UUID', *args, **kwargs) -> int: msb = value.int >> 64 @@ -303,6 +301,9 @@ def build_c_type(cls): @classmethod def from_python(cls, value: uuid.UUID): + if value is None: + return Null.from_python() + data_type = cls.build_c_type() data_object = data_type() data_object.type_code = int.from_bytes( @@ -548,8 +549,6 @@ def from_python(cls, value: tuple): cls.type_code, byteorder=PROTOCOL_BYTE_ORDER ) - if value is None: - return Null.from_python(value) data_object.type_id, data_object.ordinal = value return bytes(data_object) @@ -601,8 +600,13 @@ def build_header_class(cls): @classmethod def parse(cls, client: 'Client'): + tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) + + if tc_type == TC_NULL: + return Null.build_c_type(), tc_type + header_class = cls.build_header_class() - buffer = client.recv(ctypes.sizeof(header_class)) + buffer = tc_type + client.recv(ctypes.sizeof(header_class) - len(tc_type)) header = header_class.from_buffer_copy(buffer) fields = [] for i in range(header.length): @@ -623,7 +627,10 @@ def parse(cls, client: 'Client'): @classmethod def to_python(cls, ctype_object, *args, **kwargs): result = [] - for 
i in range(ctype_object.length): + length = getattr(ctype_object, "length", None) + if length is None: + return None + for i in range(length): result.append( cls.standard_type.to_python( getattr(ctype_object, 'element_{}'.format(i)), @@ -634,6 +641,8 @@ def to_python(cls, ctype_object, *args, **kwargs): @classmethod def from_python(cls, value): + if value is None: + return Null.from_python() header_class = cls.build_header_class() header = header_class() if hasattr(header, 'type_code'): @@ -796,6 +805,9 @@ def build_header_class(cls): @classmethod def from_python(cls, value): + if value is None: + return Null.from_python() + type_id, value = value header_class = cls.build_header_class() header = header_class() @@ -815,7 +827,9 @@ def from_python(cls, value): @classmethod def to_python(cls, ctype_object, *args, **kwargs): - type_id = ctype_object.type_id + type_id = getattr(ctype_object, "type_id", None) + if type_id is None: + return None return type_id, super().to_python(ctype_object, *args, **kwargs) diff --git a/tests/test_binary.py b/tests/test_binary.py index 4c45afb..46554ea 100644 --- a/tests/test_binary.py +++ b/tests/test_binary.py @@ -12,14 +12,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import re from collections import OrderedDict from decimal import Decimal from pyignite import GenericObjectMeta from pyignite.datatypes import ( - BinaryObject, BoolObject, IntObject, DecimalObject, LongObject, String, -) + BinaryObject, BoolObject, IntObject, DecimalObject, LongObject, String, ByteObject, ShortObject, FloatObject, + DoubleObject, CharObject, UUIDObject, DateObject, TimestampObject, TimeObject, EnumObject, BinaryEnumObject, + ByteArrayObject, ShortArrayObject, IntArrayObject, LongArrayObject, FloatArrayObject, DoubleArrayObject, + CharArrayObject, BoolArrayObject, UUIDArrayObject, DateArrayObject, TimestampArrayObject, TimeArrayObject, + EnumArrayObject, StringArrayObject, DecimalArrayObject, ObjectArrayObject, CollectionObject, MapObject) from pyignite.datatypes.prop_codes import * @@ -308,8 +311,8 @@ class NonPythonicallyNamedType( def test_complex_object_hash(client): """ - Test that Python client correctly calculates hash of the binary - object that contains negative bytes. + Test that Python client correctly calculates hash of the binary object that + contains negative bytes. """ class Internal( metaclass=GenericObjectMeta, @@ -355,3 +358,36 @@ class TestObject( hash_utf8 = BinaryObject.hashcode(obj_utf8, client=client) assert hash_utf8 == -1945378474, 'Invalid hashcode value for object with UTF-8 strings' + + +def test_complex_object_null_fields(client): + """ + Test that Python client can correctly write and read binary object that + contains null fields. 
+ """ + def camel_to_snake(name): + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower() + + fields = {camel_to_snake(type_.__name__): type_ for type_ in [ + ByteObject, ShortObject, IntObject, LongObject, FloatObject, DoubleObject, CharObject, BoolObject, UUIDObject, + DateObject, TimestampObject, TimeObject, EnumObject, BinaryEnumObject, ByteArrayObject, ShortArrayObject, + IntArrayObject, LongArrayObject, FloatArrayObject, DoubleArrayObject, CharArrayObject, BoolArrayObject, + UUIDArrayObject, DateArrayObject, TimestampArrayObject, TimeArrayObject, EnumArrayObject, String, + StringArrayObject, DecimalObject, DecimalArrayObject, ObjectArrayObject, CollectionObject, MapObject, + BinaryObject]} + + class AllTypesObject(metaclass=GenericObjectMeta, type_name='AllTypesObject', schema=fields): + pass + + key = 42 + null_fields_value = AllTypesObject() + + for field in fields.keys(): + setattr(null_fields_value, field, None) + + cache = client.get_or_create_cache('all_types_test_cache') + cache.put(key, null_fields_value) + + got_obj = cache.get(key) + + assert got_obj == null_fields_value, 'Objects mismatch' From d7d6d35d906cebc2f69e63e18f6ae8b1abe76132 Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Thu, 4 Feb 2021 18:38:56 +0300 Subject: [PATCH 10/62] IGNITE-11528: Deprecate SqlQuery API This closes #11 --- pyignite/api/sql.py | 11 +++++------ pyignite/utils.py | 11 +++++++++++ 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/pyignite/api/sql.py b/pyignite/api/sql.py index ebb3e30..73cacc6 100644 --- a/pyignite/api/sql.py +++ b/pyignite/api/sql.py @@ -13,11 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" -Only key-value queries (scan queries) are implemented. SQL part is still -in progress. -""" - from typing import Union from pyignite.constants import * @@ -28,7 +23,7 @@ from pyignite.datatypes.sql import StatementType from pyignite.queries import Query from pyignite.queries.op_codes import * -from pyignite.utils import cache_id +from pyignite.utils import cache_id, deprecated from .result import APIResult @@ -142,6 +137,8 @@ def scan_cursor_get_page( return result +@deprecated(version='1.2.0', reason="This API is deprecated and will be removed in the following major release. " + "Use sql_fields instead") def sql( conn: 'Connection', cache: Union[str, int], table_name: str, query_str: str, page_size: int, query_args=None, @@ -227,6 +224,8 @@ def sql( return result +@deprecated(version='1.2.0', reason="This API is deprecated and will be removed in the following major release. " + "Use sql_fields instead") def sql_cursor_get_page( conn: 'Connection', cursor: int, query_id: int = None, ) -> APIResult: diff --git a/pyignite/utils.py b/pyignite/utils.py index ce00d53..ef7b6f6 100644 --- a/pyignite/utils.py +++ b/pyignite/utils.py @@ -15,6 +15,7 @@ import ctypes import decimal +import warnings from functools import wraps from threading import Event, Thread @@ -313,3 +314,13 @@ def process_delimiter(name: str, delimiter: str) -> str: Splits the name by delimiter, capitalize each part, merge. """ return ''.join([capitalize(x) for x in name.split(delimiter)]) + + +def deprecated(version, reason): + def decorator_deprecated(fn): + @wraps(fn) + def wrapper_deprecated(*args, **kwds): + warnings.warn(f'Deprecated since {version}. 
The reason: {reason}', category=DeprecationWarning) + return fn(*args, **kwds) + return wrapper_deprecated + return decorator_deprecated From 83208c2ef87d5b241c45dd5fd8f5ec4b58e2a8df Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Thu, 4 Feb 2021 18:39:57 +0300 Subject: [PATCH 11/62] IGNITE-12975: SQL query do not create cache This closes #12 --- pyignite/client.py | 2 +- tests/test_binary.py | 1 + tests/test_sql.py | 10 +++++++++- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/pyignite/client.py b/pyignite/client.py index 3202b78..f8072d8 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -586,7 +586,7 @@ def generate_result(value): conn = self.random_node - schema = self.get_or_create_cache(schema) + schema = self.get_cache(schema) result = sql_fields( conn, schema.cache_id, query_str, page_size, query_args, schema.name, diff --git a/tests/test_binary.py b/tests/test_binary.py index 46554ea..45d1d25 100644 --- a/tests/test_binary.py +++ b/tests/test_binary.py @@ -64,6 +64,7 @@ def test_sql_read_as_binary(client): + client.get_or_create_cache(scheme_name) client.sql(drop_query) # create table diff --git a/tests/test_sql.py b/tests/test_sql.py index 87383d3..15f84ee 100644 --- a/tests/test_sql.py +++ b/tests/test_sql.py @@ -13,12 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +import pytest + from pyignite.api import ( sql_fields, sql_fields_cursor_get_page, - cache_get_or_create, sql, sql_cursor_get_page, + sql, sql_cursor_get_page, cache_get_configuration, ) from pyignite.datatypes.prop_codes import * +from pyignite.exceptions import SQLError from pyignite.utils import entity_id, unwrap_binary initial_data = [ @@ -186,3 +189,8 @@ def test_long_multipage_query(client): assert value == field_number * page[0] client.sql(drop_query) + + +def test_sql_not_create_cache(client): + with pytest.raises(SQLError, match=r".*Cache does not exist.*"): + client.sql(schema='IS_NOT_EXISTING', query_str='select * from IsNotExisting') From 0f2828b521aa7bf93cfdcd5302bb77434fffe1e0 Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Thu, 4 Feb 2021 18:40:48 +0300 Subject: [PATCH 12/62] IGNITE-14127: Default sql page size from 1 => 1024 This closes #13 --- pyignite/client.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyignite/client.py b/pyignite/client.py index f8072d8..83cb196 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -519,7 +519,7 @@ def get_cache_names(self) -> list: return cache_get_names(self.random_node) def sql( - self, query_str: str, page_size: int = 1, query_args: Iterable = None, + self, query_str: str, page_size: int = 1024, query_args: Iterable = None, schema: Union[int, str] = 'PUBLIC', statement_type: int = 0, distributed_joins: bool = False, local: bool = False, replicated_only: bool = False, @@ -531,8 +531,8 @@ def sql( Runs an SQL query and returns its result. :param query_str: SQL query string, - :param page_size: (optional) cursor page size. Default is 1, which - means that client makes one server call per row, + :param page_size: (optional) cursor page size. Default is 1024, which + means that client makes one server call per 1024 rows, :param query_args: (optional) query arguments. List of values or (value, type hint) tuples, :param schema: (optional) schema for the query. 
Defaults to `PUBLIC`, From 2ead7b928c82dd212273789a275b717ecafca295 Mon Sep 17 00:00:00 2001 From: Ivan Dashchinskiy Date: Mon, 8 Feb 2021 17:11:05 +0300 Subject: [PATCH 13/62] IGNITE-13967: Optimizations and refactoring of parsing This closes #10 --- .gitignore | 4 +- pyignite/api/affinity.py | 8 +- pyignite/api/binary.py | 70 +++--- pyignite/binary.py | 47 ++-- pyignite/cache.py | 4 +- pyignite/connection/__init__.py | 216 +++++++---------- pyignite/connection/handshake.py | 5 +- pyignite/datatypes/__init__.py | 19 ++ pyignite/datatypes/cache_properties.py | 18 +- pyignite/datatypes/complex.py | 229 ++++++------------ pyignite/datatypes/internal.py | 99 ++++---- pyignite/datatypes/null_object.py | 55 ++++- pyignite/datatypes/primitive.py | 43 ++-- pyignite/datatypes/primitive_arrays.py | 61 ++--- pyignite/datatypes/primitive_objects.py | 32 +-- pyignite/datatypes/standard.py | 210 ++++++---------- pyignite/queries/query.py | 40 +-- pyignite/queries/response.py | 49 ++-- pyignite/stream/__init__.py | 16 ++ pyignite/stream/binary_stream.py | 111 +++++++++ pyignite/utils.py | 26 +- requirements/tests.txt | 1 + tests/config/ignite-config-ssl.xml | 51 ---- tests/config/ignite-config.xml | 39 --- ...nfig-base.xml => ignite-config.xml.jinja2} | 39 ++- tests/config/{log4j.xml => log4j.xml.jinja2} | 4 +- tests/conftest.py | 8 +- tests/test_affinity_request_routing.py | 92 ++++--- tests/test_affinity_single_connection.py | 4 - tests/test_cache_composite_key_class_sql.py | 5 +- tests/test_sql.py | 4 +- tests/util.py | 71 ++---- tox.ini | 19 ++ 33 files changed, 802 insertions(+), 897 deletions(-) create mode 100644 pyignite/stream/__init__.py create mode 100644 pyignite/stream/binary_stream.py delete mode 100644 tests/config/ignite-config-ssl.xml delete mode 100644 tests/config/ignite-config.xml rename tests/config/{ignite-config-base.xml => ignite-config.xml.jinja2} (65%) rename tests/config/{log4j.xml => log4j.xml.jinja2} (90%) diff --git a/.gitignore b/.gitignore index 7372921..d28510c 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,8 @@ .eggs .pytest_cache .tox +tests/config/*.xml +junit*.xml pyignite.egg-info ignite-log-* -__pycache__ \ No newline at end of file +__pycache__ diff --git a/pyignite/api/affinity.py b/pyignite/api/affinity.py index d28cfb8..16148a1 100644 --- a/pyignite/api/affinity.py +++ b/pyignite/api/affinity.py @@ -55,12 +55,12 @@ partition_mapping = StructArray([ ('is_applicable', Bool), - ('cache_mapping', Conditional(lambda ctx: ctx['is_applicable'] == b'\x01', - lambda ctx: ctx['is_applicable'] is True, + ('cache_mapping', Conditional(lambda ctx: ctx['is_applicable'] and ctx['is_applicable'].value == 1, + lambda ctx: ctx['is_applicable'], cache_mapping, empty_cache_mapping)), - ('node_mapping', Conditional(lambda ctx: ctx['is_applicable'] == b'\x01', - lambda ctx: ctx['is_applicable'] is True, + ('node_mapping', Conditional(lambda ctx: ctx['is_applicable'] and ctx['is_applicable'].value == 1, + lambda ctx: ctx['is_applicable'], node_mapping, empty_node_mapping)), ]) diff --git a/pyignite/api/binary.py b/pyignite/api/binary.py index 722001a..0e63c17 100644 --- a/pyignite/api/binary.py +++ b/pyignite/api/binary.py @@ -24,16 +24,15 @@ from pyignite.queries.op_codes import * from pyignite.utils import int_overflow, entity_id from .result import APIResult +from ..stream import BinaryStream, READ_BACKWARD from ..queries.response import Response -def get_binary_type( - connection: 'Connection', binary_type: Union[str, int], query_id=None, -) -> APIResult: +def 
get_binary_type(conn: 'Connection', binary_type: Union[str, int], query_id=None) -> APIResult: """ Gets the binary type information by type ID. - :param connection: connection to Ignite server, + :param conn: connection to Ignite server, :param binary_type: binary type name or ID, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value @@ -49,39 +48,42 @@ def get_binary_type( query_id=query_id, ) - _, send_buffer = query_struct.from_python({ - 'type_id': entity_id(binary_type), - }) - connection.send(send_buffer) + with BinaryStream(conn) as stream: + query_struct.from_python(stream, { + 'type_id': entity_id(binary_type), + }) + conn.send(stream.getbuffer()) - response_head_struct = Response(protocol_version=connection.get_protocol_version(), + response_head_struct = Response(protocol_version=conn.get_protocol_version(), following=[('type_exists', Bool)]) - response_head_type, recv_buffer = response_head_struct.parse(connection) - response_head = response_head_type.from_buffer_copy(recv_buffer) - response_parts = [] - if response_head.type_exists: - resp_body_type, resp_body_buffer = body_struct.parse(connection) - response_parts.append(('body', resp_body_type)) - resp_body = resp_body_type.from_buffer_copy(resp_body_buffer) - recv_buffer += resp_body_buffer - if resp_body.is_enum: - resp_enum, resp_enum_buffer = enum_struct.parse(connection) - response_parts.append(('enums', resp_enum)) - recv_buffer += resp_enum_buffer - resp_schema_type, resp_schema_buffer = schema_struct.parse(connection) - response_parts.append(('schema', resp_schema_type)) - recv_buffer += resp_schema_buffer - - response_class = type( - 'GetBinaryTypeResponse', - (response_head_type,), - { - '_pack_': 1, - '_fields_': response_parts, - } - ) - response = response_class.from_buffer_copy(recv_buffer) + with BinaryStream(conn, conn.recv()) as stream: + init_pos = stream.tell() + response_head_type = response_head_struct.parse(stream) + response_head = stream.read_ctype(response_head_type, direction=READ_BACKWARD) + + response_parts = [] + if response_head.type_exists: + resp_body_type = body_struct.parse(stream) + response_parts.append(('body', resp_body_type)) + resp_body = stream.read_ctype(resp_body_type, direction=READ_BACKWARD) + if resp_body.is_enum: + resp_enum = enum_struct.parse(stream) + response_parts.append(('enums', resp_enum)) + + resp_schema_type = schema_struct.parse(stream) + response_parts.append(('schema', resp_schema_type)) + + response_class = type( + 'GetBinaryTypeResponse', + (response_head_type,), + { + '_pack_': 1, + '_fields_': response_parts, + } + ) + response = stream.read_ctype(response_class, position=init_pos) + result = APIResult(response) if result.status != 0: return result diff --git a/pyignite/binary.py b/pyignite/binary.py index 5d76c1b..da62bb5 100644 --- a/pyignite/binary.py +++ b/pyignite/binary.py @@ -102,18 +102,17 @@ def __new__( mcs, name, (GenericObjectProps, )+base_classes, namespace ) - def _build(self, client: 'Client' = None) -> int: + def _from_python(self, stream, save_to_buf=False): """ Method for building binary representation of the Generic object and calculating a hashcode from it. :param self: Generic object instance, - :param client: (optional) connection to Ignite cluster, + :param stream: BinaryStream + :param save_to_buf: Optional. If True, save serialized data to buffer. 
""" - if client is None: - compact_footer = True - else: - compact_footer = client.compact_footer + + compact_footer = stream.compact_footer # prepare header header_class = BinaryObject.build_header() @@ -129,18 +128,19 @@ def _build(self, client: 'Client' = None) -> int: header.type_id = self.type_id header.schema_id = self.schema_id + header_len = ctypes.sizeof(header_class) + initial_pos = stream.tell() + # create fields and calculate offsets offsets = [ctypes.sizeof(header_class)] - field_buffer = bytearray() schema_items = list(self.schema.items()) + + stream.seek(initial_pos + header_len) for field_name, field_type in schema_items: - partial_buffer = field_type.from_python( - getattr( - self, field_name, getattr(field_type, 'default', None) - ) - ) - offsets.append(max(offsets) + len(partial_buffer)) - field_buffer += partial_buffer + val = getattr(self, field_name, getattr(field_type, 'default', None)) + field_start_pos = stream.tell() + field_type.from_python(stream, val) + offsets.append(max(offsets) + stream.tell() - field_start_pos) offsets = offsets[:-1] @@ -160,15 +160,18 @@ def _build(self, client: 'Client' = None) -> int: schema[i].offset = offset # calculate size and hash code - header.schema_offset = ( - ctypes.sizeof(header_class) - + len(field_buffer) - ) + fields_data_len = stream.tell() - initial_pos - header_len + header.schema_offset = fields_data_len + header_len header.length = header.schema_offset + ctypes.sizeof(schema_class) - header.hash_code = hashcode(field_buffer) + header.hash_code = stream.hashcode(initial_pos + header_len, fields_data_len) + + stream.seek(initial_pos) + stream.write(header) + stream.seek(initial_pos + header.schema_offset) + stream.write(schema) - # reuse the results - self._buffer = bytes(header) + field_buffer + bytes(schema) + if save_to_buf: + self._buffer = bytes(stream.mem_view(initial_pos, stream.tell() - initial_pos)) self._hashcode = header.hash_code def _setattr(self, attr_name: str, attr_value: Any): @@ -180,7 +183,7 @@ def _setattr(self, attr_name: str, attr_value: Any): # `super()` is really need these parameters super(result, self).__setattr__(attr_name, attr_value) - setattr(result, _build.__name__, _build) + setattr(result, _from_python.__name__, _from_python) setattr(result, '__setattr__', _setattr) setattr(result, '_buffer', None) setattr(result, '_hashcode', None) diff --git a/pyignite/cache.py b/pyignite/cache.py index 64093e8..dd7dac4 100644 --- a/pyignite/cache.py +++ b/pyignite/cache.py @@ -17,7 +17,7 @@ from typing import Any, Dict, Iterable, Optional, Tuple, Union from .constants import * -from .binary import GenericObjectMeta +from .binary import GenericObjectMeta, unwrap_binary from .datatypes import prop_codes from .datatypes.internal import AnyDataObject from .exceptions import ( @@ -26,7 +26,7 @@ ) from .utils import ( cache_id, get_field_by_id, is_wrapped, - status_to_exception, unsigned, unwrap_binary, + status_to_exception, unsigned ) from .api.cache_config import ( cache_create, cache_create_with_config, diff --git a/pyignite/connection/__init__.py b/pyignite/connection/__init__.py index cf40718..0e793f8 100644 --- a/pyignite/connection/__init__.py +++ b/pyignite/connection/__init__.py @@ -35,7 +35,7 @@ from collections import OrderedDict import socket -from threading import Lock +from threading import RLock from typing import Union from pyignite.constants import * @@ -52,6 +52,8 @@ __all__ = ['Connection'] +from ..stream import BinaryStream, READ_BACKWARD + class Connection: """ @@ -60,8 +62,7 @@ class 
Connection: * socket wrapper. Detects fragmentation and network errors. See also https://docs.python.org/3/howto/sockets.html, - * binary protocol connector. Incapsulates handshake, data read-ahead and - failover reconnection. + * binary protocol connector. Incapsulates handshake and failover reconnection. """ _socket = None @@ -72,7 +73,6 @@ class Connection: host = None port = None timeout = None - prefetch = None username = None password = None ssl_params = {} @@ -97,7 +97,7 @@ def _check_ssl_params(params): ).format(param)) def __init__( - self, client: 'Client', prefetch: bytes = b'', timeout: int = None, + self, client: 'Client', timeout: float = 2.0, username: str = None, password: str = None, **ssl_params ): """ @@ -107,8 +107,6 @@ def __init__( https://docs.python.org/3/library/ssl.html#ssl-certificates. :param client: Ignite client object, - :param prefetch: (optional) initialize the read-ahead data buffer. - Empty by default, :param timeout: (optional) sets timeout (in seconds) for each socket operation including `connect`. 0 means non-blocking mode, which is virtually guaranteed to fail. Can accept integer or float value. @@ -143,7 +141,6 @@ def __init__( :param password: (optional) password to authenticate to Ignite cluster. """ self.client = client - self.prefetch = prefetch self.timeout = timeout self.username = username self.password = password @@ -152,7 +149,8 @@ def __init__( ssl_params['use_ssl'] = True self.ssl_params = ssl_params self._failed = False - self._in_use = Lock() + self._mux = RLock() + self._in_use = False @property def socket(self) -> socket.socket: @@ -162,17 +160,20 @@ def socket(self) -> socket.socket: @property def closed(self) -> bool: """ Tells if socket is closed. """ - return self._socket is None + with self._mux: + return self._socket is None @property def failed(self) -> bool: """ Tells if connection is failed. """ - return self._failed + with self._mux: + return self._failed @property def alive(self) -> bool: """ Tells if connection is up and no failure detected. """ - return not (self._failed or self.closed) + with self._mux: + return not (self._failed or self.closed) def __repr__(self) -> str: return '{}:{}'.format(self.host or '?', self.port or '?') @@ -189,8 +190,10 @@ def get_protocol_version(self): def _fail(self): """ set client to failed state. 
""" - self._failed = True - self._in_use.release() + with self._mux: + self._failed = True + + self._in_use = False def read_response(self) -> Union[dict, OrderedDict]: """ @@ -202,26 +205,27 @@ def read_response(self) -> Union[dict, OrderedDict]: ('length', Int), ('op_code', Byte), ]) - start_class, start_buffer = response_start.parse(self) - start = start_class.from_buffer_copy(start_buffer) - data = response_start.to_python(start) - response_end = None - if data['op_code'] == 0: - response_end = Struct([ - ('version_major', Short), - ('version_minor', Short), - ('version_patch', Short), - ('message', String), - ]) - elif self.get_protocol_version() >= (1, 4, 0): - response_end = Struct([ - ('node_uuid', UUIDObject), - ]) - if response_end: - end_class, end_buffer = response_end.parse(self) - end = end_class.from_buffer_copy(end_buffer) - data.update(response_end.to_python(end)) - return data + with BinaryStream(self, self.recv()) as stream: + start_class = response_start.parse(stream) + start = stream.read_ctype(start_class, direction=READ_BACKWARD) + data = response_start.to_python(start) + response_end = None + if data['op_code'] == 0: + response_end = Struct([ + ('version_major', Short), + ('version_minor', Short), + ('version_patch', Short), + ('message', String), + ]) + elif self.get_protocol_version() >= (1, 4, 0): + response_end = Struct([ + ('node_uuid', UUIDObject), + ]) + if response_end: + end_class = response_end.parse(stream) + end = stream.read_ctype(end_class, direction=READ_BACKWARD) + data.update(response_end.to_python(end)) + return data def connect( self, host: str = None, port: int = None @@ -234,9 +238,10 @@ def connect( """ detecting_protocol = False - # go non-blocking for faster reconnect - if not self._in_use.acquire(blocking=False): - raise ConnectionError('Connection is in use.') + with self._mux: + if self._in_use: + raise ConnectionError('Connection is in use.') + self._in_use = True # choose highest version first if self.client.protocol_version is None: @@ -289,7 +294,11 @@ def _connect_version( self.username, self.password ) - self.send(hs_request) + + with BinaryStream(self) as stream: + hs_request.from_python(stream) + self.send(stream.getbuffer()) + hs_response = self.read_response() if hs_response['op_code'] == 0: # disconnect but keep in use @@ -345,12 +354,7 @@ def _reconnect(self): if not self.failed: return - # return connection to initial state regardless of use lock - self.close(release=False) - try: - self._in_use.release() - except RuntimeError: - pass + self.close() # connect and silence the connection errors try: @@ -370,20 +374,7 @@ def _transfer_params(self, to: 'Connection'): to.host = self.host to.port = self.port - def clone(self, prefetch: bytes = b'') -> 'Connection': - """ - Clones this connection in its current state. - - :return: `Connection` object. - """ - clone = self.__class__(self.client, **self.ssl_params) - self._transfer_params(to=clone) - if self.alive: - clone.connect(self.host, self.port) - clone.prefetch = prefetch - return clone - - def send(self, data: bytes, flags=None): + def send(self, data: Union[bytes, bytearray, memoryview], flags=None): """ Send data down the socket. 
@@ -396,70 +387,45 @@ def send(self, data: bytes, flags=None): kwargs = {} if flags is not None: kwargs['flags'] = flags - data = bytes(data) - total_bytes_sent = 0 - - while total_bytes_sent < len(data): - try: - bytes_sent = self.socket.send( - data[total_bytes_sent:], - **kwargs - ) - except connection_errors: - self._fail() - self.reconnect() - raise - if bytes_sent == 0: - self._fail() - self.reconnect() - raise SocketError('Connection broken.') - total_bytes_sent += bytes_sent - - def recv(self, buffersize, flags=None) -> bytes: - """ - Receive data from socket or read-ahead buffer. - :param buffersize: bytes to receive, - :param flags: (optional) OS-specific flags, - :return: data received. - """ + try: + self.socket.sendall(data, **kwargs) + except Exception: + self._fail() + self.reconnect() + raise + + def recv(self, flags=None) -> bytearray: + def _recv(buffer, num_bytes): + bytes_to_receive = num_bytes + while bytes_to_receive > 0: + try: + bytes_rcvd = self.socket.recv_into(buffer, bytes_to_receive, **kwargs) + if bytes_rcvd == 0: + raise SocketError('Connection broken.') + except connection_errors: + self._fail() + self.reconnect() + raise + + buffer = buffer[bytes_rcvd:] + bytes_to_receive -= bytes_rcvd + if self.closed: raise SocketError('Attempt to use closed connection.') - pref_size = len(self.prefetch) - if buffersize > pref_size: - result = self.prefetch - self.prefetch = b'' - try: - result += self._recv(buffersize-pref_size, flags) - except connection_errors: - self._fail() - self.reconnect() - raise - return result - else: - result = self.prefetch[:buffersize] - self.prefetch = self.prefetch[buffersize:] - return result - - def _recv(self, buffersize, flags=None) -> bytes: - """ - Handle socket data reading. - """ kwargs = {} if flags is not None: kwargs['flags'] = flags - chunks = [] - bytes_rcvd = 0 - while bytes_rcvd < buffersize: - chunk = self.socket.recv(buffersize-bytes_rcvd, **kwargs) - if chunk == b'': - raise SocketError('Connection broken.') - chunks.append(chunk) - bytes_rcvd += len(chunk) + data = bytearray(4) + _recv(memoryview(data), 4) + response_len = int.from_bytes(data, PROTOCOL_BYTE_ORDER) + + data.extend(bytearray(response_len)) + _recv(memoryview(data)[4:], response_len) + return data - return b''.join(chunks) def close(self, release=True): """ @@ -467,16 +433,14 @@ def close(self, release=True): not required, since sockets are automatically closed when garbage-collected. 
""" - if release: - try: - self._in_use.release() - except RuntimeError: - pass - - if self._socket: - try: - self._socket.shutdown(socket.SHUT_RDWR) - self._socket.close() - except connection_errors: - pass - self._socket = None + with self._mux: + if self._socket: + try: + self._socket.shutdown(socket.SHUT_RDWR) + self._socket.close() + except connection_errors: + pass + self._socket = None + + if release: + self._in_use = False diff --git a/pyignite/connection/handshake.py b/pyignite/connection/handshake.py index 2e0264f..3315c4e 100644 --- a/pyignite/connection/handshake.py +++ b/pyignite/connection/handshake.py @@ -50,7 +50,7 @@ def __init__( ]) self.handshake_struct = Struct(fields) - def __bytes__(self) -> bytes: + def from_python(self, stream): handshake_data = { 'length': 8, 'op_code': OP_HANDSHAKE, @@ -69,4 +69,5 @@ def __bytes__(self) -> bytes: len(self.username), len(self.password), ]) - return self.handshake_struct.from_python(handshake_data) + + self.handshake_struct.from_python(stream, handshake_data) diff --git a/pyignite/datatypes/__init__.py b/pyignite/datatypes/__init__.py index 5024f79..49860bd 100644 --- a/pyignite/datatypes/__init__.py +++ b/pyignite/datatypes/__init__.py @@ -25,3 +25,22 @@ from .primitive_arrays import * from .primitive_objects import * from .standard import * +from ..stream import BinaryStream, READ_BACKWARD + + +def unwrap_binary(client: 'Client', wrapped: tuple) -> object: + """ + Unwrap wrapped BinaryObject and convert it to Python data. + + :param client: connection to Ignite cluster, + :param wrapped: `WrappedDataObject` value, + :return: dict representing wrapped BinaryObject. + """ + from pyignite.datatypes.complex import BinaryObject + + blob, offset = wrapped + with BinaryStream(client.random_node, blob) as stream: + data_class = BinaryObject.parse(stream) + result = BinaryObject.to_python(stream.read_ctype(data_class, direction=READ_BACKWARD), client) + + return result diff --git a/pyignite/datatypes/cache_properties.py b/pyignite/datatypes/cache_properties.py index e94db5f..eadaef9 100644 --- a/pyignite/datatypes/cache_properties.py +++ b/pyignite/datatypes/cache_properties.py @@ -92,10 +92,11 @@ def build_header(cls): ) @classmethod - def parse(cls, connection: 'Connection'): + def parse(cls, stream): + init_pos = stream.tell() header_class = cls.build_header() - header_buffer = connection.recv(ctypes.sizeof(header_class)) - data_class, data_buffer = cls.prop_data_class.parse(connection) + data_class = cls.prop_data_class.parse(stream) + prop_class = type( cls.__name__, (header_class,), @@ -106,7 +107,9 @@ def parse(cls, connection: 'Connection'): ], } ) - return prop_class, header_buffer + data_buffer + + stream.seek(init_pos + ctypes.sizeof(prop_class)) + return prop_class @classmethod def to_python(cls, ctype_object, *args, **kwargs): @@ -115,11 +118,12 @@ def to_python(cls, ctype_object, *args, **kwargs): ) @classmethod - def from_python(cls, value): + def from_python(cls, stream, value): header_class = cls.build_header() header = header_class() header.prop_code = cls.prop_code - return bytes(header) + cls.prop_data_class.from_python(value) + stream.write(bytes(header)) + cls.prop_data_class.from_python(stream, value) class PropName(PropBase): @@ -275,7 +279,7 @@ class PropStatisticsEnabled(PropBase): class AnyProperty(PropBase): @classmethod - def from_python(cls, value): + def from_python(cls, stream, value): raise Exception( 'You must choose a certain type ' 'for your cache configuration property' diff --git 
a/pyignite/datatypes/complex.py b/pyignite/datatypes/complex.py index 6860583..aed3cda 100644 --- a/pyignite/datatypes/complex.py +++ b/pyignite/datatypes/complex.py @@ -15,27 +15,27 @@ from collections import OrderedDict import ctypes -import inspect +from io import SEEK_CUR from typing import Iterable, Dict from pyignite.constants import * from pyignite.exceptions import ParseError - from .base import IgniteDataType from .internal import AnyDataObject, infer_from_python from .type_codes import * from .type_ids import * from .type_names import * -from .null_object import Null - +from .null_object import Null, Nullable __all__ = [ 'Map', 'ObjectArrayObject', 'CollectionObject', 'MapObject', 'WrappedDataObject', 'BinaryObject', ] +from ..stream import BinaryStream + -class ObjectArrayObject(IgniteDataType): +class ObjectArrayObject(IgniteDataType, Nullable): """ Array of Ignite objects of any consistent type. Its Python representation is tuple(type_id, iterable of any type). The only type ID that makes sense @@ -69,20 +69,14 @@ def build_header(cls): ) @classmethod - def parse(cls, client: 'Client'): - tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) - - if tc_type == TC_NULL: - return Null.build_c_type(), tc_type - + def parse_not_null(cls, stream): header_class = cls.build_header() - buffer = tc_type + client.recv(ctypes.sizeof(header_class) - len(tc_type)) - header = header_class.from_buffer_copy(buffer) - fields = [] + header = stream.read_ctype(header_class) + stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + fields = [] for i in range(header.length): - c_type, buffer_fragment = AnyDataObject.parse(client) - buffer += buffer_fragment + c_type = AnyDataObject.parse(stream) fields.append(('element_{}'.format(i), c_type)) final_class = type( @@ -93,15 +87,13 @@ def parse(cls, client: 'Client'): '_fields_': fields, } ) - return final_class, buffer + + return final_class @classmethod - def to_python(cls, ctype_object, *args, **kwargs): + def to_python_not_null(cls, ctype_object, *args, **kwargs): result = [] - length = getattr(ctype_object, "length", None) - if length is None: - return None - for i in range(length): + for i in range(ctype_object.length): result.append( AnyDataObject.to_python( getattr(ctype_object, 'element_{}'.format(i)), @@ -111,10 +103,7 @@ def to_python(cls, ctype_object, *args, **kwargs): return ctype_object.type_id, result @classmethod - def from_python(cls, value): - if value is None: - return Null.from_python() - + def from_python_not_null(cls, stream, value): type_or_id, value = value header_class = cls.build_header() header = header_class() @@ -129,14 +118,13 @@ def from_python(cls, value): length = 1 header.length = length header.type_id = type_or_id - buffer = bytearray(header) + stream.write(header) for x in value: - buffer += infer_from_python(x) - return bytes(buffer) + infer_from_python(stream, x) -class WrappedDataObject(IgniteDataType): +class WrappedDataObject(IgniteDataType, Nullable): """ One or more binary objects can be wrapped in an array. 
This allows reading, storing, passing and writing objects efficiently without understanding @@ -162,15 +150,9 @@ def build_header(cls): ) @classmethod - def parse(cls, client: 'Client'): - tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) - - if tc_type == TC_NULL: - return Null.build_c_type(), tc_type - + def parse_not_null(cls, stream): header_class = cls.build_header() - buffer = tc_type + client.recv(ctypes.sizeof(header_class) - len(tc_type)) - header = header_class.from_buffer_copy(buffer) + header = stream.read_ctype(header_class) final_class = type( cls.__name__, @@ -183,21 +165,20 @@ def parse(cls, client: 'Client'): ], } ) - buffer += client.recv( - ctypes.sizeof(final_class) - ctypes.sizeof(header_class) - ) - return final_class, buffer + + stream.seek(ctypes.sizeof(final_class), SEEK_CUR) + return final_class @classmethod def to_python(cls, ctype_object, *args, **kwargs): return bytes(ctype_object.payload), ctype_object.offset @classmethod - def from_python(cls, value): + def from_python(cls, stream, value): raise ParseError('Send unwrapped data.') -class CollectionObject(IgniteDataType): +class CollectionObject(IgniteDataType, Nullable): """ Similar to object array, but contains platform-agnostic deserialization type hint instead of type ID. @@ -260,20 +241,14 @@ def build_header(cls): ) @classmethod - def parse(cls, client: 'Client'): - tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) - - if tc_type == TC_NULL: - return Null.build_c_type(), tc_type - + def parse_not_null(cls, stream): header_class = cls.build_header() - buffer = tc_type + client.recv(ctypes.sizeof(header_class) - len(tc_type)) - header = header_class.from_buffer_copy(buffer) - fields = [] + header = stream.read_ctype(header_class) + stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + fields = [] for i in range(header.length): - c_type, buffer_fragment = AnyDataObject.parse(client) - buffer += buffer_fragment + c_type = AnyDataObject.parse(stream) fields.append(('element_{}'.format(i), c_type)) final_class = type( @@ -284,7 +259,7 @@ def parse(cls, client: 'Client'): '_fields_': fields, } ) - return final_class, buffer + return final_class @classmethod def to_python(cls, ctype_object, *args, **kwargs): @@ -302,10 +277,7 @@ def to_python(cls, ctype_object, *args, **kwargs): return ctype_object.type, result @classmethod - def from_python(cls, value): - if value is None: - return Null.from_python() - + def from_python_not_null(cls, stream, value): type_or_id, value = value header_class = cls.build_header() header = header_class() @@ -320,14 +292,13 @@ def from_python(cls, value): length = 1 header.length = length header.type = type_or_id - buffer = bytearray(header) + stream.write(header) for x in value: - buffer += infer_from_python(x) - return bytes(buffer) + infer_from_python(stream, x) -class Map(IgniteDataType): +class Map(IgniteDataType, Nullable): """ Dictionary type, payload-only. 
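The collection and map classes above now share one stream-based contract: `parse` consumes bytes from a `BinaryStream` (advancing its position) and returns only a ctypes class, while a caller that needs the parsed value re-reads it from the stream. A minimal sketch of that calling pattern, assuming `stream` is a `BinaryStream` already positioned at a serialized map:

from pyignite.datatypes.complex import Map
from pyignite.stream import READ_BACKWARD

c_type = Map.parse(stream)                                      # advances the stream past the payload (or a single TC_NULL byte)
ctype_obj = stream.read_ctype(c_type, direction=READ_BACKWARD)  # re-read the bytes that were just parsed
value = Map.to_python(ctype_obj)                                # plain Python data, or None for TC_NULL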
@@ -358,20 +329,14 @@ def build_header(cls): ) @classmethod - def parse(cls, client: 'Client'): - tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) - - if tc_type == TC_NULL: - return Null.build_c_type(), tc_type - + def parse_not_null(cls, stream): header_class = cls.build_header() - buffer = tc_type + client.recv(ctypes.sizeof(header_class) - len(tc_type)) - header = header_class.from_buffer_copy(buffer) - fields = [] + header = stream.read_ctype(header_class) + stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + fields = [] for i in range(header.length << 1): - c_type, buffer_fragment = AnyDataObject.parse(client) - buffer += buffer_fragment + c_type = AnyDataObject.parse(stream) fields.append(('element_{}'.format(i), c_type)) final_class = type( @@ -382,7 +347,7 @@ def parse(cls, client: 'Client'): '_fields_': fields, } ) - return final_class, buffer + return final_class @classmethod def to_python(cls, ctype_object, *args, **kwargs): @@ -402,7 +367,7 @@ def to_python(cls, ctype_object, *args, **kwargs): return result @classmethod - def from_python(cls, value, type_id=None): + def from_python(cls, stream, value, type_id=None): header_class = cls.build_header() header = header_class() length = len(value) @@ -414,12 +379,11 @@ def from_python(cls, value, type_id=None): ) if hasattr(header, 'type'): header.type = type_id - buffer = bytearray(header) + stream.write(header) for k, v in value.items(): - buffer += infer_from_python(k) - buffer += infer_from_python(v) - return bytes(buffer) + infer_from_python(stream, k) + infer_from_python(stream, v) class MapObject(Map): @@ -462,15 +426,16 @@ def to_python(cls, ctype_object, *args, **kwargs): ) @classmethod - def from_python(cls, value): + def from_python(cls, stream, value): if value is None: - return Null.from_python() + Null.from_python(stream) + return type_id, value = value - return super().from_python(value, type_id) + super().from_python(stream, value, type_id) -class BinaryObject(IgniteDataType): +class BinaryObject(IgniteDataType, Nullable): _type_id = TYPE_BINARY_OBJ type_code = TC_COMPLEX_OBJECT @@ -482,42 +447,14 @@ class BinaryObject(IgniteDataType): COMPACT_FOOTER = 0x0020 @staticmethod - def find_client(): - """ - A nice hack. Extracts the nearest `Client` instance from the - call stack. 
- """ - from pyignite import Client - from pyignite.connection import Connection - - frame = None - try: - for rec in inspect.stack()[2:]: - frame = rec[0] - code = frame.f_code - for varname in code.co_varnames: - suspect = frame.f_locals.get(varname) - if isinstance(suspect, Client): - return suspect - if isinstance(suspect, Connection): - return suspect.client - finally: - del frame - - @staticmethod - def hashcode( - value: object, client: 'Client' = None, *args, **kwargs - ) -> int: + def hashcode(value: object, client: None) -> int: # binary objects's hashcode implementation is special in the sense # that you need to fully serialize the object to calculate # its hashcode - if value._hashcode is None: + if not value._hashcode and client : - # …and for to serialize it you need a Client instance - if client is None: - client = BinaryObject.find_client() - - value._build(client) + with BinaryStream(client.random_node) as stream: + value._from_python(stream, save_to_buf=True) return value._hashcode @@ -565,41 +502,25 @@ def schema_type(cls, flags: int): }, ) - @staticmethod - def get_dataclass(conn: 'Connection', header) -> OrderedDict: - # get field names from outer space - result = conn.client.query_binary_type( - header.type_id, - header.schema_id - ) - if not result: - raise ParseError('Binary type is not registered') - return result - @classmethod - def parse(cls, client: 'Client'): + def parse_not_null(cls, stream): from pyignite.datatypes import Struct - tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) - - if tc_type == TC_NULL: - return Null.build_c_type(), tc_type header_class = cls.build_header() - buffer = tc_type + client.recv(ctypes.sizeof(header_class) - len(tc_type)) - header = header_class.from_buffer_copy(buffer) + header = stream.read_ctype(header_class) + stream.seek(ctypes.sizeof(header_class), SEEK_CUR) # ignore full schema, always retrieve fields' types and order # from complex types registry - data_class = cls.get_dataclass(client, header) + data_class = stream.get_dataclass(header) fields = data_class.schema.items() object_fields_struct = Struct(fields) - object_fields, object_fields_buffer = object_fields_struct.parse(client) - buffer += object_fields_buffer + object_fields = object_fields_struct.parse(stream) final_class_fields = [('object_fields', object_fields)] if header.flags & cls.HAS_SCHEMA: schema = cls.schema_type(header.flags) * len(fields) - buffer += client.recv(ctypes.sizeof(schema)) + stream.seek(ctypes.sizeof(schema), SEEK_CUR) final_class_fields.append(('schema', schema)) final_class = type( @@ -611,8 +532,8 @@ def parse(cls, client: 'Client'): } ) # register schema encoding approach - client.compact_footer = bool(header.flags & cls.COMPACT_FOOTER) - return final_class, buffer + stream.compact_footer = bool(header.flags & cls.COMPACT_FOOTER) + return final_class @classmethod def to_python(cls, ctype_object, client: 'Client' = None, *args, **kwargs): @@ -642,23 +563,9 @@ def to_python(cls, ctype_object, client: 'Client' = None, *args, **kwargs): return result @classmethod - def from_python(cls, value: object): - if value is None: - return Null.from_python() - - if getattr(value, '_buffer', None) is None: - client = cls.find_client() - - # if no client can be found, the class of the `value` is discarded - # and the new dataclass is automatically registered later on - if client: - client.register_binary_type(value.__class__) - else: - raise Warning( - 'Can not register binary type {}'.format(value.type_name) - ) - - # build binary representation - 
value._build(client) - - return value._buffer + def from_python_not_null(cls, stream, value): + stream.register_binary_type(value.__class__) + if getattr(value, '_buffer', None): + stream.write(value._buffer) + else: + value._from_python(stream) diff --git a/pyignite/datatypes/internal.py b/pyignite/datatypes/internal.py index 23b9cc4..0111a22 100644 --- a/pyignite/datatypes/internal.py +++ b/pyignite/datatypes/internal.py @@ -17,6 +17,7 @@ import ctypes import decimal from datetime import date, datetime, timedelta +from io import SEEK_CUR from typing import Any, Tuple, Union, Callable import uuid @@ -33,6 +34,8 @@ 'infer_from_python', ] +from ..stream import READ_BACKWARD + def tc_map(key: bytes, _memo_map: dict = {}): """ @@ -119,11 +122,12 @@ def __init__(self, predicate1: Callable[[any], bool], predicate2: Callable[[any] self.var1 = var1 self.var2 = var2 - def parse(self, client: 'Client', context): - return self.var1.parse(client) if self.predicate1(context) else self.var2.parse(client) + def parse(self, stream, context): + return self.var1.parse(stream) if self.predicate1(context) else self.var2.parse(stream) def to_python(self, ctype_object, context, *args, **kwargs): - return self.var1.to_python(ctype_object, *args, **kwargs) if self.predicate2(context) else self.var2.to_python(ctype_object, *args, **kwargs) + return self.var1.to_python(ctype_object, *args, **kwargs) if self.predicate2(context)\ + else self.var2.to_python(ctype_object, *args, **kwargs) @attr.s class StructArray: @@ -144,14 +148,17 @@ def build_header_class(self): }, ) - def parse(self, client: 'Client'): - buffer = client.recv(ctypes.sizeof(self.counter_type)) - length = int.from_bytes(buffer, byteorder=PROTOCOL_BYTE_ORDER) - fields = [] + def parse(self, stream): + counter_type_len = ctypes.sizeof(self.counter_type) + length = int.from_bytes( + stream.mem_view(offset=counter_type_len), + byteorder=PROTOCOL_BYTE_ORDER + ) + stream.seek(counter_type_len, SEEK_CUR) + fields = [] for i in range(length): - c_type, buffer_fragment = Struct(self.following).parse(client) - buffer += buffer_fragment + c_type = Struct(self.following).parse(stream) fields.append(('element_{}'.format(i), c_type)) data_class = type( @@ -163,7 +170,7 @@ def parse(self, client: 'Client'): }, ) - return data_class, buffer + return data_class def to_python(self, ctype_object, *args, **kwargs): result = [] @@ -179,20 +186,19 @@ def to_python(self, ctype_object, *args, **kwargs): ) return result - def from_python(self, value): + def from_python(self, stream, value): length = len(value) header_class = self.build_header_class() header = header_class() header.length = length - buffer = bytearray(header) + + stream.write(header) for i, v in enumerate(value): for default_key, default_value in self.defaults.items(): v.setdefault(default_key, default_value) for name, el_class in self.following: - buffer += el_class.from_python(v[name]) - - return bytes(buffer) + el_class.from_python(stream, v[name]) @attr.s @@ -202,21 +208,13 @@ class Struct: dict_type = attr.ib(default=OrderedDict) defaults = attr.ib(type=dict, default={}) - def parse( - self, client: 'Client' - ) -> Tuple[ctypes.LittleEndianStructure, bytes]: - buffer = b'' - fields = [] - values = {} - + def parse(self, stream): + fields, values = [], {} for name, c_type in self.fields: is_cond = isinstance(c_type, Conditional) - c_type, buffer_fragment = c_type.parse(client, values) if is_cond else c_type.parse(client) - buffer += buffer_fragment - + c_type = c_type.parse(stream, values) if is_cond 
else c_type.parse(stream) fields.append((name, c_type)) - - values[name] = buffer_fragment + values[name] = stream.read_ctype(c_type, direction=READ_BACKWARD) data_class = type( 'Struct', @@ -227,7 +225,7 @@ def parse( }, ) - return data_class, buffer + return data_class def to_python( self, ctype_object, *args, **kwargs @@ -245,16 +243,12 @@ def to_python( ) return result - def from_python(self, value) -> bytes: - buffer = b'' - + def from_python(self, stream, value): for default_key, default_value in self.defaults.items(): value.setdefault(default_key, default_value) for name, el_class in self.fields: - buffer += el_class.from_python(value[name]) - - return buffer + el_class.from_python(stream, value[name]) class AnyDataObject: @@ -299,14 +293,13 @@ def get_subtype(iterable, allow_none=False): return type_first @classmethod - def parse(cls, client: 'Client'): - type_code = client.recv(ctypes.sizeof(ctypes.c_byte)) + def parse(cls, stream): + type_code = bytes(stream.mem_view(offset=ctypes.sizeof(ctypes.c_byte))) try: data_class = tc_map(type_code) except KeyError: raise ParseError('Unknown type code: `{}`'.format(type_code)) - client.prefetch += type_code - return data_class.parse(client) + return data_class.parse(stream) @classmethod def to_python(cls, ctype_object, *args, **kwargs): @@ -418,11 +411,12 @@ def map_python_type(cls, value): ) @classmethod - def from_python(cls, value): - return cls.map_python_type(value).from_python(value) + def from_python(cls, stream, value): + p_type = cls.map_python_type(value) + p_type.from_python(stream, value) -def infer_from_python(value: Any): +def infer_from_python(stream, value: Any): """ Convert pythonic value to ctypes buffer, type hint-aware. @@ -433,7 +427,8 @@ def infer_from_python(value: Any): value, data_type = value else: data_type = AnyDataObject - return data_type.from_python(value) + + data_type.from_python(stream, value) @attr.s @@ -455,15 +450,14 @@ def build_header(self): } ) - def parse(self, client: 'Client'): + def parse(self, stream): header_class = self.build_header() - buffer = client.recv(ctypes.sizeof(header_class)) - header = header_class.from_buffer_copy(buffer) - fields = [] + header = stream.read_ctype(header_class) + stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + fields = [] for i in range(header.length): - c_type, buffer_fragment = super().parse(client) - buffer += buffer_fragment + c_type = super().parse(stream) fields.append(('element_{}'.format(i), c_type)) final_class = type( @@ -474,7 +468,7 @@ def parse(self, client: 'Client'): '_fields_': fields, } ) - return final_class, buffer + return final_class @classmethod def to_python(cls, ctype_object, *args, **kwargs): @@ -491,7 +485,7 @@ def to_python(cls, ctype_object, *args, **kwargs): ) return result - def from_python(self, value): + def from_python(self, stream, value): header_class = self.build_header() header = header_class() @@ -501,8 +495,7 @@ def from_python(self, value): value = [value] length = 1 header.length = length - buffer = bytearray(header) + stream.write(header) for x in value: - buffer += infer_from_python(x) - return bytes(buffer) + infer_from_python(stream, x) diff --git a/pyignite/datatypes/null_object.py b/pyignite/datatypes/null_object.py index 19b41c7..912ded8 100644 --- a/pyignite/datatypes/null_object.py +++ b/pyignite/datatypes/null_object.py @@ -20,6 +20,7 @@ """ import ctypes +from io import SEEK_CUR from typing import Any from .base import IgniteDataType @@ -28,6 +29,8 @@ __all__ = ['Null'] +from ..constants import 
PROTOCOL_BYTE_ORDER + class Null(IgniteDataType): default = None @@ -55,16 +58,56 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def parse(cls, client: 'Client'): - buffer = client.recv(ctypes.sizeof(ctypes.c_byte)) - data_type = cls.build_c_type() - return data_type, buffer + def parse(cls, stream): + init_pos, offset = stream.tell(), ctypes.sizeof(ctypes.c_byte) + stream.seek(offset, SEEK_CUR) + return cls.build_c_type() @staticmethod def to_python(*args, **kwargs): return None @staticmethod - def from_python(*args): - return TC_NULL + def from_python(stream, *args): + stream.write(TC_NULL) + + +class Nullable: + @classmethod + def parse_not_null(cls, stream): + raise NotImplementedError + + @classmethod + def parse(cls, stream): + type_len = ctypes.sizeof(ctypes.c_byte) + + if stream.mem_view(offset=type_len) == TC_NULL: + stream.seek(type_len, SEEK_CUR) + return Null.build_c_type() + + return cls.parse_not_null(stream) + @classmethod + def to_python_not_null(cls, ctypes_object, *args, **kwargs): + raise NotImplementedError + + @classmethod + def to_python(cls, ctypes_object, *args, **kwargs): + if ctypes_object.type_code == int.from_bytes( + TC_NULL, + byteorder=PROTOCOL_BYTE_ORDER + ): + return None + + return cls.to_python_not_null(ctypes_object, *args, **kwargs) + + @classmethod + def from_python_not_null(cls, stream, value): + raise NotImplementedError + + @classmethod + def from_python(cls, stream, value): + if value is None: + Null.from_python(stream) + else: + cls.from_python_not_null(stream, value) diff --git a/pyignite/datatypes/primitive.py b/pyignite/datatypes/primitive.py index d549fda..ffa2e32 100644 --- a/pyignite/datatypes/primitive.py +++ b/pyignite/datatypes/primitive.py @@ -15,7 +15,7 @@ import ctypes import struct -import sys +from io import SEEK_CUR from pyignite.constants import * from .base import IgniteDataType @@ -47,8 +47,10 @@ class Primitive(IgniteDataType): c_type = None @classmethod - def parse(cls, client: 'Client'): - return cls.c_type, client.recv(ctypes.sizeof(cls.c_type)) + def parse(cls, stream): + init_pos, offset = stream.tell(), ctypes.sizeof(cls.c_type) + stream.seek(offset, SEEK_CUR) + return cls.c_type @classmethod def to_python(cls, ctype_object, *args, **kwargs): @@ -61,8 +63,8 @@ class Byte(Primitive): c_type = ctypes.c_byte @classmethod - def from_python(cls, value): - return struct.pack(" 0: + @classmethod + def to_python_not_null(cls, ctype_object, *args, **kwargs): + if ctype_object.length > 0: return ctype_object.data.decode(PROTOCOL_STRING_ENCODING) - else: - return '' - @classmethod - def from_python(cls, value): - if value is None: - return Null.from_python() + return '' + @classmethod + def from_python_not_null(cls, stream, value): if isinstance(value, str): value = value.encode(PROTOCOL_STRING_ENCODING) length = len(value) @@ -135,10 +120,11 @@ def from_python(cls, value): ) data_object.length = length data_object.data = value - return bytes(data_object) + + stream.write(data_object) -class DecimalObject(IgniteDataType): +class DecimalObject(IgniteDataType, Nullable): _type_name = NAME_DECIMAL _type_id = TYPE_DECIMAL type_code = TC_DECIMAL @@ -165,18 +151,10 @@ def build_c_header(cls): ) @classmethod - def parse(cls, client: 'Client'): - tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) - # Decimal or Null - if tc_type == TC_NULL: - return Null.build_c_type(), tc_type - + def parse_not_null(cls, stream): header_class = cls.build_c_header() - buffer = tc_type + client.recv( - ctypes.sizeof(header_class) - - 
len(tc_type) - ) - header = header_class.from_buffer_copy(buffer) + header = stream.read_ctype(header_class) + data_type = type( cls.__name__, (header_class,), @@ -187,17 +165,12 @@ def parse(cls, client: 'Client'): ], } ) - buffer += client.recv( - ctypes.sizeof(data_type) - - ctypes.sizeof(header_class) - ) - return data_type, buffer - @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - if getattr(ctype_object, 'length', None) is None: - return None + stream.seek(ctypes.sizeof(data_type), SEEK_CUR) + return data_type + @classmethod + def to_python_not_null(cls, ctype_object, *args, **kwargs): sign = 1 if ctype_object.data[0] & 0x80 else 0 data = ctype_object.data[1:] data.insert(0, ctype_object.data[0] & 0x7f) @@ -218,10 +191,7 @@ def to_python(cls, ctype_object, *args, **kwargs): return result @classmethod - def from_python(cls, value: decimal.Decimal): - if value is None: - return Null.from_python() - + def from_python_not_null(cls, stream, value: decimal.Decimal): sign, digits, scale = value.normalize().as_tuple() integer = int(''.join([str(d) for d in digits])) # calculate number of bytes (at least one, and not forget the sign bit) @@ -257,7 +227,8 @@ def from_python(cls, value: decimal.Decimal): data_object.scale = -scale for i in range(length): data_object.data[i] = data[i] - return bytes(data_object) + + stream.write(data_object) class UUIDObject(StandardObject): @@ -300,10 +271,7 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def from_python(cls, value: uuid.UUID): - if value is None: - return Null.from_python() - + def from_python_not_null(cls, stream, value: uuid.UUID): data_type = cls.build_c_type() data_object = data_type() data_object.type_code = int.from_bytes( @@ -312,15 +280,11 @@ def from_python(cls, value: uuid.UUID): ) for i, byte in zip(cls.UUID_BYTE_ORDER, bytearray(value.bytes)): data_object.value[i] = byte - return bytes(data_object) + + stream.write(data_object) @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): - if ctypes_object.type_code == int.from_bytes( - TC_NULL, - byteorder=PROTOCOL_BYTE_ORDER - ): - return None + def to_python_not_null(cls, ctypes_object, *args, **kwargs): uuid_array = bytearray(ctypes_object.value) return uuid.UUID( bytes=bytes([uuid_array[i] for i in cls.UUID_BYTE_ORDER]) @@ -367,9 +331,7 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def from_python(cls, value: tuple): - if value is None: - return Null.from_python() + def from_python_not_null(cls, stream, value: tuple): data_type = cls.build_c_type() data_object = data_type() data_object.type_code = int.from_bytes( @@ -378,15 +340,11 @@ def from_python(cls, value: tuple): ) data_object.epoch = int(value[0].timestamp() * 1000) data_object.fraction = value[1] - return bytes(data_object) + + stream.write(data_object) @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): - if ctypes_object.type_code == int.from_bytes( - TC_NULL, - byteorder=PROTOCOL_BYTE_ORDER - ): - return None + def to_python_not_null(cls, ctypes_object, *args, **kwargs): return ( datetime.fromtimestamp(ctypes_object.epoch/1000), ctypes_object.fraction @@ -428,9 +386,7 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def from_python(cls, value: [date, datetime]): - if value is None: - return Null.from_python() + def from_python_not_null(cls, stream, value: [date, datetime]): if type(value) is date: value = datetime.combine(value, time()) data_type = cls.build_c_type() @@ -440,15 +396,11 @@ def from_python(cls, value: 
[date, datetime]): byteorder=PROTOCOL_BYTE_ORDER ) data_object.epoch = int(value.timestamp() * 1000) - return bytes(data_object) + + stream.write(data_object) @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): - if ctypes_object.type_code == int.from_bytes( - TC_NULL, - byteorder=PROTOCOL_BYTE_ORDER - ): - return None + def to_python_not_null(cls, ctypes_object, *args, **kwargs): return datetime.fromtimestamp(ctypes_object.epoch/1000) @@ -486,9 +438,7 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def from_python(cls, value: timedelta): - if value is None: - return Null.from_python() + def from_python_not_null(cls, stream, value: timedelta): data_type = cls.build_c_type() data_object = data_type() data_object.type_code = int.from_bytes( @@ -496,15 +446,11 @@ def from_python(cls, value: timedelta): byteorder=PROTOCOL_BYTE_ORDER ) data_object.value = int(value.total_seconds() * 1000) - return bytes(data_object) + + stream.write(data_object) @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): - if ctypes_object.type_code == int.from_bytes( - TC_NULL, - byteorder=PROTOCOL_BYTE_ORDER - ): - return None + def to_python_not_null(cls, ctypes_object, *args, **kwargs): return timedelta(milliseconds=ctypes_object.value) @@ -539,10 +485,7 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def from_python(cls, value: tuple): - if value is None: - return Null.from_python() - + def from_python_not_null(cls, stream, value: tuple): data_type = cls.build_c_type() data_object = data_type() data_object.type_code = int.from_bytes( @@ -550,15 +493,11 @@ def from_python(cls, value: tuple): byteorder=PROTOCOL_BYTE_ORDER ) data_object.type_id, data_object.ordinal = value - return bytes(data_object) + + stream.write(data_object) @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): - if ctypes_object.type_code == int.from_bytes( - TC_NULL, - byteorder=PROTOCOL_BYTE_ORDER - ): - return None + def to_python_not_null(cls, ctypes_object, *args, **kwargs): return ctypes_object.type_id, ctypes_object.ordinal @@ -571,7 +510,7 @@ class BinaryEnumObject(EnumObject): type_code = TC_BINARY_ENUM -class StandardArray(IgniteDataType): +class StandardArray(IgniteDataType, Nullable): """ Base class for array of primitives. Payload-only. 
""" @@ -599,19 +538,14 @@ def build_header_class(cls): ) @classmethod - def parse(cls, client: 'Client'): - tc_type = client.recv(ctypes.sizeof(ctypes.c_byte)) - - if tc_type == TC_NULL: - return Null.build_c_type(), tc_type - + def parse_not_null(cls, stream): header_class = cls.build_header_class() - buffer = tc_type + client.recv(ctypes.sizeof(header_class) - len(tc_type)) - header = header_class.from_buffer_copy(buffer) + header = stream.read_ctype(header_class) + stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + fields = [] for i in range(header.length): - c_type, buffer_fragment = cls.standard_type.parse(client) - buffer += buffer_fragment + c_type = cls.standard_type.parse(stream) fields.append(('element_{}'.format(i), c_type)) final_class = type( @@ -622,14 +556,15 @@ def parse(cls, client: 'Client'): '_fields_': fields, } ) - return final_class, buffer + return final_class @classmethod def to_python(cls, ctype_object, *args, **kwargs): - result = [] length = getattr(ctype_object, "length", None) if length is None: return None + + result = [] for i in range(length): result.append( cls.standard_type.to_python( @@ -640,9 +575,7 @@ def to_python(cls, ctype_object, *args, **kwargs): return result @classmethod - def from_python(cls, value): - if value is None: - return Null.from_python() + def from_python_not_null(cls, stream, value): header_class = cls.build_header_class() header = header_class() if hasattr(header, 'type_code'): @@ -652,11 +585,10 @@ def from_python(cls, value): ) length = len(value) header.length = length - buffer = bytearray(header) + stream.write(header) for x in value: - buffer += cls.standard_type.from_python(x) - return bytes(buffer) + cls.standard_type.from_python(stream, x) class StringArray(StandardArray): @@ -804,10 +736,7 @@ def build_header_class(cls): ) @classmethod - def from_python(cls, value): - if value is None: - return Null.from_python() - + def from_python_not_null(cls, stream, value): type_id, value = value header_class = cls.build_header_class() header = header_class() @@ -819,11 +748,10 @@ def from_python(cls, value): length = len(value) header.length = length header.type_id = type_id - buffer = bytearray(header) + stream.write(header) for x in value: - buffer += cls.standard_type.from_python(x) - return bytes(buffer) + cls.standard_type.from_python(stream, x) @classmethod def to_python(cls, ctype_object, *args, **kwargs): diff --git a/pyignite/queries/query.py b/pyignite/queries/query.py index 69b6fa2..5bd114b 100644 --- a/pyignite/queries/query.py +++ b/pyignite/queries/query.py @@ -21,6 +21,7 @@ from pyignite.connection import Connection from pyignite.constants import MIN_LONG, MAX_LONG, RHF_TOPOLOGY_CHANGED from pyignite.queries.response import Response, SQLResponse +from pyignite.stream import BinaryStream, READ_BACKWARD @attr.s @@ -47,31 +48,28 @@ def build_c_type(cls): ) return cls._query_c_type - def _build_header(self, buffer: bytearray, values: dict): + def _build_header(self, stream, values: dict): header_class = self.build_c_type() + header_len = ctypes.sizeof(header_class) + init_pos = stream.tell() + stream.seek(init_pos + header_len) + header = header_class() header.op_code = self.op_code if self.query_id is None: header.query_id = randint(MIN_LONG, MAX_LONG) for name, c_type in self.following: - buffer += c_type.from_python(values[name]) + c_type.from_python(stream, values[name]) - header.length = ( - len(buffer) - + ctypes.sizeof(header_class) - - ctypes.sizeof(ctypes.c_int) - ) + header.length = stream.tell() - init_pos - 
ctypes.sizeof(ctypes.c_int) + stream.seek(init_pos) return header - def from_python(self, values: dict = None): - if values is None: - values = {} - buffer = bytearray() - header = self._build_header(buffer, values) - buffer[:0] = bytes(header) - return header.query_id, bytes(buffer) + def from_python(self, stream, values: dict = None): + header = self._build_header(stream, values if values else {}) + stream.write(header) def perform( self, conn: Connection, query_params: dict = None, @@ -89,8 +87,9 @@ def perform( :return: instance of :class:`~pyignite.api.result.APIResult` with raw value (may undergo further processing in API functions). """ - _, send_buffer = self.from_python(query_params) - conn.send(send_buffer) + with BinaryStream(conn) as stream: + self.from_python(stream, query_params) + conn.send(stream.getbuffer()) if sql: response_struct = SQLResponse(protocol_version=conn.get_protocol_version(), @@ -99,8 +98,9 @@ def perform( response_struct = Response(protocol_version=conn.get_protocol_version(), following=response_config) - response_ctype, recv_buffer = response_struct.parse(conn) - response = response_ctype.from_buffer_copy(recv_buffer) + with BinaryStream(conn, conn.recv()) as stream: + response_ctype = response_struct.parse(stream) + response = stream.read_ctype(response_ctype, direction=READ_BACKWARD) # this test depends on protocol version if getattr(response, 'flags', False) & RHF_TOPOLOGY_CHANGED: @@ -140,7 +140,7 @@ def build_c_type(cls): ) return cls._query_c_type - def _build_header(self, buffer: bytearray, values: dict): - header = super()._build_header(buffer, values) + def _build_header(self, stream, values: dict): + header = super()._build_header(stream, values) header.config_length = header.length - ctypes.sizeof(type(header)) return header diff --git a/pyignite/queries/response.py b/pyignite/queries/response.py index 05a519a..016f577 100644 --- a/pyignite/queries/response.py +++ b/pyignite/queries/response.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from io import SEEK_CUR import attr from collections import OrderedDict @@ -21,6 +22,7 @@ from pyignite.connection import Connection from pyignite.datatypes import AnyDataObject, Bool, Int, Long, String, StringArray, Struct from pyignite.queries.op_codes import OP_SUCCESS +from pyignite.stream import READ_BACKWARD @attr.s @@ -55,12 +57,14 @@ def build_header(self): ) return self._response_header - def parse(self, conn: Connection): + def parse(self, stream): + init_pos = stream.tell() header_class = self.build_header() - buffer = bytearray(conn.recv(ctypes.sizeof(header_class))) - header = header_class.from_buffer_copy(buffer) - fields = [] + header_len = ctypes.sizeof(header_class) + header = stream.read_ctype(header_class) + stream.seek(header_len, SEEK_CUR) + fields = [] has_error = False if self.protocol_version and self.protocol_version >= (1, 4, 0): if header.flags & RHF_TOPOLOGY_CHANGED: @@ -76,20 +80,19 @@ def parse(self, conn: Connection): has_error = header.status_code != OP_SUCCESS if fields: - buffer += conn.recv( - sum([ctypes.sizeof(c_type) for _, c_type in fields]) - ) + stream.seek(sum(ctypes.sizeof(c_type) for _, c_type in fields), SEEK_CUR) if has_error: - msg_type, buffer_fragment = String.parse(conn) - buffer += buffer_fragment + msg_type = String.parse(stream) fields.append(('error_message', msg_type)) else: - self._parse_success(conn, buffer, fields) + self._parse_success(stream, fields) - return self._create_parse_result(conn, header_class, fields, buffer) + response_class = self._create_response_class(stream, header_class, fields) + stream.seek(init_pos + ctypes.sizeof(response_class)) + return self._create_response_class(stream, header_class, fields) - def _create_parse_result(self, conn: Connection, header_class, fields: list, buffer: bytearray): + def _create_response_class(self, stream, header_class, fields: list): response_class = type( 'Response', (header_class,), @@ -98,12 +101,11 @@ def _create_parse_result(self, conn: Connection, header_class, fields: list, buf '_fields_': fields, } ) - return response_class, bytes(buffer) + return response_class - def _parse_success(self, conn: Connection, buffer: bytearray, fields: list): + def _parse_success(self, stream, fields: list): for name, ignite_type in self.following: - c_type, buffer_fragment = ignite_type.parse(conn) - buffer += buffer_fragment + c_type = ignite_type.parse(stream) fields.append((name, c_type)) def to_python(self, ctype_object, *args, **kwargs): @@ -134,7 +136,7 @@ def fields_or_field_count(self): return 'fields', StringArray return 'field_count', Int - def _parse_success(self, conn: Connection, buffer: bytearray, fields: list): + def _parse_success(self, stream, fields: list): following = [ self.fields_or_field_count(), ('row_count', Int), @@ -142,9 +144,8 @@ def _parse_success(self, conn: Connection, buffer: bytearray, fields: list): if self.has_cursor: following.insert(0, ('cursor', Long)) body_struct = Struct(following) - body_class, body_buffer = body_struct.parse(conn) - body = body_class.from_buffer_copy(body_buffer) - buffer += body_buffer + body_class = body_struct.parse(stream) + body = stream.read_ctype(body_class, direction=READ_BACKWARD) if self.include_field_names: field_count = body.fields.length @@ -155,9 +156,8 @@ def _parse_success(self, conn: Connection, buffer: bytearray, fields: list): for i in range(body.row_count): row_fields = [] for j in range(field_count): - field_class, field_buffer = AnyDataObject.parse(conn) + field_class = AnyDataObject.parse(stream) 
row_fields.append(('column_{}'.format(j), field_class)) - buffer += field_buffer row_class = type( 'SQLResponseRow', @@ -182,7 +182,7 @@ def _parse_success(self, conn: Connection, buffer: bytearray, fields: list): ('more', ctypes.c_byte), ] - def _create_parse_result(self, conn: Connection, header_class, fields: list, buffer: bytearray): + def _create_response_class(self, stream, header_class, fields: list): final_class = type( 'SQLResponse', (header_class,), @@ -191,8 +191,7 @@ def _create_parse_result(self, conn: Connection, header_class, fields: list, buf '_fields_': fields, } ) - buffer += conn.recv(ctypes.sizeof(final_class) - len(buffer)) - return final_class, bytes(buffer) + return final_class def to_python(self, ctype_object, *args, **kwargs): if getattr(ctype_object, 'status_code', 0) == 0: diff --git a/pyignite/stream/__init__.py b/pyignite/stream/__init__.py new file mode 100644 index 0000000..94153b4 --- /dev/null +++ b/pyignite/stream/__init__.py @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .binary_stream import BinaryStream, READ_FORWARD, READ_BACKWARD \ No newline at end of file diff --git a/pyignite/stream/binary_stream.py b/pyignite/stream/binary_stream.py new file mode 100644 index 0000000..1ecdcfb --- /dev/null +++ b/pyignite/stream/binary_stream.py @@ -0,0 +1,111 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import ctypes +from io import BytesIO + +import pyignite.utils as ignite_utils + +READ_FORWARD = 0 +READ_BACKWARD = 1 + + +class BinaryStream: + def __init__(self, conn, buf=None): + """ + Initialize binary stream around buffers. + + :param buf: Buffer, optional parameter. If not passed, creates empty BytesIO. + :param conn: Connection instance, required. 
+ """ + from pyignite.connection import Connection + + if not isinstance(conn, Connection): + raise TypeError(f"invalid parameter: expected instance of {Connection}") + + if buf and not isinstance(buf, (bytearray, bytes, memoryview)): + raise TypeError(f"invalid parameter: expected bytes-like object") + + self.conn = conn + self.stream = BytesIO(buf) if buf else BytesIO() + + @property + def compact_footer(self) -> bool: + return self.conn.client.compact_footer + + @compact_footer.setter + def compact_footer(self, value: bool): + self.conn.client.compact_footer = value + + def read(self, size): + buf = bytearray(size) + self.stream.readinto(buf) + return buf + + def read_ctype(self, ctype_class, position=None, direction=READ_FORWARD): + ctype_len = ctypes.sizeof(ctype_class) + + if position is not None and position >= 0: + init_position = position + else: + init_position = self.tell() + + if direction == READ_FORWARD: + start, end = init_position, init_position + ctype_len + else: + start, end = init_position - ctype_len, init_position + + buf = self.stream.getbuffer()[start:end] + return ctype_class.from_buffer_copy(buf) + + def write(self, buf): + return self.stream.write(buf) + + def tell(self): + return self.stream.tell() + + def seek(self, *args, **kwargs): + return self.stream.seek(*args, **kwargs) + + def getvalue(self): + return self.stream.getvalue() + + def getbuffer(self): + return self.stream.getbuffer() + + def mem_view(self, start=-1, offset=0): + start = start if start >= 0 else self.tell() + return self.stream.getbuffer()[start:start+offset] + + def hashcode(self, start, bytes_len): + return ignite_utils.hashcode(self.stream.getbuffer()[start:start+bytes_len]) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.stream.close() + + def get_dataclass(self, header): + # get field names from outer space + result = self.conn.client.query_binary_type( + header.type_id, + header.schema_id + ) + if not result: + raise RuntimeError('Binary type is not registered') + return result + + def register_binary_type(self, *args, **kwargs): + return self.conn.client.register_binary_type(*args, **kwargs) diff --git a/pyignite/utils.py b/pyignite/utils.py index ef7b6f6..3d0378f 100644 --- a/pyignite/utils.py +++ b/pyignite/utils.py @@ -19,7 +19,7 @@ from functools import wraps from threading import Event, Thread -from typing import Any, Callable, Optional, Type, Tuple, Union +from typing import Any, Optional, Type, Tuple, Union from pyignite.datatypes.base import IgniteDataType from .constants import * @@ -85,29 +85,7 @@ def int_overflow(value: int) -> int: return ((value ^ 0x80000000) & 0xffffffff) - 0x80000000 -def unwrap_binary(client: 'Client', wrapped: tuple) -> object: - """ - Unwrap wrapped BinaryObject and convert it to Python data. - - :param client: connection to Ignite cluster, - :param wrapped: `WrappedDataObject` value, - :return: dict representing wrapped BinaryObject. - """ - from pyignite.datatypes.complex import BinaryObject - - blob, offset = wrapped - conn_clone = client.random_node.clone(prefetch=blob) - conn_clone.pos = offset - data_class, data_bytes = BinaryObject.parse(conn_clone) - result = BinaryObject.to_python( - data_class.from_buffer_copy(data_bytes), - client, - ) - conn_clone.close() - return result - - -def hashcode(data: Union[str, bytes]) -> int: +def hashcode(data: Union[str, bytes, bytearray, memoryview]) -> int: """ Calculate hash code used for identifying objects in Ignite binary API. 
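For reference, a minimal pure-Python sketch of the Java-compatible hash this function computes is shown below, assuming the same semantics as the pure-Python fallback and the C extension introduced later in this series; the name `hashcode_sketch` and the inlined copy of `int_overflow` are illustrative only, not code from this patch:

```
def int_overflow(value: int) -> int:
    # Emulate Java's signed 32-bit integer overflow (same helper as in pyignite/utils.py).
    return ((value ^ 0x80000000) & 0xffffffff) - 0x80000000


def hashcode_sketch(data) -> int:
    # None hashes to 0.
    if data is None:
        return 0
    if isinstance(data, str):
        # Strings, Java-style: h = 31 * h + code_point, starting from 0.
        result = 0
        for ch in data:
            result = int_overflow(31 * result + ord(ch))
    else:
        # bytes, bytearray, memoryview, Java-style: h = 31 * h + signed_byte, starting from 1.
        result = 1
        for b in bytes(data):
            b = b - 0x100 if b > 0x7f else b  # reinterpret the byte as signed
            result = int_overflow(31 * result + b)
    return result


# hashcode_sketch('test') == 3556498, the value Java's "test".hashCode() produces.
```

The widened signature above is what allows `memoryview` slices of a stream buffer to be hashed without an intermediate copy.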
diff --git a/requirements/tests.txt b/requirements/tests.txt index 327f501..893928e 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -4,3 +4,4 @@ pytest==3.6.1 pytest-cov==2.5.1 teamcity-messages==1.21 psutil==5.6.5 +jinja2==2.11.3 diff --git a/tests/config/ignite-config-ssl.xml b/tests/config/ignite-config-ssl.xml deleted file mode 100644 index 827405c..0000000 --- a/tests/config/ignite-config-ssl.xml +++ /dev/null @@ -1,51 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/tests/config/ignite-config.xml b/tests/config/ignite-config.xml deleted file mode 100644 index 09fba2c..0000000 --- a/tests/config/ignite-config.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - - - - - - - - - - - diff --git a/tests/config/ignite-config-base.xml b/tests/config/ignite-config.xml.jinja2 similarity index 65% rename from tests/config/ignite-config-base.xml rename to tests/config/ignite-config.xml.jinja2 index 7487618..322a958 100644 --- a/tests/config/ignite-config-base.xml +++ b/tests/config/ignite-config.xml.jinja2 @@ -26,12 +26,35 @@ http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util.xsd"> - - - - + + {% if use_ssl %} + + {% endif %} + + + + + + + {% if use_ssl %} + + + + + + + + + + + + + {% endif %} + + + + - @@ -42,7 +65,7 @@ - 127.0.0.1:48500..48503 + 127.0.0.1:48500..48510 @@ -69,9 +92,9 @@ - + - + diff --git a/tests/config/log4j.xml b/tests/config/log4j.xml.jinja2 similarity index 90% rename from tests/config/log4j.xml rename to tests/config/log4j.xml.jinja2 index f5562d0..628f66c 100644 --- a/tests/config/log4j.xml +++ b/tests/config/log4j.xml.jinja2 @@ -23,8 +23,8 @@ + filePattern="logs/ignite-log-{{ ignite_instance_idx }}-%i.txt" + fileName="logs/ignite-log-{{ ignite_instance_idx }}.txt"> diff --git a/tests/conftest.py b/tests/conftest.py index 9974b16..54a7fda 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -22,7 +22,7 @@ from pyignite import Client from pyignite.constants import * from pyignite.api import cache_create, cache_destroy -from tests.util import _start_ignite, start_ignite_gen, get_request_grid_idx +from tests.util import _start_ignite, start_ignite_gen class BoolParser(argparse.Action): @@ -134,12 +134,6 @@ def cache(client): cache_destroy(conn, cache_name) -@pytest.fixture(autouse=True) -def log_init(): - # Init log call timestamp - get_request_grid_idx() - - @pytest.fixture(scope='module') def start_client(use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, ssl_version,username, password): diff --git a/tests/test_affinity_request_routing.py b/tests/test_affinity_request_routing.py index eb46ab6..cd0c015 100644 --- a/tests/test_affinity_request_routing.py +++ b/tests/test_affinity_request_routing.py @@ -13,18 +13,56 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from collections import OrderedDict - +from collections import OrderedDict, deque import pytest from pyignite import * +from pyignite.connection import Connection from pyignite.datatypes import * from pyignite.datatypes.cache_config import CacheMode from pyignite.datatypes.prop_codes import * from tests.util import * -@pytest.mark.parametrize("key,grid_idx", [(1, 3), (2, 1), (3, 1), (4, 3), (5, 1), (6, 3), (11, 2), (13, 2), (19, 2)]) +requests = deque() +old_send = Connection.send + + +def patched_send(self, *args, **kwargs): + """Patched send function that push to queue idx of server to which request is routed.""" + requests.append(self.port % 100) + return old_send(self, *args, **kwargs) + + +def setup_function(): + requests.clear() + Connection.send = patched_send + + +def teardown_function(): + Connection.send = old_send + + +def wait_for_affinity_distribution(cache, key, node_idx, timeout=30): + real_node_idx = 0 + + def check_grid_idx(): + nonlocal real_node_idx + try: + cache.get(key) + real_node_idx = requests.pop() + except (OSError, IOError): + return False + return real_node_idx == node_idx + + res = wait_for_condition(check_grid_idx, timeout=timeout) + + if not res: + raise TimeoutError(f"failed to wait for affinity distribution, expected node_idx {node_idx}," + f"got {real_node_idx} instead") + + +@pytest.mark.parametrize("key,grid_idx", [(1, 1), (2, 2), (3, 3), (4, 1), (5, 1), (6, 2), (11, 1), (13, 1), (19, 1)]) @pytest.mark.parametrize("backups", [0, 1, 2, 3]) def test_cache_operation_on_primitive_key_routes_request_to_primary_node( request, key, grid_idx, backups, client_partition_aware): @@ -34,52 +72,51 @@ def test_cache_operation_on_primitive_key_routes_request_to_primary_node( PROP_BACKUPS_NUMBER: backups, }) - # Warm up affinity map cache.put(key, key) - get_request_grid_idx() + wait_for_affinity_distribution(cache, key, grid_idx) # Test cache.get(key) - assert get_request_grid_idx() == grid_idx + assert requests.pop() == grid_idx cache.put(key, key) - assert get_request_grid_idx("Put") == grid_idx + assert requests.pop() == grid_idx cache.replace(key, key + 1) - assert get_request_grid_idx("Replace") == grid_idx + assert requests.pop() == grid_idx cache.clear_key(key) - assert get_request_grid_idx("ClearKey") == grid_idx + assert requests.pop() == grid_idx cache.contains_key(key) - assert get_request_grid_idx("ContainsKey") == grid_idx + assert requests.pop() == grid_idx cache.get_and_put(key, 3) - assert get_request_grid_idx("GetAndPut") == grid_idx + assert requests.pop() == grid_idx cache.get_and_put_if_absent(key, 4) - assert get_request_grid_idx("GetAndPutIfAbsent") == grid_idx + assert requests.pop() == grid_idx cache.put_if_absent(key, 5) - assert get_request_grid_idx("PutIfAbsent") == grid_idx + assert requests.pop() == grid_idx cache.get_and_remove(key) - assert get_request_grid_idx("GetAndRemove") == grid_idx + assert requests.pop() == grid_idx cache.get_and_replace(key, 6) - assert get_request_grid_idx("GetAndReplace") == grid_idx + assert requests.pop() == grid_idx cache.remove_key(key) - assert get_request_grid_idx("RemoveKey") == grid_idx + assert requests.pop() == grid_idx cache.remove_if_equals(key, -1) - assert get_request_grid_idx("RemoveIfEquals") == grid_idx + assert requests.pop() == grid_idx cache.replace(key, -1) - assert get_request_grid_idx("Replace") == grid_idx + assert requests.pop() == grid_idx cache.replace_if_equals(key, 10, -10) - assert get_request_grid_idx("ReplaceIfEquals") == grid_idx + assert requests.pop() == grid_idx 
@pytest.mark.skip(reason="Custom key objects are not supported yet") @@ -121,31 +158,28 @@ class AffinityTestType1( cache.put(key_obj, 1) cache.put(key_obj, 2) - assert get_request_grid_idx("Put") == grid_idx + assert requests.pop() == grid_idx -@pytest.mark.skip("https://issues.apache.org/jira/browse/IGNITE-13967") def test_cache_operation_routed_to_new_cluster_node(request, start_ignite_server, start_client): client = start_client(partition_aware=True) client.connect([("127.0.0.1", 10801), ("127.0.0.1", 10802), ("127.0.0.1", 10803), ("127.0.0.1", 10804)]) cache = client.get_or_create_cache(request.node.name) key = 12 + wait_for_affinity_distribution(cache, key, 3) cache.put(key, key) cache.put(key, key) - assert get_request_grid_idx("Put") == 3 + assert requests.pop() == 3 srv = start_ignite_server(4) try: # Wait for rebalance and partition map exchange - def check_grid_idx(): - cache.get(key) - return get_request_grid_idx() == 4 - wait_for_condition(check_grid_idx) + wait_for_affinity_distribution(cache, key, 4) # Response is correct and comes from the new node res = cache.get_and_remove(key) assert res == key - assert get_request_grid_idx("GetAndRemove") == 4 + assert requests.pop() == 4 finally: kill_process_tree(srv.pid) @@ -167,13 +201,13 @@ def verify_random_node(cache): key = 1 cache.put(key, key) - idx1 = get_request_grid_idx("Put") + idx1 = requests.pop() idx2 = idx1 # Try 10 times - random node may end up being the same for _ in range(1, 10): cache.put(key, key) - idx2 = get_request_grid_idx("Put") + idx2 = requests.pop() if idx2 != idx1: break assert idx1 != idx2 diff --git a/tests/test_affinity_single_connection.py b/tests/test_affinity_single_connection.py index c40393c..1943384 100644 --- a/tests/test_affinity_single_connection.py +++ b/tests/test_affinity_single_connection.py @@ -13,10 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest - -from tests.util import get_request_grid_idx - def test_all_cache_operations_with_partition_aware_client_on_single_server(request, client_partition_aware_single_server): cache = client_partition_aware_single_server.get_or_create_cache(request.node.name) diff --git a/tests/test_cache_composite_key_class_sql.py b/tests/test_cache_composite_key_class_sql.py index 2f1705f..989a229 100644 --- a/tests/test_cache_composite_key_class_sql.py +++ b/tests/test_cache_composite_key_class_sql.py @@ -111,13 +111,12 @@ def test_python_sql_finds_inserted_value_with_composite_key(client): def validate_query_result(student_key, student_val, query_result): - ''' + """ Compare query result with expected key and value. 
- ''' + """ assert len(query_result) == 2 sql_row = dict(zip(query_result[0], query_result[1])) - assert sql_row["_KEY"][0] == student_key._buffer assert sql_row['ID'] == student_key.ID assert sql_row['DEPT'] == student_key.DEPT assert sql_row['NAME'] == student_val.NAME diff --git a/tests/test_sql.py b/tests/test_sql.py index 15f84ee..c896afb 100644 --- a/tests/test_sql.py +++ b/tests/test_sql.py @@ -22,7 +22,9 @@ ) from pyignite.datatypes.prop_codes import * from pyignite.exceptions import SQLError -from pyignite.utils import entity_id, unwrap_binary +from pyignite.utils import entity_id +from pyignite.binary import unwrap_binary + initial_data = [ ('John', 'Doe', 5), diff --git a/tests/util.py b/tests/util.py index 1d6acd6..90f0146 100644 --- a/tests/util.py +++ b/tests/util.py @@ -15,6 +15,8 @@ import glob import os + +import jinja2 as jinja2 import psutil import re import signal @@ -72,22 +74,19 @@ def get_ignite_config_path(use_ssl=False): if use_ssl: file_name = "ignite-config-ssl.xml" else: - file_name = "ignite-config.xml" + file_name = "ignite-config.xml.jinja2" return os.path.join(get_test_dir(), "config", file_name) def check_server_started(idx=1): - log_file = os.path.join(get_test_dir(), "logs", f"ignite-log-{idx}.txt") - if not os.path.exists(log_file): - return False - pattern = re.compile('^Topology snapshot.*') - with open(log_file) as f: - for line in f.readlines(): - if pattern.match(line): - return True + for log_file in get_log_files(idx): + with open(log_file) as f: + for line in f.readlines(): + if pattern.match(line): + return True return False @@ -102,20 +101,33 @@ def kill_process_tree(pid): os.kill(pid, signal.SIGKILL) +templateLoader = jinja2.FileSystemLoader(searchpath=os.path.join(get_test_dir(), "config")) +templateEnv = jinja2.Environment(loader=templateLoader) + + +def create_config_file(tpl_name, file_name, **kwargs): + template = templateEnv.get_template(tpl_name) + with open(os.path.join(get_test_dir(), "config", file_name), mode='w') as f: + f.write(template.render(**kwargs)) + + def _start_ignite(idx=1, debug=False, use_ssl=False): clear_logs(idx) runner = get_ignite_runner() env = os.environ.copy() - env['IGNITE_INSTANCE_INDEX'] = str(idx) - env['IGNITE_CLIENT_PORT'] = str(10800 + idx) if debug: env["JVM_OPTS"] = "-Djava.net.preferIPv4Stack=true -Xdebug -Xnoagent -Djava.compiler=NONE " \ "-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005 " - ignite_cmd = [runner, get_ignite_config_path(use_ssl)] + params = {'ignite_instance_idx': str(idx), 'ignite_client_port': 10800 + idx, 'use_ssl': use_ssl} + + create_config_file('log4j.xml.jinja2', f'log4j-{idx}.xml', **params) + create_config_file('ignite-config.xml.jinja2', f'ignite-config-{idx}.xml', **params) + + ignite_cmd = [runner, os.path.join(get_test_dir(), "config", f'ignite-config-{idx}.xml')] print("Starting Ignite server node:", ignite_cmd) srv = subprocess.Popen(ignite_cmd, env=env, cwd=get_test_dir()) @@ -142,38 +154,3 @@ def get_log_files(idx=1): def clear_logs(idx=1): for f in get_log_files(idx): os.remove(f) - - -def read_log_file(file, idx): - i = -1 - with open(file) as f: - lines = f.readlines() - for line in lines: - i += 1 - - if i < read_log_file.last_line[idx]: - continue - - if i > read_log_file.last_line[idx]: - read_log_file.last_line[idx] = i - - # Example: Client request received [reqId=1, addr=/127.0.0.1:51694, - # req=org.apache.ignite.internal.processors.platform.client.cache.ClientCachePutRequest@1f33101e] - res = re.match("Client request received 
.*?req=org.apache.ignite.internal.processors." - "platform.client.cache.ClientCache([a-zA-Z]+)Request@", line) - - if res is not None: - yield res.group(1) - - -def get_request_grid_idx(message="Get"): - res = -1 - for i in range(1, 5): - for log_file in get_log_files(i): - for log in read_log_file(log_file, i): - if log == message: - res = i # Do not exit early to advance all log positions - return res - - -read_log_file.last_line = [0, 0, 0, 0, 0] \ No newline at end of file diff --git a/tox.ini b/tox.ini index 69db226..4361413 100644 --- a/tox.ini +++ b/tox.ini @@ -34,6 +34,10 @@ usedevelop = True commands = pytest {env:PYTESTARGS:} {posargs} +[jenkins] +setenv: + PYTESTARGS = --junitxml=junit-{envname}.xml + [no-ssl] setenv: PYTEST_ADDOPTS = --examples @@ -54,3 +58,18 @@ setenv: {[ssl]setenv} [testenv:py{36,37,38}-ssl-password] setenv: {[ssl-password]setenv} + +[testenv:py{36,37,38}-jenkins-no-ssl] +setenv: + {[no-ssl]setenv} + {[jenkins]setenv} + +[testenv:py{36,37,38}-jenkins-ssl] +setenv: + {[ssl]setenv} + {[jenkins]setenv} + +[testenv:py{36,37,38}-jenkins-ssl-password] +setenv: + {[ssl-password]setenv} + {[jenkins]setenv} From ba268ccbc747cc667eca2722c646b9395276e738 Mon Sep 17 00:00:00 2001 From: Ivan Dashchinskiy Date: Wed, 10 Feb 2021 15:38:56 +0300 Subject: [PATCH 14/62] IGNITE-14154 Remove unnecessary test, remove duplicates This closes #15 --- pyignite/queries/response.py | 23 +++++------------------ tests/config/ignite-config.xml.jinja2 | 11 ----------- tests/test_affinity_request_routing.py | 4 ---- 3 files changed, 5 insertions(+), 33 deletions(-) diff --git a/pyignite/queries/response.py b/pyignite/queries/response.py index 016f577..ca2ae14 100644 --- a/pyignite/queries/response.py +++ b/pyignite/queries/response.py @@ -19,7 +19,6 @@ import ctypes from pyignite.constants import RHF_TOPOLOGY_CHANGED, RHF_ERROR -from pyignite.connection import Connection from pyignite.datatypes import AnyDataObject, Bool, Int, Long, String, StringArray, Struct from pyignite.queries.op_codes import OP_SUCCESS from pyignite.stream import READ_BACKWARD @@ -30,6 +29,7 @@ class Response: following = attr.ib(type=list, factory=list) protocol_version = attr.ib(type=tuple, factory=tuple) _response_header = None + _response_class_name = 'Response' def __attrs_post_init__(self): # replace None with empty list @@ -88,19 +88,16 @@ def parse(self, stream): else: self._parse_success(stream, fields) - response_class = self._create_response_class(stream, header_class, fields) - stream.seek(init_pos + ctypes.sizeof(response_class)) - return self._create_response_class(stream, header_class, fields) - - def _create_response_class(self, stream, header_class, fields: list): response_class = type( - 'Response', + self._response_class_name, (header_class,), { '_pack_': 1, '_fields_': fields, } ) + + stream.seek(init_pos + ctypes.sizeof(response_class)) return response_class def _parse_success(self, stream, fields: list): @@ -130,6 +127,7 @@ class SQLResponse(Response): """ include_field_names = attr.ib(type=bool, default=False) has_cursor = attr.ib(type=bool, default=False) + _response_class_name = 'SQLResponse' def fields_or_field_count(self): if self.include_field_names: @@ -182,17 +180,6 @@ def _parse_success(self, stream, fields: list): ('more', ctypes.c_byte), ] - def _create_response_class(self, stream, header_class, fields: list): - final_class = type( - 'SQLResponse', - (header_class,), - { - '_pack_': 1, - '_fields_': fields, - } - ) - return final_class - def to_python(self, ctype_object, *args, 
**kwargs): if getattr(ctype_object, 'status_code', 0) == 0: result = { diff --git a/tests/config/ignite-config.xml.jinja2 b/tests/config/ignite-config.xml.jinja2 index 322a958..834b5d8 100644 --- a/tests/config/ignite-config.xml.jinja2 +++ b/tests/config/ignite-config.xml.jinja2 @@ -81,17 +81,6 @@ - - - - - - - - - - - diff --git a/tests/test_affinity_request_routing.py b/tests/test_affinity_request_routing.py index cd0c015..866222b 100644 --- a/tests/test_affinity_request_routing.py +++ b/tests/test_affinity_request_routing.py @@ -184,10 +184,6 @@ def test_cache_operation_routed_to_new_cluster_node(request, start_ignite_server kill_process_tree(srv.pid) -def test_unsupported_affinity_cache_operation_routed_to_random_node(client_partition_aware): - verify_random_node(client_partition_aware.get_cache("custom-affinity")) - - def test_replicated_cache_operation_routed_to_random_node(request, client_partition_aware): cache = client_partition_aware.get_or_create_cache({ PROP_NAME: request.node.name, From 7743b232cff2a652d2eefe3965faba789bb8203e Mon Sep 17 00:00:00 2001 From: Ivan Dashchinskiy Date: Mon, 15 Feb 2021 15:31:39 +0300 Subject: [PATCH 15/62] IGNITE-14167 Simplify reconnecting, fix affinity topology change detection This closes #16 --- .travis.yml | 24 +- pyignite/api/affinity.py | 6 +- pyignite/cache.py | 4 + pyignite/client.py | 8 +- pyignite/connection/__init__.py | 410 +------------------------ pyignite/connection/connection.py | 381 +++++++++++++++++++++++ pyignite/datatypes/complex.py | 2 +- pyignite/datatypes/internal.py | 20 +- pyignite/queries/query.py | 8 +- pyignite/stream/binary_stream.py | 5 +- pyignite/utils.py | 25 -- tests/config/log4j.xml.jinja2 | 1 - tests/conftest.py | 4 +- tests/test_affinity_request_routing.py | 8 +- tox.ini | 6 - 15 files changed, 441 insertions(+), 471 deletions(-) create mode 100644 pyignite/connection/connection.py diff --git a/.travis.yml b/.travis.yml index f884bdb..3095941 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+language: python sudo: required addons: @@ -21,7 +22,9 @@ addons: - openjdk-8-jdk env: - - IGNITE_VERSION=2.9.1 IGNITE_HOME=/opt/ignite + global: + - IGNITE_VERSION=2.9.1 + - IGNITE_HOME=/opt/ignite before_install: - curl -L https://apache-mirror.rbc.ru/pub/apache/ignite/${IGNITE_VERSION}/apache-ignite-slim-${IGNITE_VERSION}-bin.zip > ignite.zip @@ -29,10 +32,17 @@ before_install: - mv /opt/apache-ignite-slim-${IGNITE_VERSION}-bin /opt/ignite - mv /opt/ignite/libs/optional/ignite-log4j2 /opt/ignite/libs/ -language: python -python: - - "3.6" - - "3.7" - - "3.8" -install: pip install tox-travis +jobs: + include: + - python: '3.6' + arch: amd64 + env: TOXENV=py36-no-ssl,py36-ssl,py36-ssl-password + - python: '3.7' + arch: amd64 + env: TOXENV=py37-no-ssl,py37-ssl,py37-ssl-password + - python: '3.8' + arch: amd64 + env: TOXENV=py38-no-ssl,py38-ssl,py38-ssl-password + +install: pip install tox script: tox \ No newline at end of file diff --git a/pyignite/api/affinity.py b/pyignite/api/affinity.py index 16148a1..7d09517 100644 --- a/pyignite/api/affinity.py +++ b/pyignite/api/affinity.py @@ -55,11 +55,13 @@ partition_mapping = StructArray([ ('is_applicable', Bool), - ('cache_mapping', Conditional(lambda ctx: ctx['is_applicable'] and ctx['is_applicable'].value == 1, + ('cache_mapping', Conditional(['is_applicable'], + lambda ctx: ctx['is_applicable'] and ctx['is_applicable'].value == 1, lambda ctx: ctx['is_applicable'], cache_mapping, empty_cache_mapping)), - ('node_mapping', Conditional(lambda ctx: ctx['is_applicable'] and ctx['is_applicable'].value == 1, + ('node_mapping', Conditional(['is_applicable'], + lambda ctx: ctx['is_applicable'] and ctx['is_applicable'].value == 1, lambda ctx: ctx['is_applicable'], node_mapping, empty_node_mapping)), ]) diff --git a/pyignite/cache.py b/pyignite/cache.py index dd7dac4..ea672a8 100644 --- a/pyignite/cache.py +++ b/pyignite/cache.py @@ -283,6 +283,10 @@ def get_best_node( parts += len(p) self.affinity['number_of_partitions'] = parts + + for conn in self.client._nodes: + if not conn.alive: + conn.reconnect() else: # get number of partitions parts = self.affinity.get('number_of_partitions') diff --git a/pyignite/client.py b/pyignite/client.py index 83cb196..77c6373 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -182,15 +182,9 @@ def connect(self, *args): if not self.partition_aware: # do not try to open more nodes self._current_node = i - else: - # take a chance to schedule the reconnection - # for all the failed connections, that was probed - # before this - for failed_node in self._nodes[:i]: - failed_node.reconnect() except connection_errors: - conn._fail() + conn.failed = True if self.partition_aware: # schedule the reconnection conn.reconnect() diff --git a/pyignite/connection/__init__.py b/pyignite/connection/__init__.py index 0e793f8..1114594 100644 --- a/pyignite/connection/__init__.py +++ b/pyignite/connection/__init__.py @@ -33,414 +33,6 @@ as well as Ignite protocol handshaking. 
""" -from collections import OrderedDict -import socket -from threading import RLock -from typing import Union - -from pyignite.constants import * -from pyignite.exceptions import ( - HandshakeError, ParameterError, SocketError, connection_errors, -) -from pyignite.datatypes import Byte, Int, Short, String, UUIDObject -from pyignite.datatypes.internal import Struct -from pyignite.utils import DaemonicTimer - -from .handshake import HandshakeRequest -from .ssl import wrap - +from .connection import Connection __all__ = ['Connection'] - -from ..stream import BinaryStream, READ_BACKWARD - - -class Connection: - """ - This is a `pyignite` class, that represents a connection to Ignite - node. It serves multiple purposes: - - * socket wrapper. Detects fragmentation and network errors. See also - https://docs.python.org/3/howto/sockets.html, - * binary protocol connector. Incapsulates handshake and failover reconnection. - """ - - _socket = None - _failed = None - _in_use = None - - client = None - host = None - port = None - timeout = None - username = None - password = None - ssl_params = {} - uuid = None - - @staticmethod - def _check_ssl_params(params): - expected_args = [ - 'use_ssl', - 'ssl_version', - 'ssl_ciphers', - 'ssl_cert_reqs', - 'ssl_keyfile', - 'ssl_keyfile_password', - 'ssl_certfile', - 'ssl_ca_certfile', - ] - for param in params: - if param not in expected_args: - raise ParameterError(( - 'Unexpected parameter for connection initialization: `{}`' - ).format(param)) - - def __init__( - self, client: 'Client', timeout: float = 2.0, - username: str = None, password: str = None, **ssl_params - ): - """ - Initialize connection. - - For the use of the SSL-related parameters see - https://docs.python.org/3/library/ssl.html#ssl-certificates. - - :param client: Ignite client object, - :param timeout: (optional) sets timeout (in seconds) for each socket - operation including `connect`. 0 means non-blocking mode, which is - virtually guaranteed to fail. Can accept integer or float value. - Default is None (blocking mode), - :param use_ssl: (optional) set to True if Ignite server uses SSL - on its binary connector. Defaults to use SSL when username - and password has been supplied, not to use SSL otherwise, - :param ssl_version: (optional) SSL version constant from standard - `ssl` module. Defaults to TLS v1.1, as in Ignite 2.5, - :param ssl_ciphers: (optional) ciphers to use. If not provided, - `ssl` default ciphers are used, - :param ssl_cert_reqs: (optional) determines how the remote side - certificate is treated: - - * `ssl.CERT_NONE` − remote certificate is ignored (default), - * `ssl.CERT_OPTIONAL` − remote certificate will be validated, - if provided, - * `ssl.CERT_REQUIRED` − valid remote certificate is required, - - :param ssl_keyfile: (optional) a path to SSL key file to identify - local (client) party, - :param ssl_keyfile_password: (optional) password for SSL key file, - can be provided when key file is encrypted to prevent OpenSSL - password prompt, - :param ssl_certfile: (optional) a path to ssl certificate file - to identify local (client) party, - :param ssl_ca_certfile: (optional) a path to a trusted certificate - or a certificate chain. Required to check the validity of the remote - (server-side) certificate, - :param username: (optional) user name to authenticate to Ignite - cluster, - :param password: (optional) password to authenticate to Ignite cluster. 
- """ - self.client = client - self.timeout = timeout - self.username = username - self.password = password - self._check_ssl_params(ssl_params) - if self.username and self.password and 'use_ssl' not in ssl_params: - ssl_params['use_ssl'] = True - self.ssl_params = ssl_params - self._failed = False - self._mux = RLock() - self._in_use = False - - @property - def socket(self) -> socket.socket: - """ Network socket. """ - return self._socket - - @property - def closed(self) -> bool: - """ Tells if socket is closed. """ - with self._mux: - return self._socket is None - - @property - def failed(self) -> bool: - """ Tells if connection is failed. """ - with self._mux: - return self._failed - - @property - def alive(self) -> bool: - """ Tells if connection is up and no failure detected. """ - with self._mux: - return not (self._failed or self.closed) - - def __repr__(self) -> str: - return '{}:{}'.format(self.host or '?', self.port or '?') - - _wrap = wrap - - def get_protocol_version(self): - """ - Returns the tuple of major, minor, and revision numbers of the used - thin protocol version, or None, if no connection to the Ignite cluster - was yet established. - """ - return self.client.protocol_version - - def _fail(self): - """ set client to failed state. """ - with self._mux: - self._failed = True - - self._in_use = False - - def read_response(self) -> Union[dict, OrderedDict]: - """ - Processes server's response to the handshake request. - - :return: handshake data. - """ - response_start = Struct([ - ('length', Int), - ('op_code', Byte), - ]) - with BinaryStream(self, self.recv()) as stream: - start_class = response_start.parse(stream) - start = stream.read_ctype(start_class, direction=READ_BACKWARD) - data = response_start.to_python(start) - response_end = None - if data['op_code'] == 0: - response_end = Struct([ - ('version_major', Short), - ('version_minor', Short), - ('version_patch', Short), - ('message', String), - ]) - elif self.get_protocol_version() >= (1, 4, 0): - response_end = Struct([ - ('node_uuid', UUIDObject), - ]) - if response_end: - end_class = response_end.parse(stream) - end = stream.read_ctype(end_class, direction=READ_BACKWARD) - data.update(response_end.to_python(end)) - return data - - def connect( - self, host: str = None, port: int = None - ) -> Union[dict, OrderedDict]: - """ - Connect to the given server node with protocol version fallback. - - :param host: Ignite server node's host name or IP, - :param port: Ignite server node's port number. - """ - detecting_protocol = False - - with self._mux: - if self._in_use: - raise ConnectionError('Connection is in use.') - self._in_use = True - - # choose highest version first - if self.client.protocol_version is None: - detecting_protocol = True - self.client.protocol_version = max(PROTOCOLS) - - try: - result = self._connect_version(host, port) - except HandshakeError as e: - if e.expected_version in PROTOCOLS: - self.client.protocol_version = e.expected_version - result = self._connect_version(host, port) - else: - raise e - except connection_errors: - # restore undefined protocol version - if detecting_protocol: - self.client.protocol_version = None - raise - - # connection is ready for end user - self.uuid = result.get('node_uuid', None) # version-specific (1.4+) - - self._failed = False - return result - - def _connect_version( - self, host: str = None, port: int = None, - ) -> Union[dict, OrderedDict]: - """ - Connect to the given server node using protocol version - defined on client. 
- - :param host: Ignite server node's host name or IP, - :param port: Ignite server node's port number. - """ - - host = host or IGNITE_DEFAULT_HOST - port = port or IGNITE_DEFAULT_PORT - - self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self._socket.settimeout(self.timeout) - self._socket = self._wrap(self.socket) - self._socket.connect((host, port)) - - protocol_version = self.client.protocol_version - - hs_request = HandshakeRequest( - protocol_version, - self.username, - self.password - ) - - with BinaryStream(self) as stream: - hs_request.from_python(stream) - self.send(stream.getbuffer()) - - hs_response = self.read_response() - if hs_response['op_code'] == 0: - # disconnect but keep in use - self.close(release=False) - - error_text = 'Handshake error: {}'.format(hs_response['message']) - # if handshake fails for any reason other than protocol mismatch - # (i.e. authentication error), server version is 0.0.0 - if any([ - hs_response['version_major'], - hs_response['version_minor'], - hs_response['version_patch'], - ]): - error_text += ( - ' Server expects binary protocol version ' - '{version_major}.{version_minor}.{version_patch}. Client ' - 'provides {client_major}.{client_minor}.{client_patch}.' - ).format( - client_major=protocol_version[0], - client_minor=protocol_version[1], - client_patch=protocol_version[2], - **hs_response - ) - raise HandshakeError(( - hs_response['version_major'], - hs_response['version_minor'], - hs_response['version_patch'], - ), error_text) - self.host, self.port = host, port - return hs_response - - def reconnect(self, seq_no=0): - """ - Tries to reconnect synchronously, then in background. - """ - - # stop trying to reconnect - if seq_no >= len(RECONNECT_BACKOFF_SEQUENCE): - self._failed = False - - self._reconnect() - - if self.failed: - DaemonicTimer( - RECONNECT_BACKOFF_SEQUENCE[seq_no], - self.reconnect, - kwargs={'seq_no': seq_no + 1}, - ).start() - - def _reconnect(self): - # do not reconnect if connection is already working - # or was closed on purpose - if not self.failed: - return - - self.close() - - # connect and silence the connection errors - try: - self.connect(self.host, self.port) - except connection_errors: - pass - - def _transfer_params(self, to: 'Connection'): - """ - Transfer non-SSL parameters to target connection object. - - :param to: connection object to transfer parameters to. - """ - to.username = self.username - to.password = self.password - to.client = self.client - to.host = self.host - to.port = self.port - - def send(self, data: Union[bytes, bytearray, memoryview], flags=None): - """ - Send data down the socket. - - :param data: bytes to send, - :param flags: (optional) OS-specific flags. 
- """ - if self.closed: - raise SocketError('Attempt to use closed connection.') - - kwargs = {} - if flags is not None: - kwargs['flags'] = flags - - try: - self.socket.sendall(data, **kwargs) - except Exception: - self._fail() - self.reconnect() - raise - - def recv(self, flags=None) -> bytearray: - def _recv(buffer, num_bytes): - bytes_to_receive = num_bytes - while bytes_to_receive > 0: - try: - bytes_rcvd = self.socket.recv_into(buffer, bytes_to_receive, **kwargs) - if bytes_rcvd == 0: - raise SocketError('Connection broken.') - except connection_errors: - self._fail() - self.reconnect() - raise - - buffer = buffer[bytes_rcvd:] - bytes_to_receive -= bytes_rcvd - - if self.closed: - raise SocketError('Attempt to use closed connection.') - - kwargs = {} - if flags is not None: - kwargs['flags'] = flags - - data = bytearray(4) - _recv(memoryview(data), 4) - response_len = int.from_bytes(data, PROTOCOL_BYTE_ORDER) - - data.extend(bytearray(response_len)) - _recv(memoryview(data)[4:], response_len) - return data - - - def close(self, release=True): - """ - Try to mark socket closed, then unlink it. This is recommended but - not required, since sockets are automatically closed when - garbage-collected. - """ - with self._mux: - if self._socket: - try: - self._socket.shutdown(socket.SHUT_RDWR) - self._socket.close() - except connection_errors: - pass - self._socket = None - - if release: - self._in_use = False diff --git a/pyignite/connection/connection.py b/pyignite/connection/connection.py new file mode 100644 index 0000000..6ab6c6a --- /dev/null +++ b/pyignite/connection/connection.py @@ -0,0 +1,381 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from collections import OrderedDict +import socket +from typing import Union + +from pyignite.constants import * +from pyignite.exceptions import ( + HandshakeError, ParameterError, SocketError, connection_errors, +) +from pyignite.datatypes import Byte, Int, Short, String, UUIDObject +from pyignite.datatypes.internal import Struct + +from .handshake import HandshakeRequest +from .ssl import wrap +from ..stream import BinaryStream, READ_BACKWARD + + +class Connection: + """ + This is a `pyignite` class, that represents a connection to Ignite + node. It serves multiple purposes: + + * socket wrapper. Detects fragmentation and network errors. See also + https://docs.python.org/3/howto/sockets.html, + * binary protocol connector. Incapsulates handshake and failover reconnection. + """ + + _socket = None + _failed = None + + client = None + host = None + port = None + timeout = None + username = None + password = None + ssl_params = {} + uuid = None + + @staticmethod + def _check_ssl_params(params): + expected_args = [ + 'use_ssl', + 'ssl_version', + 'ssl_ciphers', + 'ssl_cert_reqs', + 'ssl_keyfile', + 'ssl_keyfile_password', + 'ssl_certfile', + 'ssl_ca_certfile', + ] + for param in params: + if param not in expected_args: + raise ParameterError(( + 'Unexpected parameter for connection initialization: `{}`' + ).format(param)) + + def __init__( + self, client: 'Client', timeout: float = 2.0, + username: str = None, password: str = None, **ssl_params + ): + """ + Initialize connection. + + For the use of the SSL-related parameters see + https://docs.python.org/3/library/ssl.html#ssl-certificates. + + :param client: Ignite client object, + :param timeout: (optional) sets timeout (in seconds) for each socket + operation including `connect`. 0 means non-blocking mode, which is + virtually guaranteed to fail. Can accept integer or float value. + Default is None (blocking mode), + :param use_ssl: (optional) set to True if Ignite server uses SSL + on its binary connector. Defaults to use SSL when username + and password has been supplied, not to use SSL otherwise, + :param ssl_version: (optional) SSL version constant from standard + `ssl` module. Defaults to TLS v1.1, as in Ignite 2.5, + :param ssl_ciphers: (optional) ciphers to use. If not provided, + `ssl` default ciphers are used, + :param ssl_cert_reqs: (optional) determines how the remote side + certificate is treated: + + * `ssl.CERT_NONE` − remote certificate is ignored (default), + * `ssl.CERT_OPTIONAL` − remote certificate will be validated, + if provided, + * `ssl.CERT_REQUIRED` − valid remote certificate is required, + + :param ssl_keyfile: (optional) a path to SSL key file to identify + local (client) party, + :param ssl_keyfile_password: (optional) password for SSL key file, + can be provided when key file is encrypted to prevent OpenSSL + password prompt, + :param ssl_certfile: (optional) a path to ssl certificate file + to identify local (client) party, + :param ssl_ca_certfile: (optional) a path to a trusted certificate + or a certificate chain. Required to check the validity of the remote + (server-side) certificate, + :param username: (optional) user name to authenticate to Ignite + cluster, + :param password: (optional) password to authenticate to Ignite cluster. 
+ """ + self.client = client + self.timeout = timeout + self.username = username + self.password = password + self._check_ssl_params(ssl_params) + if self.username and self.password and 'use_ssl' not in ssl_params: + ssl_params['use_ssl'] = True + self.ssl_params = ssl_params + self._failed = False + + @property + def closed(self) -> bool: + """ Tells if socket is closed. """ + return self._socket is None + + @property + def failed(self) -> bool: + """ Tells if connection is failed. """ + return self._failed + + @failed.setter + def failed(self, value): + self._failed = value + + @property + def alive(self) -> bool: + """ Tells if connection is up and no failure detected. """ + return not self.failed and not self.closed + + def __repr__(self) -> str: + return '{}:{}'.format(self.host or '?', self.port or '?') + + _wrap = wrap + + def get_protocol_version(self): + """ + Returns the tuple of major, minor, and revision numbers of the used + thin protocol version, or None, if no connection to the Ignite cluster + was yet established. + """ + return self.client.protocol_version + + def read_response(self) -> Union[dict, OrderedDict]: + """ + Processes server's response to the handshake request. + + :return: handshake data. + """ + response_start = Struct([ + ('length', Int), + ('op_code', Byte), + ]) + with BinaryStream(self, self.recv()) as stream: + start_class = response_start.parse(stream) + start = stream.read_ctype(start_class, direction=READ_BACKWARD) + data = response_start.to_python(start) + response_end = None + if data['op_code'] == 0: + response_end = Struct([ + ('version_major', Short), + ('version_minor', Short), + ('version_patch', Short), + ('message', String), + ]) + elif self.get_protocol_version() >= (1, 4, 0): + response_end = Struct([ + ('node_uuid', UUIDObject), + ]) + if response_end: + end_class = response_end.parse(stream) + end = stream.read_ctype(end_class, direction=READ_BACKWARD) + data.update(response_end.to_python(end)) + return data + + def connect( + self, host: str = None, port: int = None + ) -> Union[dict, OrderedDict]: + """ + Connect to the given server node with protocol version fallback. + + :param host: Ignite server node's host name or IP, + :param port: Ignite server node's port number. + """ + detecting_protocol = False + + # choose highest version first + if self.client.protocol_version is None: + detecting_protocol = True + self.client.protocol_version = max(PROTOCOLS) + + try: + result = self._connect_version(host, port) + except HandshakeError as e: + if e.expected_version in PROTOCOLS: + self.client.protocol_version = e.expected_version + result = self._connect_version(host, port) + else: + raise e + except connection_errors: + # restore undefined protocol version + if detecting_protocol: + self.client.protocol_version = None + raise + + # connection is ready for end user + self.uuid = result.get('node_uuid', None) # version-specific (1.4+) + + self.failed = False + return result + + def _connect_version( + self, host: str = None, port: int = None, + ) -> Union[dict, OrderedDict]: + """ + Connect to the given server node using protocol version + defined on client. + + :param host: Ignite server node's host name or IP, + :param port: Ignite server node's port number. 
+ """ + + host = host or IGNITE_DEFAULT_HOST + port = port or IGNITE_DEFAULT_PORT + + self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self._socket.settimeout(self.timeout) + self._socket = self._wrap(self._socket) + self._socket.connect((host, port)) + + protocol_version = self.client.protocol_version + + hs_request = HandshakeRequest( + protocol_version, + self.username, + self.password + ) + + with BinaryStream(self) as stream: + hs_request.from_python(stream) + self.send(stream.getbuffer()) + + hs_response = self.read_response() + if hs_response['op_code'] == 0: + self.close() + + error_text = 'Handshake error: {}'.format(hs_response['message']) + # if handshake fails for any reason other than protocol mismatch + # (i.e. authentication error), server version is 0.0.0 + if any([ + hs_response['version_major'], + hs_response['version_minor'], + hs_response['version_patch'], + ]): + error_text += ( + ' Server expects binary protocol version ' + '{version_major}.{version_minor}.{version_patch}. Client ' + 'provides {client_major}.{client_minor}.{client_patch}.' + ).format( + client_major=protocol_version[0], + client_minor=protocol_version[1], + client_patch=protocol_version[2], + **hs_response + ) + raise HandshakeError(( + hs_response['version_major'], + hs_response['version_minor'], + hs_response['version_patch'], + ), error_text) + self.host, self.port = host, port + return hs_response + + def reconnect(self): + # do not reconnect if connection is already working + # or was closed on purpose + if not self.failed: + return + + self.close() + + # connect and silence the connection errors + try: + self.connect(self.host, self.port) + except connection_errors: + pass + + def send(self, data: Union[bytes, bytearray, memoryview], flags=None): + """ + Send data down the socket. + + :param data: bytes to send, + :param flags: (optional) OS-specific flags. + """ + if self.closed: + raise SocketError('Attempt to use closed connection.') + + kwargs = {} + if flags is not None: + kwargs['flags'] = flags + + try: + self._socket.sendall(data, **kwargs) + except connection_errors: + self.failed = True + self.reconnect() + raise + + def recv(self, flags=None) -> bytearray: + def _recv(buffer, num_bytes): + bytes_to_receive = num_bytes + while bytes_to_receive > 0: + try: + bytes_rcvd = self._socket.recv_into(buffer, bytes_to_receive, **kwargs) + if bytes_rcvd == 0: + raise SocketError('Connection broken.') + except connection_errors: + self.failed = True + self.reconnect() + raise + + buffer = buffer[bytes_rcvd:] + bytes_to_receive -= bytes_rcvd + + if self.closed: + raise SocketError('Attempt to use closed connection.') + + kwargs = {} + if flags is not None: + kwargs['flags'] = flags + + data = bytearray(4) + _recv(memoryview(data), 4) + response_len = int.from_bytes(data, PROTOCOL_BYTE_ORDER) + + data.extend(bytearray(response_len)) + _recv(memoryview(data)[4:], response_len) + return data + + def close(self): + """ + Try to mark socket closed, then unlink it. This is recommended but + not required, since sockets are automatically closed when + garbage-collected. 
+ """ + if self._socket: + try: + self._socket.shutdown(socket.SHUT_RDWR) + self._socket.close() + except connection_errors: + pass + + self._socket = None diff --git a/pyignite/datatypes/complex.py b/pyignite/datatypes/complex.py index aed3cda..b8d9c02 100644 --- a/pyignite/datatypes/complex.py +++ b/pyignite/datatypes/complex.py @@ -564,8 +564,8 @@ def to_python(cls, ctype_object, client: 'Client' = None, *args, **kwargs): @classmethod def from_python_not_null(cls, stream, value): - stream.register_binary_type(value.__class__) if getattr(value, '_buffer', None): stream.write(value._buffer) else: + stream.register_binary_type(value.__class__) value._from_python(stream) diff --git a/pyignite/datatypes/internal.py b/pyignite/datatypes/internal.py index 0111a22..a6da9fe 100644 --- a/pyignite/datatypes/internal.py +++ b/pyignite/datatypes/internal.py @@ -18,7 +18,7 @@ import decimal from datetime import date, datetime, timedelta from io import SEEK_CUR -from typing import Any, Tuple, Union, Callable +from typing import Any, Tuple, Union, Callable, List import uuid import attr @@ -115,8 +115,9 @@ def tc_map(key: bytes, _memo_map: dict = {}): class Conditional: - - def __init__(self, predicate1: Callable[[any], bool], predicate2: Callable[[any], bool], var1, var2): + def __init__(self, fields: List, predicate1: Callable[[any], bool], + predicate2: Callable[[any], bool], var1, var2): + self.fields = fields self.predicate1 = predicate1 self.predicate2 = predicate2 self.var1 = var1 @@ -209,12 +210,19 @@ class Struct: defaults = attr.ib(type=dict, default={}) def parse(self, stream): - fields, values = [], {} + fields, ctx = [], {} + + for _, c_type in self.fields: + if isinstance(c_type, Conditional): + for name in c_type.fields: + ctx[name] = None + for name, c_type in self.fields: is_cond = isinstance(c_type, Conditional) - c_type = c_type.parse(stream, values) if is_cond else c_type.parse(stream) + c_type = c_type.parse(stream, ctx) if is_cond else c_type.parse(stream) fields.append((name, c_type)) - values[name] = stream.read_ctype(c_type, direction=READ_BACKWARD) + if name in ctx: + ctx[name] = stream.read_ctype(c_type, direction=READ_BACKWARD) data_class = type( 'Struct', diff --git a/pyignite/queries/query.py b/pyignite/queries/query.py index 5bd114b..b5be753 100644 --- a/pyignite/queries/query.py +++ b/pyignite/queries/query.py @@ -105,9 +105,11 @@ def perform( # this test depends on protocol version if getattr(response, 'flags', False) & RHF_TOPOLOGY_CHANGED: # update latest affinity version - conn.client.affinity_version = ( - response.affinity_version, response.affinity_minor - ) + new_affinity = (response.affinity_version, response.affinity_minor) + old_affinity = conn.client.affinity_version + + if new_affinity > old_affinity: + conn.client.affinity_version = new_affinity # build result result = APIResult(response) diff --git a/pyignite/stream/binary_stream.py b/pyignite/stream/binary_stream.py index 1ecdcfb..46ac683 100644 --- a/pyignite/stream/binary_stream.py +++ b/pyignite/stream/binary_stream.py @@ -95,7 +95,10 @@ def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): - self.stream.close() + try: + self.stream.close() + except BufferError: + pass def get_dataclass(self, header): # get field names from outer space diff --git a/pyignite/utils.py b/pyignite/utils.py index 3d0378f..6c636ae 100644 --- a/pyignite/utils.py +++ b/pyignite/utils.py @@ -18,7 +18,6 @@ import warnings from functools import wraps -from threading import Event, Thread from typing 
import Any, Optional, Type, Tuple, Union from pyignite.datatypes.base import IgniteDataType @@ -255,30 +254,6 @@ def unsigned(value: int, c_type: ctypes._SimpleCData = ctypes.c_uint) -> int: return c_type(value).value -class DaemonicTimer(Thread): - """ - Same as normal `threading.Timer`, but do not delay the program exit. - """ - - def __init__(self, interval, function, args=None, kwargs=None): - Thread.__init__(self, daemon=True) - self.interval = interval - self.function = function - self.args = args if args is not None else [] - self.kwargs = kwargs if kwargs is not None else {} - self.finished = Event() - - def cancel(self): - """Stop the timer if it hasn't finished yet.""" - self.finished.set() - - def run(self): - self.finished.wait(self.interval) - if not self.finished.is_set(): - self.function(*self.args, **self.kwargs) - self.finished.set() - - def capitalize(string: str) -> str: """ Capitalizing the string, assuming the first character is a letter. diff --git a/tests/config/log4j.xml.jinja2 b/tests/config/log4j.xml.jinja2 index 628f66c..983ae9e 100644 --- a/tests/config/log4j.xml.jinja2 +++ b/tests/config/log4j.xml.jinja2 @@ -33,7 +33,6 @@ - diff --git a/tests/conftest.py b/tests/conftest.py index 54a7fda..bc8804d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -120,7 +120,7 @@ def client_partition_aware_single_server( password ): node = node[:1] - yield from client(node, timeout, True, use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, ssl_ca_certfile, + yield from client0(node, timeout, True, use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, ssl_version, username, password) @@ -211,7 +211,7 @@ def pytest_addoption(parser): '--timeout', action='store', type=float, - default=None, + default=2.0, help=( 'Timeout (in seconds) for each socket operation. Can accept ' 'integer or float value. Default is None' diff --git a/tests/test_affinity_request_routing.py b/tests/test_affinity_request_routing.py index 866222b..3489dea 100644 --- a/tests/test_affinity_request_routing.py +++ b/tests/test_affinity_request_routing.py @@ -18,6 +18,7 @@ from pyignite import * from pyignite.connection import Connection +from pyignite.constants import PROTOCOL_BYTE_ORDER from pyignite.datatypes import * from pyignite.datatypes.cache_config import CacheMode from pyignite.datatypes.prop_codes import * @@ -30,7 +31,12 @@ def patched_send(self, *args, **kwargs): """Patched send function that push to queue idx of server to which request is routed.""" - requests.append(self.port % 100) + buf = args[0] + if buf and len(buf) >= 6: + op_code = int.from_bytes(buf[4:6], byteorder=PROTOCOL_BYTE_ORDER) + # Filter only caches operation. 
+ if 1000 <= op_code < 1100: + requests.append(self.port % 100) return old_send(self, *args, **kwargs) diff --git a/tox.ini b/tox.ini index 4361413..eb7d1a6 100644 --- a/tox.ini +++ b/tox.ini @@ -17,12 +17,6 @@ skipsdist = True envlist = py{36,37,38}-{no-ssl,ssl,ssl-password} -[travis] -python = - 3.6: py36-{no-ssl,ssl,ssl-password} - 3.7: py37-{no-ssl,ssl,ssl-password} - 3.8: py38-{no-ssl,ssl,ssl-password} - [testenv] passenv = TEAMCITY_VERSION IGNITE_HOME envdir = {homedir}/.virtualenvs/pyignite-{envname} From e5ca3fceb79a6c2e01c7480e56df162088fbedc0 Mon Sep 17 00:00:00 2001 From: Ivan Dashchinskiy Date: Thu, 18 Feb 2021 14:32:03 +0300 Subject: [PATCH 16/62] IGNITE-14186 Implement C module to speedup hashcode This closes #17 --- .gitignore | 4 + MANIFEST.in | 2 + README.md | 13 +++ cext/cutils.c | 193 +++++++++++++++++++++++++++++++++++++++ pyignite/api/binary.py | 16 +--- pyignite/utils.py | 26 +++++- requirements/install.txt | 1 - scripts/build_wheels.sh | 49 ++++++++++ scripts/create_distr.sh | 86 +++++++++++++++++ scripts/create_sdist.sh | 35 +++++++ setup.py | 140 ++++++++++++++++++---------- tests/conftest.py | 23 ++++- tests/test_cutils.py | 136 +++++++++++++++++++++++++++ tox.ini | 2 +- 14 files changed, 661 insertions(+), 65 deletions(-) create mode 100644 MANIFEST.in create mode 100644 cext/cutils.c create mode 100755 scripts/build_wheels.sh create mode 100755 scripts/create_distr.sh create mode 100755 scripts/create_sdist.sh create mode 100644 tests/test_cutils.py diff --git a/.gitignore b/.gitignore index d28510c..699c26d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,12 @@ .idea +.benchmarks .vscode .eggs .pytest_cache .tox +*.so +build +distr tests/config/*.xml junit*.xml pyignite.egg-info diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..783a2fe --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,2 @@ +recursive-include requirements * +include README.md diff --git a/README.md b/README.md index 24f7b4e..47bd712 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,19 @@ $ pip install -r requirements/.txt You may also want to consult the `setuptools` manual about using `setup.py`. +### *optional C extension* +There is an optional C extension to speedup some computational intensive tasks. If it's compilation fails +(missing compiler or CPython headers), `pyignite` will be installed without this module. + +- On Linux or MacOS X only C compiler is required (`gcc` or `clang`). It compiles during standard setup process. +- For building universal `wheels` (binary packages) for Linux, just invoke script `./scripts/create_distr.sh`. + + ***NB!* Docker is required.** + + Ready wheels for `x86` and `x86-64` for different python versions (3.6, 3.7, 3.8 and 3.9) will be + located in `./distr` directory. + + ### Updating from older version To upgrade an existing package, use the following command: diff --git a/cext/cutils.c b/cext/cutils.c new file mode 100644 index 0000000..0106edc --- /dev/null +++ b/cext/cutils.c @@ -0,0 +1,193 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. +* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. 
You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +#include + +#ifdef _MSC_VER + +typedef __int32 int32_t; +typedef unsigned __int32 uint32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; + +#else +#include +#endif + +static int32_t FNV1_OFFSET_BASIS = 0x811c9dc5; +static int32_t FNV1_PRIME = 0x01000193; + + +PyObject* hashcode(PyObject* self, PyObject *args); +PyObject* schema_id(PyObject* self, PyObject *args); + +PyObject* str_hashcode(PyObject* data); +int32_t str_hashcode_(PyObject* data, int lower); +PyObject* b_hashcode(PyObject* data); + +static PyMethodDef methods[] = { + {"hashcode", (PyCFunction) hashcode, METH_VARARGS, ""}, + {"schema_id", (PyCFunction) schema_id, METH_VARARGS, ""}, + {NULL, NULL, 0, NULL} /* Sentinel */ +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "_cutils", + 0, /* m_doc */ + -1, /* m_size */ + methods, /* m_methods */ + NULL, /* m_slots */ + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL, /* m_free */ +}; + +static char* hashcode_input_err = "supported only strings, bytearrays, bytes and memoryview"; +static char* schema_id_input_err = "input argument must be dict or int"; +static char* schema_field_type_err = "schema keys must be strings"; + +PyMODINIT_FUNC PyInit__cutils(void) { + return PyModule_Create(&moduledef); +} + +PyObject* hashcode(PyObject* self, PyObject *args) { + PyObject* data; + + if (!PyArg_ParseTuple(args, "O", &data)) { + return NULL; + } + + if (data == Py_None) { + return PyLong_FromLong(0); + } + else if (PyUnicode_CheckExact(data)) { + return str_hashcode(data); + } + else { + return b_hashcode(data); + } +} + +PyObject* str_hashcode(PyObject* data) { + return PyLong_FromLong(str_hashcode_(data, 0)); +} + +int32_t str_hashcode_(PyObject *str, int lower) { + int32_t res = 0; + + Py_ssize_t sz = PyUnicode_GET_LENGTH(str); + if (!sz) { + return res; + } + + int kind = PyUnicode_KIND(str); + void* buf = PyUnicode_DATA(str); + + Py_ssize_t i; + for (i = 0; i < sz; i++) { + Py_UCS4 ch = PyUnicode_READ(kind, buf, i); + + if (lower) { + ch = Py_UNICODE_TOLOWER(ch); + } + + res = 31 * res + ch; + } + + return res; +} + +PyObject* b_hashcode(PyObject* data) { + int32_t res = 1; + Py_ssize_t sz; char* buf; + + if (PyBytes_CheckExact(data)) { + sz = PyBytes_GET_SIZE(data); + buf = PyBytes_AS_STRING(data); + } + else if (PyByteArray_CheckExact(data)) { + sz = PyByteArray_GET_SIZE(data); + buf = PyByteArray_AS_STRING(data); + } + else if (PyMemoryView_Check(data)) { + Py_buffer* pyBuf = PyMemoryView_GET_BUFFER(data); + sz = pyBuf->len; + buf = (char*)pyBuf->buf; + } + else { + PyErr_SetString(PyExc_ValueError, hashcode_input_err); + return NULL; + } + + Py_ssize_t i; + for (i = 0; i < sz; i++) { + res = 31 * res + (signed char)buf[i]; + } + + return PyLong_FromLong(res); +} + +PyObject* schema_id(PyObject* self, PyObject *args) { + PyObject* data; + + if (!PyArg_ParseTuple(args, "O", &data)) { + return NULL; + } + + if (PyLong_CheckExact(data)) { + return PyNumber_Long(data); + } + else if (data == Py_None) { + return PyLong_FromLong(0); + } + else if (PyDict_Check(data)) { + Py_ssize_t sz = PyDict_Size(data); + + if (sz == 0) { + 
return PyLong_FromLong(0); + } + + int32_t s_id = FNV1_OFFSET_BASIS; + + PyObject *key, *value; + Py_ssize_t pos = 0; + + while (PyDict_Next(data, &pos, &key, &value)) { + if (!PyUnicode_CheckExact(key)) { + PyErr_SetString(PyExc_ValueError, schema_field_type_err); + return NULL; + } + + int32_t field_id = str_hashcode_(key, 1); + s_id ^= field_id & 0xff; + s_id *= FNV1_PRIME; + s_id ^= (field_id >> 8) & 0xff; + s_id *= FNV1_PRIME; + s_id ^= (field_id >> 16) & 0xff; + s_id *= FNV1_PRIME; + s_id ^= (field_id >> 24) & 0xff; + s_id *= FNV1_PRIME; + } + + return PyLong_FromLong(s_id); + } + else { + PyErr_SetString(PyExc_ValueError, schema_id_input_err); + return NULL; + } +} diff --git a/pyignite/api/binary.py b/pyignite/api/binary.py index 0e63c17..87a5232 100644 --- a/pyignite/api/binary.py +++ b/pyignite/api/binary.py @@ -22,7 +22,7 @@ from pyignite.datatypes import String, Int, Bool from pyignite.queries import Query from pyignite.queries.op_codes import * -from pyignite.utils import int_overflow, entity_id +from pyignite.utils import entity_id, schema_id from .result import APIResult from ..stream import BinaryStream, READ_BACKWARD from ..queries.response import Response @@ -137,7 +137,7 @@ def put_binary_type( 'is_enum': is_enum, 'schema': [], } - schema_id = None + s_id = None if is_enum: data['enums'] = [] for literal, ordinal in schema.items(): @@ -147,7 +147,7 @@ def put_binary_type( }) else: # assemble schema and calculate schema ID in one go - schema_id = FNV1_OFFSET_BASIS if schema else 0 + s_id = schema_id(schema) for field_name, data_type in schema.items(): # TODO: check for allowed data types field_id = entity_id(field_name) @@ -159,17 +159,9 @@ def put_binary_type( ), 'field_id': field_id, }) - schema_id ^= (field_id & 0xff) - schema_id = int_overflow(schema_id * FNV1_PRIME) - schema_id ^= ((field_id >> 8) & 0xff) - schema_id = int_overflow(schema_id * FNV1_PRIME) - schema_id ^= ((field_id >> 16) & 0xff) - schema_id = int_overflow(schema_id * FNV1_PRIME) - schema_id ^= ((field_id >> 24) & 0xff) - schema_id = int_overflow(schema_id * FNV1_PRIME) data['schema'].append({ - 'schema_id': schema_id, + 'schema_id': s_id, 'schema_fields': [ {'schema_field_id': entity_id(x)} for x in schema ], diff --git a/pyignite/utils.py b/pyignite/utils.py index 6c636ae..67f164f 100644 --- a/pyignite/utils.py +++ b/pyignite/utils.py @@ -23,6 +23,13 @@ from pyignite.datatypes.base import IgniteDataType from .constants import * +FALLBACK = False + +try: + from pyignite import _cutils +except ImportError: + FALLBACK = True + LONG_MASK = 0xffffffff DIGITS_PER_INT = 9 @@ -91,6 +98,13 @@ def hashcode(data: Union[str, bytes, bytearray, memoryview]) -> int: :param data: UTF-8-encoded string identifier of binary buffer or byte array :return: hash code. """ + if FALLBACK: + return __hashcode_fallback(data) + + return _cutils.hashcode(data) + + +def __hashcode_fallback(data: Union[str, bytes, bytearray, memoryview]) -> int: if isinstance(data, str): """ For strings we iterate over code point which are of the int type @@ -147,13 +161,21 @@ def schema_id(schema: Union[int, dict]) -> int: :param schema: a dict of field names: field types, :return: schema ID. 
""" - if type(schema) is int: + if FALLBACK: + return __schema_id_fallback(schema) + return _cutils.schema_id(schema) + + +def __schema_id_fallback(schema: Union[int, dict]) -> int: + if isinstance(schema, int): return schema + if schema is None: return 0 + s_id = FNV1_OFFSET_BASIS if schema else 0 for field_name in schema.keys(): - field_id = entity_id(field_name) + field_id = __hashcode_fallback(field_name.lower()) s_id ^= (field_id & 0xff) s_id = int_overflow(s_id * FNV1_PRIME) s_id ^= ((field_id >> 8) & 0xff) diff --git a/requirements/install.txt b/requirements/install.txt index 9b87ae8..cecea8f 100644 --- a/requirements/install.txt +++ b/requirements/install.txt @@ -1,4 +1,3 @@ # these pip packages are necessary for the pyignite to run -typing==3.6.6; python_version<'3.5' attrs==18.1.0 diff --git a/scripts/build_wheels.sh b/scripts/build_wheels.sh new file mode 100755 index 0000000..cf5f760 --- /dev/null +++ b/scripts/build_wheels.sh @@ -0,0 +1,49 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e -u -x + +function repair_wheel { + wheel="$1" + if ! auditwheel show "$wheel"; then + echo "Skipping non-platform wheel $wheel" + else + auditwheel repair "$wheel" --plat "$PLAT" -w /wheels + fi +} + +# Compile wheels +for PYBIN in /opt/python/*/bin; do + if [[ $PYBIN =~ ^(.*)cp3[6789](.*)$ ]]; then + "${PYBIN}/pip" wheel /pyignite/ --no-deps -w /wheels + fi +done + +# Bundle external shared libraries into the wheels +for whl in /wheels/*.whl; do + repair_wheel "$whl" +done + +for whl in /wheels/*.whl; do + if [[ ! $whl =~ ^(.*)manylinux(.*)$ ]]; then + rm "$whl" + else + chmod 666 "$whl" + fi +done + +rm -rf /pyignite/*.egg-info +rm -rf /pyignite/.eggs diff --git a/scripts/create_distr.sh b/scripts/create_distr.sh new file mode 100755 index 0000000..5732aba --- /dev/null +++ b/scripts/create_distr.sh @@ -0,0 +1,86 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +DISTR_DIR="$(pwd)/distr/" +SRC_DIR="$(pwd)" +DEFAULT_DOCKER_IMAGE="quay.io/pypa/manylinux1_x86_64" + +usage() { + cat < Date: Fri, 19 Feb 2021 12:36:16 +0300 Subject: [PATCH 17/62] IGNITE-14211 Remove existing cache requirement from SQL API This closes #18 --- pyignite/api/sql.py | 23 +++++------ pyignite/client.py | 18 ++++++--- pyignite/utils.py | 3 ++ tests/test_binary.py | 5 --- tests/test_cache_class.py | 4 +- tests/test_sql.py | 84 ++++++++++++++++++++++++++++++++++----- 6 files changed, 98 insertions(+), 39 deletions(-) diff --git a/pyignite/api/sql.py b/pyignite/api/sql.py index 73cacc6..dc470d1 100644 --- a/pyignite/api/sql.py +++ b/pyignite/api/sql.py @@ -283,36 +283,31 @@ def sql_fields( Performs SQL fields query. :param conn: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache: name or ID of the cache. If zero, then schema is used. :param query_str: SQL query string, :param page_size: cursor page size, :param query_args: (optional) query arguments. List of values or (value, type hint) tuples, - :param schema: (optional) schema for the query. Defaults to `PUBLIC`, + :param schema: schema for the query. :param statement_type: (optional) statement type. Can be: * StatementType.ALL − any type (default), * StatementType.SELECT − select, * StatementType.UPDATE − update. - :param distributed_joins: (optional) distributed joins. Defaults to False, + :param distributed_joins: (optional) distributed joins. :param local: (optional) pass True if this query should be executed - on local node only. Defaults to False, + on local node only. :param replicated_only: (optional) whether query contains only - replicated tables or not. Defaults to False, - :param enforce_join_order: (optional) enforce join order. Defaults - to False, + replicated tables or not. + :param enforce_join_order: (optional) enforce join order. :param collocated: (optional) whether your data is co-located or not. - Defaults to False, - :param lazy: (optional) lazy query execution. Defaults to False, + :param lazy: (optional) lazy query execution. :param include_field_names: (optional) include field names in result. - Defaults to False, - :param max_rows: (optional) query-wide maximum of rows. Defaults to -1 - (all rows), + :param max_rows: (optional) query-wide maximum of rows. :param timeout: (optional) non-negative timeout value in ms. Zero disables - timeout (default), + timeout. :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. 
When the parameter is omitted, a random value is generated, diff --git a/pyignite/client.py b/pyignite/client.py index 77c6373..9416474 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -58,7 +58,7 @@ BinaryTypeError, CacheError, ReconnectError, SQLError, connection_errors, ) from .utils import ( - capitalize, entity_id, schema_id, process_delimiter, + cache_id, capitalize, entity_id, schema_id, process_delimiter, status_to_exception, is_iterable, ) from .binary import GenericObjectMeta @@ -513,13 +513,14 @@ def get_cache_names(self) -> list: return cache_get_names(self.random_node) def sql( - self, query_str: str, page_size: int = 1024, query_args: Iterable = None, - schema: Union[int, str] = 'PUBLIC', + self, query_str: str, page_size: int = 1024, + query_args: Iterable = None, schema: str = 'PUBLIC', statement_type: int = 0, distributed_joins: bool = False, local: bool = False, replicated_only: bool = False, enforce_join_order: bool = False, collocated: bool = False, lazy: bool = False, include_field_names: bool = False, max_rows: int = -1, timeout: int = 0, + cache: Union[int, str, Cache] = None ): """ Runs an SQL query and returns its result. @@ -553,6 +554,8 @@ def sql( (all rows), :param timeout: (optional) non-negative timeout value in ms. Zero disables timeout (default), + :param cache (optional) Name or ID of the cache to use to infer schema. + If set, 'schema' argument is ignored, :return: generator with result rows as a lists. If `include_field_names` was set, the first row will hold field names. """ @@ -580,10 +583,13 @@ def generate_result(value): conn = self.random_node - schema = self.get_cache(schema) + c_id = cache.cache_id if isinstance(cache, Cache) else cache_id(cache) + + if c_id != 0: + schema = None + result = sql_fields( - conn, schema.cache_id, query_str, - page_size, query_args, schema.name, + conn, c_id, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, timeout, diff --git a/pyignite/utils.py b/pyignite/utils.py index 67f164f..f1a7f90 100644 --- a/pyignite/utils.py +++ b/pyignite/utils.py @@ -105,6 +105,9 @@ def hashcode(data: Union[str, bytes, bytearray, memoryview]) -> int: def __hashcode_fallback(data: Union[str, bytes, bytearray, memoryview]) -> int: + if data is None: + return 0 + if isinstance(data, str): """ For strings we iterate over code point which are of the int type diff --git a/tests/test_binary.py b/tests/test_binary.py index 45d1d25..5fa2ec4 100644 --- a/tests/test_binary.py +++ b/tests/test_binary.py @@ -63,8 +63,6 @@ def test_sql_read_as_binary(client): - - client.get_or_create_cache(scheme_name) client.sql(drop_query) # create table @@ -92,9 +90,6 @@ def test_sql_read_as_binary(client): def test_sql_write_as_binary(client): - - client.get_or_create_cache(scheme_name) - # configure cache as an SQL table type_name = table_cache_name diff --git a/tests/test_cache_class.py b/tests/test_cache_class.py index 1df0d44..940160a 100644 --- a/tests/test_cache_class.py +++ b/tests/test_cache_class.py @@ -62,9 +62,7 @@ def test_cache_remove(client): def test_cache_get(client): - client.get_or_create_cache('my_cache') - - my_cache = client.get_cache('my_cache') + my_cache = client.get_or_create_cache('my_cache') assert my_cache.settings[PROP_NAME] == 'my_cache' my_cache.destroy() diff --git a/tests/test_sql.py b/tests/test_sql.py index c896afb..f25fedd 100644 --- a/tests/test_sql.py +++ b/tests/test_sql.py @@ -20,12 +20,12 @@ 
sql, sql_cursor_get_page, cache_get_configuration, ) +from pyignite.datatypes.cache_config import CacheMode from pyignite.datatypes.prop_codes import * from pyignite.exceptions import SQLError from pyignite.utils import entity_id from pyignite.binary import unwrap_binary - initial_data = [ ('John', 'Doe', 5), ('Jane', 'Roe', 4), @@ -59,9 +59,10 @@ def test_sql(client): result = sql_fields( conn, - 'PUBLIC', + 0, create_query, page_size, + schema='PUBLIC', include_field_names=True ) assert result.status == 0, result.message @@ -70,9 +71,10 @@ def test_sql(client): fname, lname, grade = data_line result = sql_fields( conn, - 'PUBLIC', + 0, insert_query, page_size, + schema='PUBLIC', query_args=[i, fname, lname, grade], include_field_names=True ) @@ -108,7 +110,7 @@ def test_sql(client): assert data.type_id == entity_id(binary_type_name) # repeat cleanup - result = sql_fields(conn, 'PUBLIC', drop_query, page_size) + result = sql_fields(conn, 0, drop_query, page_size, schema='PUBLIC') assert result.status == 0 @@ -121,9 +123,10 @@ def test_sql_fields(client): result = sql_fields( conn, - 'PUBLIC', + 0, create_query, page_size, + schema='PUBLIC', include_field_names=True ) assert result.status == 0, result.message @@ -132,9 +135,10 @@ def test_sql_fields(client): fname, lname, grade = data_line result = sql_fields( conn, - 'PUBLIC', + 0, insert_query, page_size, + schema='PUBLIC', query_args=[i, fname, lname, grade], include_field_names=True ) @@ -142,9 +146,10 @@ def test_sql_fields(client): result = sql_fields( conn, - 'PUBLIC', + 0, select_query, page_size, + schema='PUBLIC', include_field_names=True ) assert result.status == 0 @@ -159,7 +164,7 @@ def test_sql_fields(client): assert result.value['more'] is False # repeat cleanup - result = sql_fields(conn, 'PUBLIC', drop_query, page_size) + result = sql_fields(conn, 0, drop_query, page_size, schema='PUBLIC') assert result.status == 0 @@ -176,7 +181,7 @@ def test_long_multipage_query(client): client.sql('DROP TABLE LongMultipageQuery IF EXISTS') - client.sql("CREATE TABLE LongMultiPageQuery (%s, %s)" % \ + client.sql("CREATE TABLE LongMultiPageQuery (%s, %s)" % (fields[0] + " INT(11) PRIMARY KEY", ",".join(map(lambda f: f + " INT(11)", fields[1:])))) for id in range(1, 21): @@ -193,6 +198,63 @@ def test_long_multipage_query(client): client.sql(drop_query) -def test_sql_not_create_cache(client): +def test_sql_not_create_cache_with_schema(client): with pytest.raises(SQLError, match=r".*Cache does not exist.*"): - client.sql(schema='IS_NOT_EXISTING', query_str='select * from IsNotExisting') + client.sql(schema=None, cache='NOT_EXISTING', query_str='select * from NotExisting') + + +def test_sql_not_create_cache_with_cache(client): + with pytest.raises(SQLError, match=r".*Failed to set schema.*"): + client.sql(schema='NOT_EXISTING', query_str='select * from NotExisting') + + +def test_query_with_cache(client): + test_key = 42 + test_value = 'Lorem ipsum' + + cache_name = test_query_with_cache.__name__.upper() + schema_name = f'{cache_name}_schema'.upper() + table_name = f'{cache_name}_table'.upper() + + cache = client.create_cache({ + PROP_NAME: cache_name, + PROP_SQL_SCHEMA: schema_name, + PROP_CACHE_MODE: CacheMode.PARTITIONED, + PROP_QUERY_ENTITIES: [ + { + 'table_name': table_name, + 'key_field_name': 'KEY', + 'value_field_name': 'VALUE', + 'key_type_name': 'java.lang.Long', + 'value_type_name': 'java.lang.String', + 'query_indexes': [], + 'field_name_aliases': [], + 'query_fields': [ + { + 'name': 'KEY', + 'type_name': 'java.lang.Long', + 
'is_key_field': True, + 'is_notnull_constraint_field': True, + }, + { + 'name': 'VALUE', + 'type_name': 'java.lang.String', + }, + ], + }, + ], + }) + + cache.put(test_key, test_value) + + args_to_check = [ + ('schema', schema_name), + ('cache', cache), + ('cache', cache.name), + ('cache', cache.cache_id) + ] + + for param, value in args_to_check: + page = client.sql(f'select value from {table_name}', **{param: value}) + received = next(page)[0] + assert test_value == received From b2030beea23d18aa54d98226cd52911c395e00cb Mon Sep 17 00:00:00 2001 From: Ivan Dashchinskiy Date: Thu, 25 Feb 2021 15:11:10 +0300 Subject: [PATCH 18/62] IGNITE-14240 Re-factor tests Handle authentication error. Fix infinite recursion on failed connection on handshake. Skip affinity test if server doesn't support protocol. Remove travis. This closes #19 --- .travis.yml | 48 --- pyignite/connection/connection.py | 25 +- pyignite/constants.py | 2 +- pyignite/exceptions.py | 9 + requirements/install.txt | 2 +- requirements/setup.txt | 2 +- requirements/tests.txt | 8 +- tests/affinity/conftest.py | 72 ++++ tests/{ => affinity}/test_affinity.py | 43 +-- .../test_affinity_bad_servers.py | 27 +- .../test_affinity_request_routing.py | 26 +- .../test_affinity_single_connection.py | 16 +- tests/common/conftest.py | 56 +++ tests/{ => common}/test_binary.py | 0 tests/{ => common}/test_cache_class.py | 0 tests/{ => common}/test_cache_class_sql.py | 0 .../test_cache_composite_key_class_sql.py | 0 tests/{ => common}/test_cache_config.py | 0 tests/{ => common}/test_datatypes.py | 0 tests/{ => common}/test_generic_object.py | 0 tests/{ => common}/test_get_names.py | 0 tests/{ => common}/test_key_value.py | 0 tests/{ => common}/test_scan.py | 0 tests/{ => common}/test_sql.py | 2 +- tests/config/ignite-config.xml.jinja2 | 14 + tests/conftest.py | 318 +----------------- tests/security/conftest.py | 49 +++ tests/security/test_auth.py | 63 ++++ tests/security/test_ssl.py | 56 +++ tests/test_examples.py | 41 +-- tests/util.py | 45 ++- tox.ini | 42 +-- 32 files changed, 472 insertions(+), 494 deletions(-) delete mode 100644 .travis.yml create mode 100644 tests/affinity/conftest.py rename tests/{ => affinity}/test_affinity.py (80%) rename tests/{ => affinity}/test_affinity_bad_servers.py (66%) rename tests/{ => affinity}/test_affinity_request_routing.py (89%) rename tests/{ => affinity}/test_affinity_single_connection.py (90%) create mode 100644 tests/common/conftest.py rename tests/{ => common}/test_binary.py (100%) rename tests/{ => common}/test_cache_class.py (100%) rename tests/{ => common}/test_cache_class_sql.py (100%) rename tests/{ => common}/test_cache_composite_key_class_sql.py (100%) rename tests/{ => common}/test_cache_config.py (100%) rename tests/{ => common}/test_datatypes.py (100%) rename tests/{ => common}/test_generic_object.py (100%) rename tests/{ => common}/test_get_names.py (100%) rename tests/{ => common}/test_key_value.py (100%) rename tests/{ => common}/test_scan.py (100%) rename tests/{ => common}/test_sql.py (98%) create mode 100644 tests/security/conftest.py create mode 100644 tests/security/test_auth.py create mode 100644 tests/security/test_ssl.py diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 3095941..0000000 --- a/.travis.yml +++ /dev/null @@ -1,48 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
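To make the SQL API change above concrete: `Client.sql()` no longer requires fetching a cache for the schema, and the new optional `cache` argument (a name, a numeric id or a `Cache` instance) overrides `schema` when given. A hedged sketch; the table name is arbitrary and `SQL_PUBLIC_DEMO` relies on Ignite's default naming for caches backing SQL tables:

```
from pyignite import Client

client = Client()
client.connect('127.0.0.1', 10800)

# Schema-based query, as before (schema defaults to PUBLIC); no cache lookup needed.
client.sql('CREATE TABLE IF NOT EXISTS demo (id INT PRIMARY KEY, data VARCHAR)')
client.sql("INSERT INTO demo (id, data) VALUES (1, 'one')")

# Cache-based query: `schema` is ignored when `cache` is set.
for row in client.sql('SELECT data FROM demo', cache='SQL_PUBLIC_DEMO'):
    print(row)
```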
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -language: python -sudo: required - -addons: - apt: - packages: - - openjdk-8-jdk - -env: - global: - - IGNITE_VERSION=2.9.1 - - IGNITE_HOME=/opt/ignite - -before_install: - - curl -L https://apache-mirror.rbc.ru/pub/apache/ignite/${IGNITE_VERSION}/apache-ignite-slim-${IGNITE_VERSION}-bin.zip > ignite.zip - - unzip ignite.zip -d /opt - - mv /opt/apache-ignite-slim-${IGNITE_VERSION}-bin /opt/ignite - - mv /opt/ignite/libs/optional/ignite-log4j2 /opt/ignite/libs/ - -jobs: - include: - - python: '3.6' - arch: amd64 - env: TOXENV=py36-no-ssl,py36-ssl,py36-ssl-password - - python: '3.7' - arch: amd64 - env: TOXENV=py37-no-ssl,py37-ssl,py37-ssl-password - - python: '3.8' - arch: amd64 - env: TOXENV=py38-no-ssl,py38-ssl,py38-ssl-password - -install: pip install tox -script: tox \ No newline at end of file diff --git a/pyignite/connection/connection.py b/pyignite/connection/connection.py index 6ab6c6a..8db304e 100644 --- a/pyignite/connection/connection.py +++ b/pyignite/connection/connection.py @@ -34,7 +34,7 @@ from pyignite.constants import * from pyignite.exceptions import ( - HandshakeError, ParameterError, SocketError, connection_errors, + HandshakeError, ParameterError, SocketError, connection_errors, AuthenticationError, ) from pyignite.datatypes import Byte, Int, Short, String, UUIDObject from pyignite.datatypes.internal import Struct @@ -43,6 +43,8 @@ from .ssl import wrap from ..stream import BinaryStream, READ_BACKWARD +CLIENT_STATUS_AUTH_FAILURE = 2000 + class Connection: """ @@ -180,7 +182,7 @@ def read_response(self) -> Union[dict, OrderedDict]: ('length', Int), ('op_code', Byte), ]) - with BinaryStream(self, self.recv()) as stream: + with BinaryStream(self, self.recv(reconnect=False)) as stream: start_class = response_start.parse(stream) start = stream.read_ctype(start_class, direction=READ_BACKWARD) data = response_start.to_python(start) @@ -191,6 +193,7 @@ def read_response(self) -> Union[dict, OrderedDict]: ('version_minor', Short), ('version_patch', Short), ('message', String), + ('client_status', Int) ]) elif self.get_protocol_version() >= (1, 4, 0): response_end = Struct([ @@ -267,7 +270,7 @@ def _connect_version( with BinaryStream(self) as stream: hs_request.from_python(stream) - self.send(stream.getbuffer()) + self.send(stream.getbuffer(), reconnect=False) hs_response = self.read_response() if hs_response['op_code'] == 0: @@ -291,6 +294,8 @@ def _connect_version( client_patch=protocol_version[2], **hs_response ) + elif hs_response['client_status'] == CLIENT_STATUS_AUTH_FAILURE: + raise AuthenticationError(error_text) raise HandshakeError(( hs_response['version_major'], hs_response['version_minor'], @@ -313,12 +318,13 @@ def reconnect(self): except connection_errors: pass - def send(self, data: Union[bytes, bytearray, memoryview], flags=None): + def send(self, data: Union[bytes, bytearray, memoryview], flags=None, reconnect=True): """ Send data down the socket. 
:param data: bytes to send, :param flags: (optional) OS-specific flags. + :param reconnect: (optional) reconnect on failure, default True. """ if self.closed: raise SocketError('Attempt to use closed connection.') @@ -334,7 +340,13 @@ def send(self, data: Union[bytes, bytearray, memoryview], flags=None): self.reconnect() raise - def recv(self, flags=None) -> bytearray: + def recv(self, flags=None, reconnect=True) -> bytearray: + """ + Receive data from the socket. + + :param flags: (optional) OS-specific flags. + :param reconnect: (optional) reconnect on failure, default True. + """ def _recv(buffer, num_bytes): bytes_to_receive = num_bytes while bytes_to_receive > 0: @@ -344,7 +356,8 @@ def _recv(buffer, num_bytes): raise SocketError('Connection broken.') except connection_errors: self.failed = True - self.reconnect() + if reconnect: + self.reconnect() raise buffer = buffer[bytes_rcvd:] diff --git a/pyignite/constants.py b/pyignite/constants.py index fc840d6..02f7124 100644 --- a/pyignite/constants.py +++ b/pyignite/constants.py @@ -49,7 +49,7 @@ PROTOCOL_STRING_ENCODING = 'utf-8' PROTOCOL_CHAR_ENCODING = 'utf-16le' -SSL_DEFAULT_VERSION = ssl.PROTOCOL_TLSv1_1 +SSL_DEFAULT_VERSION = ssl.PROTOCOL_TLSv1_2 SSL_DEFAULT_CIPHERS = ssl._DEFAULT_CIPHERS FNV1_OFFSET_BASIS = 0x811c9dc5 diff --git a/pyignite/exceptions.py b/pyignite/exceptions.py index 1b41d32..5933228 100644 --- a/pyignite/exceptions.py +++ b/pyignite/exceptions.py @@ -25,6 +25,15 @@ class ParseError(Exception): pass +class AuthenticationError(Exception): + """ + This exception is raised on authentication failure. + """ + + def __init__(self, message: str): + self.message = message + + class HandshakeError(SocketError): """ This exception is raised on Ignite binary protocol handshake failure, diff --git a/requirements/install.txt b/requirements/install.txt index cecea8f..1ee12a9 100644 --- a/requirements/install.txt +++ b/requirements/install.txt @@ -1,3 +1,3 @@ # these pip packages are necessary for the pyignite to run -attrs==18.1.0 +attrs==20.3.0 diff --git a/requirements/setup.txt b/requirements/setup.txt index 7c55f83..d202467 100644 --- a/requirements/setup.txt +++ b/requirements/setup.txt @@ -1,3 +1,3 @@ # additional package for integrating pytest in setuptools -pytest-runner==4.2 +pytest-runner==5.3.0 diff --git a/requirements/tests.txt b/requirements/tests.txt index 893928e..5d5ae84 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -1,7 +1,7 @@ # these packages are used for testing -pytest==3.6.1 -pytest-cov==2.5.1 -teamcity-messages==1.21 -psutil==5.6.5 +pytest==6.2.2 +pytest-cov==2.11.1 +teamcity-messages==1.28 +psutil==5.8.0 jinja2==2.11.3 diff --git a/tests/affinity/conftest.py b/tests/affinity/conftest.py new file mode 100644 index 0000000..b682d01 --- /dev/null +++ b/tests/affinity/conftest.py @@ -0,0 +1,72 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from pyignite import Client +from pyignite.api import cache_create, cache_destroy +from tests.util import start_ignite_gen + + +@pytest.fixture(scope='module', autouse=True) +def server1(): + yield from start_ignite_gen(1) + + +@pytest.fixture(scope='module', autouse=True) +def server2(): + yield from start_ignite_gen(2) + + +@pytest.fixture(scope='module', autouse=True) +def server3(): + yield from start_ignite_gen(3) + + +@pytest.fixture +def client(): + client = Client(partition_aware=True) + + client.connect([('127.0.0.1', 10800 + i) for i in range(1, 4)]) + + yield client + + client.close() + + +@pytest.fixture +def client_not_connected(): + client = Client(partition_aware=True) + yield client + client.close() + + +@pytest.fixture +def cache(connected_client): + cache_name = 'my_bucket' + conn = connected_client.random_node + + cache_create(conn, cache_name) + yield cache_name + cache_destroy(conn, cache_name) + + +@pytest.fixture(scope='module', autouse=True) +def skip_if_no_affinity(request, server1): + client = Client(partition_aware=True) + client.connect('127.0.0.1', 10801) + + if not client.partition_awareness_supported_by_protocol: + pytest.skip(f'skipped {request.node.name}, partition awareness is not supported.') diff --git a/tests/test_affinity.py b/tests/affinity/test_affinity.py similarity index 80% rename from tests/test_affinity.py rename to tests/affinity/test_affinity.py index a55251b..ee8f6c0 100644 --- a/tests/test_affinity.py +++ b/tests/affinity/test_affinity.py @@ -27,12 +27,11 @@ from pyignite.datatypes.prop_codes import * -def test_get_node_partitions(client_partition_aware): +def test_get_node_partitions(client): + conn = client.random_node - conn = client_partition_aware.random_node - - cache_1 = client_partition_aware.get_or_create_cache('test_cache_1') - cache_2 = client_partition_aware.get_or_create_cache({ + cache_1 = client.get_or_create_cache('test_cache_1') + cache_2 = client.get_or_create_cache({ PROP_NAME: 'test_cache_2', PROP_CACHE_KEY_CONFIGURATION: [ { @@ -41,9 +40,9 @@ def test_get_node_partitions(client_partition_aware): } ], }) - cache_3 = client_partition_aware.get_or_create_cache('test_cache_3') - cache_4 = client_partition_aware.get_or_create_cache('test_cache_4') - cache_5 = client_partition_aware.get_or_create_cache('test_cache_5') + client.get_or_create_cache('test_cache_3') + client.get_or_create_cache('test_cache_4') + client.get_or_create_cache('test_cache_5') result = cache_get_node_partitions( conn, @@ -115,9 +114,8 @@ def test_get_node_partitions(client_partition_aware): ], ) -def test_affinity(client_partition_aware, key, key_hint): - - cache_1 = client_partition_aware.get_or_create_cache({ +def test_affinity(client, key, key_hint): + cache_1 = client.get_or_create_cache({ PROP_NAME: 'test_cache_1', PROP_CACHE_MODE: CacheMode.PARTITIONED, }) @@ -126,7 +124,7 @@ def test_affinity(client_partition_aware, key, key_hint): best_node = cache_1.get_best_node(key, key_hint=key_hint) - for node in filter(lambda n: n.alive, client_partition_aware._nodes): + for node in filter(lambda n: n.alive, client._nodes): result = cache_local_peek( node, cache_1.cache_id, key, key_hint=key_hint, ) @@ -142,9 +140,8 @@ def test_affinity(client_partition_aware, key, key_hint): cache_1.destroy() -def test_affinity_for_generic_object(client_partition_aware): - - cache_1 = client_partition_aware.get_or_create_cache({ +def 
test_affinity_for_generic_object(client): + cache_1 = client.get_or_create_cache({ PROP_NAME: 'test_cache_1', PROP_CACHE_MODE: CacheMode.PARTITIONED, }) @@ -166,7 +163,7 @@ class KeyClass( best_node = cache_1.get_best_node(key, key_hint=BinaryObject) - for node in filter(lambda n: n.alive, client_partition_aware._nodes): + for node in filter(lambda n: n.alive, client._nodes): result = cache_local_peek( node, cache_1.cache_id, key, key_hint=BinaryObject, ) @@ -182,16 +179,8 @@ class KeyClass( cache_1.destroy() -def test_affinity_for_generic_object_without_type_hints(client_partition_aware): - - if not client_partition_aware.partition_awareness_supported_by_protocol: - pytest.skip( - 'Best effort affinity is not supported by the protocol {}.'.format( - client_partition_aware.protocol_version - ) - ) - - cache_1 = client_partition_aware.get_or_create_cache({ +def test_affinity_for_generic_object_without_type_hints(client): + cache_1 = client.get_or_create_cache({ PROP_NAME: 'test_cache_1', PROP_CACHE_MODE: CacheMode.PARTITIONED, }) @@ -213,7 +202,7 @@ class KeyClass( best_node = cache_1.get_best_node(key) - for node in filter(lambda n: n.alive, client_partition_aware._nodes): + for node in filter(lambda n: n.alive, client._nodes): result = cache_local_peek( node, cache_1.cache_id, key ) diff --git a/tests/test_affinity_bad_servers.py b/tests/affinity/test_affinity_bad_servers.py similarity index 66% rename from tests/test_affinity_bad_servers.py rename to tests/affinity/test_affinity_bad_servers.py index dce09de..8abf4a0 100644 --- a/tests/test_affinity_bad_servers.py +++ b/tests/affinity/test_affinity_bad_servers.py @@ -16,22 +16,20 @@ import pytest from pyignite.exceptions import ReconnectError -from tests.util import * +from tests.util import start_ignite, kill_process_tree -def test_client_with_multiple_bad_servers(start_client): - client = start_client(partition_aware=True) +def test_client_with_multiple_bad_servers(client_not_connected): with pytest.raises(ReconnectError) as e_info: - client.connect([("127.0.0.1", 10900), ("127.0.0.1", 10901)]) + client_not_connected.connect([("127.0.0.1", 10900), ("127.0.0.1", 10901)]) assert str(e_info.value) == "Can not connect." 
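The fixtures above build the partition-aware client that the affinity tests rely on. For reference, a minimal standalone sketch of the same setup; ports and the cache name are illustrative:

```
from pyignite import Client

client = Client(partition_aware=True)
client.connect([('127.0.0.1', 10801), ('127.0.0.1', 10802), ('127.0.0.1', 10803)])

cache = client.get_or_create_cache('my_cache')
cache.put(1, 'one')                  # key-based requests go straight to the primary node
print(cache.get_best_node(1).port)   # connection chosen by the affinity mapping

client.close()
```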
-def test_client_with_failed_server(request, start_ignite_server, start_client): - srv = start_ignite_server(4) +def test_client_with_failed_server(request, client_not_connected): + srv = start_ignite(idx=4) try: - client = start_client() - client.connect([("127.0.0.1", 10804)]) - cache = client.get_or_create_cache(request.node.name) + client_not_connected.connect([("127.0.0.1", 10804)]) + cache = client_not_connected.get_or_create_cache(request.node.name) cache.put(1, 1) kill_process_tree(srv.pid) with pytest.raises(ConnectionResetError): @@ -40,17 +38,16 @@ def test_client_with_failed_server(request, start_ignite_server, start_client): kill_process_tree(srv.pid) -def test_client_with_recovered_server(request, start_ignite_server, start_client): - srv = start_ignite_server(4) +def test_client_with_recovered_server(request, client_not_connected): + srv = start_ignite(idx=4) try: - client = start_client() - client.connect([("127.0.0.1", 10804)]) - cache = client.get_or_create_cache(request.node.name) + client_not_connected.connect([("127.0.0.1", 10804)]) + cache = client_not_connected.get_or_create_cache(request.node.name) cache.put(1, 1) # Kill and restart server kill_process_tree(srv.pid) - srv = start_ignite_server(4) + srv = start_ignite(idx=4) # First request fails with pytest.raises(Exception): diff --git a/tests/test_affinity_request_routing.py b/tests/affinity/test_affinity_request_routing.py similarity index 89% rename from tests/test_affinity_request_routing.py rename to tests/affinity/test_affinity_request_routing.py index 3489dea..101db39 100644 --- a/tests/test_affinity_request_routing.py +++ b/tests/affinity/test_affinity_request_routing.py @@ -70,10 +70,8 @@ def check_grid_idx(): @pytest.mark.parametrize("key,grid_idx", [(1, 1), (2, 2), (3, 3), (4, 1), (5, 1), (6, 2), (11, 1), (13, 1), (19, 1)]) @pytest.mark.parametrize("backups", [0, 1, 2, 3]) -def test_cache_operation_on_primitive_key_routes_request_to_primary_node( - request, key, grid_idx, backups, client_partition_aware): - - cache = client_partition_aware.get_or_create_cache({ +def test_cache_operation_on_primitive_key_routes_request_to_primary_node(request, key, grid_idx, backups, client): + cache = client.get_or_create_cache({ PROP_NAME: request.node.name + str(backups), PROP_BACKUPS_NUMBER: backups, }) @@ -132,8 +130,7 @@ def test_cache_operation_on_complex_key_routes_request_to_primary_node(): @pytest.mark.parametrize("key,grid_idx", [(1, 2), (2, 1), (3, 1), (4, 2), (5, 2), (6, 3)]) @pytest.mark.skip(reason="Custom key objects are not supported yet") -def test_cache_operation_on_custom_affinity_key_routes_request_to_primary_node( - request, client_partition_aware, key, grid_idx): +def test_cache_operation_on_custom_affinity_key_routes_request_to_primary_node(request, client, key, grid_idx): class AffinityTestType1( metaclass=GenericObjectMeta, type_name='AffinityTestType1', @@ -153,7 +150,7 @@ class AffinityTestType1( }, ], } - cache = client_partition_aware.create_cache(cache_config) + cache = client.create_cache(cache_config) # noinspection PyArgumentList key_obj = AffinityTestType1( @@ -167,17 +164,18 @@ class AffinityTestType1( assert requests.pop() == grid_idx -def test_cache_operation_routed_to_new_cluster_node(request, start_ignite_server, start_client): - client = start_client(partition_aware=True) - client.connect([("127.0.0.1", 10801), ("127.0.0.1", 10802), ("127.0.0.1", 10803), ("127.0.0.1", 10804)]) - cache = client.get_or_create_cache(request.node.name) +def 
test_cache_operation_routed_to_new_cluster_node(request, client_not_connected): + client_not_connected.connect( + [("127.0.0.1", 10801), ("127.0.0.1", 10802), ("127.0.0.1", 10803), ("127.0.0.1", 10804)] + ) + cache = client_not_connected.get_or_create_cache(request.node.name) key = 12 wait_for_affinity_distribution(cache, key, 3) cache.put(key, key) cache.put(key, key) assert requests.pop() == 3 - srv = start_ignite_server(4) + srv = start_ignite(idx=4) try: # Wait for rebalance and partition map exchange wait_for_affinity_distribution(cache, key, 4) @@ -190,8 +188,8 @@ def test_cache_operation_routed_to_new_cluster_node(request, start_ignite_server kill_process_tree(srv.pid) -def test_replicated_cache_operation_routed_to_random_node(request, client_partition_aware): - cache = client_partition_aware.get_or_create_cache({ +def test_replicated_cache_operation_routed_to_random_node(request, client): + cache = client.get_or_create_cache({ PROP_NAME: request.node.name, PROP_CACHE_MODE: CacheMode.REPLICATED, }) diff --git a/tests/test_affinity_single_connection.py b/tests/affinity/test_affinity_single_connection.py similarity index 90% rename from tests/test_affinity_single_connection.py rename to tests/affinity/test_affinity_single_connection.py index 1943384..0768011 100644 --- a/tests/test_affinity_single_connection.py +++ b/tests/affinity/test_affinity_single_connection.py @@ -13,9 +13,21 @@ # See the License for the specific language governing permissions and # limitations under the License. +import pytest -def test_all_cache_operations_with_partition_aware_client_on_single_server(request, client_partition_aware_single_server): - cache = client_partition_aware_single_server.get_or_create_cache(request.node.name) +from pyignite import Client + + +@pytest.fixture(scope='module') +def client(): + client = Client(partition_aware=True) + client.connect('127.0.0.1', 10801) + yield client + client.close() + + +def test_all_cache_operations_with_partition_aware_client_on_single_server(request, client): + cache = client.get_or_create_cache(request.node.name) key = 1 key2 = 2 diff --git a/tests/common/conftest.py b/tests/common/conftest.py new file mode 100644 index 0000000..402aede --- /dev/null +++ b/tests/common/conftest.py @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
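The routing tests above configure caches through property codes rather than plain names. A compact standalone sketch of that pattern; the cache name is arbitrary:

```
from pyignite import Client
from pyignite.datatypes.cache_config import CacheMode
from pyignite.datatypes.prop_codes import PROP_NAME, PROP_CACHE_MODE, PROP_BACKUPS_NUMBER

client = Client(partition_aware=True)
client.connect('127.0.0.1', 10800)

cache = client.get_or_create_cache({
    PROP_NAME: 'routing_demo',
    PROP_CACHE_MODE: CacheMode.PARTITIONED,
    PROP_BACKUPS_NUMBER: 1,
})
```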
+ +import pytest + +from pyignite import Client +from pyignite.api import cache_create, cache_destroy +from tests.util import start_ignite_gen + + +@pytest.fixture(scope='module', autouse=True) +def server1(): + yield from start_ignite_gen(1) + + +@pytest.fixture(scope='module', autouse=True) +def server2(): + yield from start_ignite_gen(2) + + +@pytest.fixture(scope='module', autouse=True) +def server3(): + yield from start_ignite_gen(3) + + +@pytest.fixture(scope='module') +def client(): + client = Client() + + client.connect('127.0.0.1', 10801) + + yield client + + client.close() + + +@pytest.fixture +def cache(client): + cache_name = 'my_bucket' + conn = client.random_node + + cache_create(conn, cache_name) + yield cache_name + cache_destroy(conn, cache_name) diff --git a/tests/test_binary.py b/tests/common/test_binary.py similarity index 100% rename from tests/test_binary.py rename to tests/common/test_binary.py diff --git a/tests/test_cache_class.py b/tests/common/test_cache_class.py similarity index 100% rename from tests/test_cache_class.py rename to tests/common/test_cache_class.py diff --git a/tests/test_cache_class_sql.py b/tests/common/test_cache_class_sql.py similarity index 100% rename from tests/test_cache_class_sql.py rename to tests/common/test_cache_class_sql.py diff --git a/tests/test_cache_composite_key_class_sql.py b/tests/common/test_cache_composite_key_class_sql.py similarity index 100% rename from tests/test_cache_composite_key_class_sql.py rename to tests/common/test_cache_composite_key_class_sql.py diff --git a/tests/test_cache_config.py b/tests/common/test_cache_config.py similarity index 100% rename from tests/test_cache_config.py rename to tests/common/test_cache_config.py diff --git a/tests/test_datatypes.py b/tests/common/test_datatypes.py similarity index 100% rename from tests/test_datatypes.py rename to tests/common/test_datatypes.py diff --git a/tests/test_generic_object.py b/tests/common/test_generic_object.py similarity index 100% rename from tests/test_generic_object.py rename to tests/common/test_generic_object.py diff --git a/tests/test_get_names.py b/tests/common/test_get_names.py similarity index 100% rename from tests/test_get_names.py rename to tests/common/test_get_names.py diff --git a/tests/test_key_value.py b/tests/common/test_key_value.py similarity index 100% rename from tests/test_key_value.py rename to tests/common/test_key_value.py diff --git a/tests/test_scan.py b/tests/common/test_scan.py similarity index 100% rename from tests/test_scan.py rename to tests/common/test_scan.py diff --git a/tests/test_sql.py b/tests/common/test_sql.py similarity index 98% rename from tests/test_sql.py rename to tests/common/test_sql.py index f25fedd..cc68a02 100644 --- a/tests/test_sql.py +++ b/tests/common/test_sql.py @@ -182,7 +182,7 @@ def test_long_multipage_query(client): client.sql('DROP TABLE LongMultipageQuery IF EXISTS') client.sql("CREATE TABLE LongMultiPageQuery (%s, %s)" % - (fields[0] + " INT(11) PRIMARY KEY", ",".join(map(lambda f: f + " INT(11)", fields[1:])))) + (fields[0] + " INT(11) PRIMARY KEY", ",".join(map(lambda f: f + " INT(11)", fields[1:])))) for id in range(1, 21): client.sql( diff --git a/tests/config/ignite-config.xml.jinja2 b/tests/config/ignite-config.xml.jinja2 index 834b5d8..85daf0f 100644 --- a/tests/config/ignite-config.xml.jinja2 +++ b/tests/config/ignite-config.xml.jinja2 @@ -27,6 +27,20 @@ http://www.springframework.org/schema/util/spring-util.xsd"> + {% if use_auth %} + + + + + + + + + + + + {% endif %} + {% if 
use_ssl %} {% endif %} diff --git a/tests/conftest.py b/tests/conftest.py index bd86f9c..59b7d3a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -12,188 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -import argparse -from distutils.util import strtobool -import ssl - import pytest -from pyignite import Client -from pyignite.constants import * -from pyignite.api import cache_create, cache_destroy -from tests.util import _start_ignite, start_ignite_gen - - -class BoolParser(argparse.Action): - - def __call__(self, parser, namespace, values, option_string=None): - values = True if values is None else bool(strtobool(values)) - setattr(namespace, self.dest, values) - - -class CertReqsParser(argparse.Action): - conv_map = { - 'NONE': ssl.CERT_NONE, - 'OPTIONAL': ssl.CERT_OPTIONAL, - 'REQUIRED': ssl.CERT_REQUIRED, - } - - def __call__(self, parser, namespace, values, option_string=None): - value = values.upper() - if value in self.conv_map: - setattr(namespace, self.dest, self.conv_map[value]) - else: - raise ValueError( - 'Undefined argument: --ssl-cert-reqs={}'.format(value) - ) - - -class SSLVersionParser(argparse.Action): - conv_map = { - 'TLSV1_1': ssl.PROTOCOL_TLSv1_1, - 'TLSV1_2': ssl.PROTOCOL_TLSv1_2, - } - - def __call__(self, parser, namespace, values, option_string=None): - value = values.upper() - if value in self.conv_map: - setattr(namespace, self.dest, self.conv_map[value]) - else: - raise ValueError( - 'Undefined argument: --ssl-version={}'.format(value) - ) - - -@pytest.fixture(scope='session', autouse=True) -def server1(request): - yield from start_ignite_server_gen(1, request) - - -@pytest.fixture(scope='session', autouse=True) -def server2(request): - yield from start_ignite_server_gen(2, request) - - -@pytest.fixture(scope='session', autouse=True) -def server3(request): - yield from start_ignite_server_gen(3, request) - - -@pytest.fixture(scope='module') -def start_ignite_server(use_ssl): - def start(idx=1): - return _start_ignite(idx, use_ssl=use_ssl) - - return start - - -def start_ignite_server_gen(idx, request): - use_ssl = request.config.getoption("--use-ssl") - yield from start_ignite_gen(idx, use_ssl) - - -@pytest.fixture(scope='module') -def client( - node, timeout, partition_aware, use_ssl, ssl_keyfile, ssl_keyfile_password, - ssl_certfile, ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, ssl_version, - username, password, -): - yield from client0(node, timeout, partition_aware, use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, - ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, ssl_version, username, password) - - -@pytest.fixture(scope='module') -def client_partition_aware( - node, timeout, use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, - ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, ssl_version, username, - password -): - yield from client0(node, timeout, True, use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, ssl_ca_certfile, - ssl_cert_reqs, ssl_ciphers, ssl_version, username, password) - - -@pytest.fixture(scope='module') -def client_partition_aware_single_server( - node, timeout, use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, - ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, ssl_version, username, - password -): - node = node[:1] - yield from client0(node, timeout, True, use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, ssl_ca_certfile, - ssl_cert_reqs, ssl_ciphers, 
ssl_version, username, password) - - -@pytest.fixture -def cache(client): - cache_name = 'my_bucket' - conn = client.random_node - - cache_create(conn, cache_name) - yield cache_name - cache_destroy(conn, cache_name) - - -@pytest.fixture(scope='module') -def start_client(use_ssl, ssl_keyfile, ssl_keyfile_password, ssl_certfile, ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, - ssl_version,username, password): - def start(**kwargs): - cli_kw = kwargs.copy() - cli_kw.update({ - 'use_ssl': use_ssl, - 'ssl_keyfile': ssl_keyfile, - 'ssl_keyfile_password': ssl_keyfile_password, - 'ssl_certfile': ssl_certfile, - 'ssl_ca_certfile': ssl_ca_certfile, - 'ssl_cert_reqs': ssl_cert_reqs, - 'ssl_ciphers': ssl_ciphers, - 'ssl_version': ssl_version, - 'username': username, - 'password': password - }) - return Client(**cli_kw) - - return start - - -def client0( - node, timeout, partition_aware, use_ssl, ssl_keyfile, ssl_keyfile_password, - ssl_certfile, ssl_ca_certfile, ssl_cert_reqs, ssl_ciphers, ssl_version, - username, password, -): - client = Client( - timeout=timeout, - partition_aware=partition_aware, - use_ssl=use_ssl, - ssl_keyfile=ssl_keyfile, - ssl_keyfile_password=ssl_keyfile_password, - ssl_certfile=ssl_certfile, - ssl_ca_certfile=ssl_ca_certfile, - ssl_cert_reqs=ssl_cert_reqs, - ssl_ciphers=ssl_ciphers, - ssl_version=ssl_version, - username=username, - password=password, - ) - nodes = [] - for n in node: - host, port = n.split(':') - port = int(port) - nodes.append((host, port)) - client.connect(nodes) - yield client - client.close() - - -@pytest.fixture -def examples(request): - return request.config.getoption("--examples") - @pytest.fixture(autouse=True) -def run_examples(request, examples): +def run_examples(request): + run_examples = request.config.getoption("--examples") if request.node.get_closest_marker('examples'): - if not examples: + if not run_examples: pytest.skip('skipped examples: --examples is not passed') @@ -213,103 +39,6 @@ def skip_if_no_cext(request): def pytest_addoption(parser): - parser.addoption( - '--node', - action='append', - default=None, - help=( - 'Ignite binary protocol test server connection string ' - '(default: "localhost:10801")' - ) - ) - parser.addoption( - '--timeout', - action='store', - type=float, - default=2.0, - help=( - 'Timeout (in seconds) for each socket operation. Can accept ' - 'integer or float value. 
Default is None' - ) - ) - parser.addoption( - '--partition-aware', - action=BoolParser, - nargs='?', - default=False, - help='Turn on the best effort affinity feature' - ) - parser.addoption( - '--use-ssl', - action=BoolParser, - nargs='?', - default=False, - help='Use SSL encryption' - ) - parser.addoption( - '--ssl-keyfile', - action='store', - default=None, - type=str, - help='a path to SSL key file to identify local party' - ) - parser.addoption( - '--ssl-keyfile-password', - action='store', - default=None, - type=str, - help='password for SSL key file' - ) - parser.addoption( - '--ssl-certfile', - action='store', - default=None, - type=str, - help='a path to ssl certificate file to identify local party' - ) - parser.addoption( - '--ssl-ca-certfile', - action='store', - default=None, - type=str, - help='a path to a trusted certificate or a certificate chain' - ) - parser.addoption( - '--ssl-cert-reqs', - action=CertReqsParser, - default=ssl.CERT_NONE, - help=( - 'determines how the remote side certificate is treated: ' - 'NONE (ignore, default), ' - 'OPTIONAL (validate, if provided) or ' - 'REQUIRED (valid remote certificate is required)' - ) - ) - parser.addoption( - '--ssl-ciphers', - action='store', - default=SSL_DEFAULT_CIPHERS, - type=str, - help='ciphers to use' - ) - parser.addoption( - '--ssl-version', - action=SSLVersionParser, - default=SSL_DEFAULT_VERSION, - help='SSL version: TLSV1_1 or TLSV1_2' - ) - parser.addoption( - '--username', - action='store', - type=str, - help='user name' - ) - parser.addoption( - '--password', - action='store', - type=str, - help='password' - ) parser.addoption( '--examples', action='store_true', @@ -322,38 +51,11 @@ def pytest_addoption(parser): ) -def pytest_generate_tests(metafunc): - session_parameters = { - 'node': ['{host}:{port}'.format(host='127.0.0.1', port=10801), - '{host}:{port}'.format(host='127.0.0.1', port=10802), - '{host}:{port}'.format(host='127.0.0.1', port=10803)], - 'timeout': None, - 'partition_aware': False, - 'use_ssl': False, - 'ssl_keyfile': None, - 'ssl_keyfile_password': None, - 'ssl_certfile': None, - 'ssl_ca_certfile': None, - 'ssl_cert_reqs': ssl.CERT_NONE, - 'ssl_ciphers': SSL_DEFAULT_CIPHERS, - 'ssl_version': SSL_DEFAULT_VERSION, - 'username': None, - 'password': None, - } - - for param_name in session_parameters: - if param_name in metafunc.fixturenames: - param = metafunc.config.getoption(param_name) - # TODO: This does not work for bool - if param is None: - param = session_parameters[param_name] - if param_name == 'node' or type(param) is not list: - param = [param] - metafunc.parametrize(param_name, param, scope='session') - - def pytest_configure(config): - config.addinivalue_line( - "markers", "examples: mark test to run only if --examples are set\n" - "skip_if_no_cext: mark test to run only if c extension is available" - ) + marker_docs = [ + "skip_if_no_cext: mark test to run only if c extension is available", + "examples: mark test to run only if --examples are set" + ] + + for marker_doc in marker_docs: + config.addinivalue_line("markers", marker_doc) diff --git a/tests/security/conftest.py b/tests/security/conftest.py new file mode 100644 index 0000000..d5de5a1 --- /dev/null +++ b/tests/security/conftest.py @@ -0,0 +1,49 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os + +import pytest + +from tests.util import get_test_dir + + +@pytest.fixture +def ssl_params(): + yield __create_ssl_param(False) + + +@pytest.fixture +def ssl_params_with_password(): + yield __create_ssl_param(True) + + +def __create_ssl_param(with_password=False): + cert_path = os.path.join(get_test_dir(), 'config', 'ssl') + + if with_password: + cert = os.path.join(cert_path, 'client_with_pass_full.pem') + return { + 'ssl_keyfile': cert, + 'ssl_keyfile_password': '654321', + 'ssl_certfile': cert, + 'ssl_ca_certfile': cert, + } + else: + cert = os.path.join(cert_path, 'client_full.pem') + return { + 'ssl_keyfile': cert, + 'ssl_certfile': cert, + 'ssl_ca_certfile': cert + } diff --git a/tests/security/test_auth.py b/tests/security/test_auth.py new file mode 100644 index 0000000..2dd19a0 --- /dev/null +++ b/tests/security/test_auth.py @@ -0,0 +1,63 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import pytest + +from pyignite.exceptions import AuthenticationError +from tests.util import start_ignite_gen, clear_ignite_work_dir, get_client + +DEFAULT_IGNITE_USERNAME = 'ignite' +DEFAULT_IGNITE_PASSWORD = 'ignite' + + +@pytest.fixture(params=['with-ssl', 'without-ssl']) +def with_ssl(request): + return request.param == 'with-ssl' + + +@pytest.fixture(autouse=True) +def server(with_ssl, cleanup): + yield from start_ignite_gen(use_ssl=with_ssl, use_auth=True) + + +@pytest.fixture(scope='module', autouse=True) +def cleanup(): + clear_ignite_work_dir() + yield None + clear_ignite_work_dir() + + +def test_auth_success(with_ssl, ssl_params): + ssl_params['use_ssl'] = with_ssl + + with get_client(username=DEFAULT_IGNITE_USERNAME, password=DEFAULT_IGNITE_PASSWORD, **ssl_params) as client: + client.connect("127.0.0.1", 10801) + + assert all(node.alive for node in client._nodes) + + +@pytest.mark.parametrize( + 'username, password', + [ + [DEFAULT_IGNITE_USERNAME, None], + ['invalid_user', 'invalid_password'], + [None, None] + ] +) +def test_auth_failed(username, password, with_ssl, ssl_params): + ssl_params['use_ssl'] = with_ssl + + with pytest.raises(AuthenticationError): + with get_client(username=username, password=password, **ssl_params) as client: + client.connect("127.0.0.1", 10801) diff --git a/tests/security/test_ssl.py b/tests/security/test_ssl.py new file mode 100644 index 0000000..6463a03 --- /dev/null +++ b/tests/security/test_ssl.py @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
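The authentication tests above drive everything through the `get_client` helper, but the same credentials map directly onto the public client constructor; `username` and `password` are among the options already listed in `tests/conftest.py`. A hedged sketch of the equivalent direct call (illustrative; assumes a node started with `use_auth=True`, as in the `server` fixture above):

```
from pyignite import Client

client = Client(username='ignite', password='ignite')  # default credentials used by the tests
client.connect('127.0.0.1', 10801)                      # raises AuthenticationError on bad credentials
try:
    print(client.get_cache_names())
finally:
    client.close()
```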
+import pytest + +from pyignite.exceptions import ReconnectError +from tests.util import start_ignite_gen, get_client, get_or_create_cache + + +@pytest.fixture(scope='module', autouse=True) +def server(): + yield from start_ignite_gen(use_ssl=True, use_auth=False) + + +def test_connect_ssl_keystore_with_password(ssl_params_with_password): + __test_connect_ssl(**ssl_params_with_password) + + +def test_connect_ssl(ssl_params): + __test_connect_ssl(**ssl_params) + +def __test_connect_ssl(**kwargs): + kwargs['use_ssl'] = True + + with get_client(**kwargs) as client: + client.connect("127.0.0.1", 10801) + + with get_or_create_cache(client, 'test-cache') as cache: + cache.put(1, 1) + + assert cache.get(1) == 1 + + +@pytest.mark.parametrize( + 'invalid_ssl_params', + [ + {'use_ssl': False}, + {'use_ssl': True}, + {'use_ssl': True, 'ssl_keyfile': 'invalid.pem', 'ssl_certfile': 'invalid.pem'} + ] +) +def test_connection_error_with_incorrect_config(invalid_ssl_params): + with pytest.raises(ReconnectError): + with get_client(**invalid_ssl_params) as client: + client.connect([("127.0.0.1", 10801)]) diff --git a/tests/test_examples.py b/tests/test_examples.py index 046eb6d..f90ed17 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -12,40 +12,41 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import glob +import os import subprocess import sys import pytest +from tests.util import get_test_dir, start_ignite_gen SKIP_LIST = [ 'failover.py', # it hangs by design ] -def run_subprocess_34(script: str): - return subprocess.call([ - 'python', - '../examples/{}'.format(script), - ]) +def examples_scripts_gen(): + examples_dir = os.path.join(get_test_dir(), '..', 'examples') + for script in glob.glob1(examples_dir, '*.py'): + if script not in SKIP_LIST: + yield os.path.join(examples_dir, script) -def run_subprocess_35(script: str): - return subprocess.run([ - 'python', - '../examples/{}'.format(script), - ]).returncode +@pytest.fixture(autouse=True) +def server(): + yield from start_ignite_gen(idx=0) # idx=0, because 10800 port is needed for examples. @pytest.mark.examples -def test_examples(): - for script in glob.glob1('../examples', '*.py'): - if script not in SKIP_LIST: - # `subprocess` module was refactored in Python 3.5 - if sys.version_info >= (3, 5): - return_code = run_subprocess_35(script) - else: - return_code = run_subprocess_34(script) - assert return_code == 0 +@pytest.mark.parametrize( + 'example_script', + examples_scripts_gen() +) +def test_examples(example_script): + proc = subprocess.run([ + sys.executable, + example_script + ]) + + assert proc.returncode == 0 diff --git a/tests/util.py b/tests/util.py index 90f0146..af4c324 100644 --- a/tests/util.py +++ b/tests/util.py @@ -12,9 +12,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
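The TLS tests above build their keyword arguments from a single PEM bundle under `tests/config/ssl`; the same parameter names are accepted by the public client and match the command-line options defined in `tests/conftest.py`. A minimal sketch of configuring TLS directly (illustrative; the certificate path assumes the repository layout used by these tests):

```
from pyignite import Client

cert = 'tests/config/ssl/client_full.pem'   # key, certificate and CA chain in one file
client = Client(
    use_ssl=True,
    ssl_keyfile=cert,
    ssl_certfile=cert,
    ssl_ca_certfile=cert,
)
client.connect('127.0.0.1', 10801)
client.close()
```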
- +import contextlib import glob import os +import shutil import jinja2 as jinja2 import psutil @@ -23,6 +24,26 @@ import subprocess import time +from pyignite import Client + + +@contextlib.contextmanager +def get_client(**kwargs): + client = Client(**kwargs) + try: + yield client + finally: + client.close() + + +@contextlib.contextmanager +def get_or_create_cache(client, cache_name): + cache = client.get_or_create_cache(cache_name) + try: + yield cache + finally: + cache.destroy() + def wait_for_condition(condition, interval=0.1, timeout=10, error=None): start = time.time() @@ -111,7 +132,7 @@ def create_config_file(tpl_name, file_name, **kwargs): f.write(template.render(**kwargs)) -def _start_ignite(idx=1, debug=False, use_ssl=False): +def start_ignite(idx=1, debug=False, use_ssl=False, use_auth=False): clear_logs(idx) runner = get_ignite_runner() @@ -122,7 +143,8 @@ def _start_ignite(idx=1, debug=False, use_ssl=False): env["JVM_OPTS"] = "-Djava.net.preferIPv4Stack=true -Xdebug -Xnoagent -Djava.compiler=NONE " \ "-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005 " - params = {'ignite_instance_idx': str(idx), 'ignite_client_port': 10800 + idx, 'use_ssl': use_ssl} + params = {'ignite_instance_idx': str(idx), 'ignite_client_port': 10800 + idx, 'use_ssl': use_ssl, + 'use_auth': use_auth} create_config_file('log4j.xml.jinja2', f'log4j-{idx}.xml', **params) create_config_file('ignite-config.xml.jinja2', f'ignite-config-{idx}.xml', **params) @@ -140,10 +162,12 @@ def _start_ignite(idx=1, debug=False, use_ssl=False): raise Exception("Failed to start Ignite: timeout while trying to connect") -def start_ignite_gen(idx=1, use_ssl=False): - srv = _start_ignite(idx, use_ssl=use_ssl) - yield srv - kill_process_tree(srv.pid) +def start_ignite_gen(idx=1, use_ssl=False, use_auth=False): + srv = start_ignite(idx, use_ssl=use_ssl, use_auth=use_auth) + try: + yield srv + finally: + kill_process_tree(srv.pid) def get_log_files(idx=1): @@ -151,6 +175,13 @@ def get_log_files(idx=1): return glob.glob(logs_pattern) +def clear_ignite_work_dir(): + for ignite_dir in get_ignite_dirs(): + work_dir = os.path.join(ignite_dir, 'work') + if os.path.exists(work_dir): + shutil.rmtree(work_dir, ignore_errors=True) + + def clear_logs(idx=1): for f in get_log_files(idx): os.remove(f) diff --git a/tox.ini b/tox.ini index 104a705..3ab8dea 100644 --- a/tox.ini +++ b/tox.ini @@ -15,7 +15,7 @@ [tox] skipsdist = True -envlist = py{36,37,38}-{no-ssl,ssl,ssl-password} +envlist = py{36,37,38,39} [testenv] passenv = TEAMCITY_VERSION IGNITE_HOME @@ -26,44 +26,8 @@ deps = recreate = True usedevelop = True commands = - pytest {env:PYTESTARGS:} {posargs} --force-cext + pytest {env:PYTESTARGS:} {posargs} --force-cext --examples -[jenkins] +[testenv:py{36,37,38,39}-jenkins] setenv: PYTESTARGS = --junitxml=junit-{envname}.xml - -[no-ssl] -setenv: - PYTEST_ADDOPTS = --examples - -[ssl] -setenv: - PYTEST_ADDOPTS = --examples --use-ssl=True --ssl-certfile={toxinidir}/tests/config/ssl/client_full.pem --ssl-version=TLSV1_2 - -[ssl-password] -setenv: - PYTEST_ADDOPTS = --examples --use-ssl=True --ssl-certfile={toxinidir}/tests/config/ssl/client_with_pass_full.pem --ssl-keyfile-password=654321 --ssl-version=TLSV1_2 - -[testenv:py{36,37,38}-no-ssl] -setenv: {[no-ssl]setenv} - -[testenv:py{36,37,38}-ssl] -setenv: {[ssl]setenv} - -[testenv:py{36,37,38}-ssl-password] -setenv: {[ssl-password]setenv} - -[testenv:py{36,37,38}-jenkins-no-ssl] -setenv: - {[no-ssl]setenv} - {[jenkins]setenv} - -[testenv:py{36,37,38}-jenkins-ssl] -setenv: - 
{[ssl]setenv} - {[jenkins]setenv} - -[testenv:py{36,37,38}-jenkins-ssl-password] -setenv: - {[ssl-password]setenv} - {[jenkins]setenv} From 6f56a3bf30f98c2a960d6f909f97bd2094e64473 Mon Sep 17 00:00:00 2001 From: Ivan Dashchinskiy Date: Fri, 26 Feb 2021 22:52:58 +0300 Subject: [PATCH 19/62] IGNITE-14245 Fix infinite loop while trying to get affinity mapping on failed node This closes #20 --- .travis.yml | 51 +++++++++++++++++ pyignite/cache.py | 1 + tests/affinity/conftest.py | 8 ++- tests/affinity/test_affinity_bad_servers.py | 63 +++++++++++++-------- 4 files changed, 98 insertions(+), 25 deletions(-) create mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..7e726be --- /dev/null +++ b/.travis.yml @@ -0,0 +1,51 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +language: python +sudo: required + +addons: + apt: + packages: + - openjdk-8-jdk + +env: + global: + - IGNITE_VERSION=2.9.1 + - IGNITE_HOME=/opt/ignite + +before_install: + - curl -L https://apache-mirror.rbc.ru/pub/apache/ignite/${IGNITE_VERSION}/apache-ignite-slim-${IGNITE_VERSION}-bin.zip > ignite.zip + - unzip ignite.zip -d /opt + - mv /opt/apache-ignite-slim-${IGNITE_VERSION}-bin /opt/ignite + - mv /opt/ignite/libs/optional/ignite-log4j2 /opt/ignite/libs/ + +jobs: + include: + - python: '3.6' + arch: amd64 + env: TOXENV=py36 + - python: '3.7' + arch: amd64 + env: TOXENV=py37 + - python: '3.8' + arch: amd64 + env: TOXENV=py38 + - python: '3.9' + arch: amd64 + env: TOXENV=py39 + +install: pip install tox +script: tox \ No newline at end of file diff --git a/pyignite/cache.py b/pyignite/cache.py index ea672a8..a91a3cf 100644 --- a/pyignite/cache.py +++ b/pyignite/cache.py @@ -264,6 +264,7 @@ def get_best_node( break except connection_errors: # retry if connection failed + conn = self._client.random_node pass except CacheError: # server did not create mapping in time diff --git a/tests/affinity/conftest.py b/tests/affinity/conftest.py index b682d01..7595f25 100644 --- a/tests/affinity/conftest.py +++ b/tests/affinity/conftest.py @@ -19,6 +19,10 @@ from pyignite.api import cache_create, cache_destroy from tests.util import start_ignite_gen +# Sometimes on slow testing servers and unstable topology +# default timeout is not enough for cache ops. 
+CLIENT_SOCKET_TIMEOUT = 20.0 + @pytest.fixture(scope='module', autouse=True) def server1(): @@ -37,7 +41,7 @@ def server3(): @pytest.fixture def client(): - client = Client(partition_aware=True) + client = Client(partition_aware=True, timeout=CLIENT_SOCKET_TIMEOUT) client.connect([('127.0.0.1', 10800 + i) for i in range(1, 4)]) @@ -48,7 +52,7 @@ def client(): @pytest.fixture def client_not_connected(): - client = Client(partition_aware=True) + client = Client(partition_aware=True, timeout=CLIENT_SOCKET_TIMEOUT) yield client client.close() diff --git a/tests/affinity/test_affinity_bad_servers.py b/tests/affinity/test_affinity_bad_servers.py index 8abf4a0..6fd08d5 100644 --- a/tests/affinity/test_affinity_bad_servers.py +++ b/tests/affinity/test_affinity_bad_servers.py @@ -16,45 +16,62 @@ import pytest from pyignite.exceptions import ReconnectError -from tests.util import start_ignite, kill_process_tree +from tests.affinity.conftest import CLIENT_SOCKET_TIMEOUT +from tests.util import start_ignite, kill_process_tree, get_client -def test_client_with_multiple_bad_servers(client_not_connected): +@pytest.fixture(params=['with-partition-awareness', 'without-partition-awareness']) +def with_partition_awareness(request): + yield request.param == 'with-partition-awareness' + + +def test_client_with_multiple_bad_servers(with_partition_awareness): with pytest.raises(ReconnectError) as e_info: - client_not_connected.connect([("127.0.0.1", 10900), ("127.0.0.1", 10901)]) + with get_client(partition_aware=with_partition_awareness) as client: + client.connect([("127.0.0.1", 10900), ("127.0.0.1", 10901)]) assert str(e_info.value) == "Can not connect." -def test_client_with_failed_server(request, client_not_connected): +def test_client_with_failed_server(request, with_partition_awareness): srv = start_ignite(idx=4) try: - client_not_connected.connect([("127.0.0.1", 10804)]) - cache = client_not_connected.get_or_create_cache(request.node.name) - cache.put(1, 1) - kill_process_tree(srv.pid) - with pytest.raises(ConnectionResetError): - cache.get(1) + with get_client(partition_aware=with_partition_awareness) as client: + client.connect([("127.0.0.1", 10804)]) + cache = client.get_or_create_cache(request.node.name) + cache.put(1, 1) + kill_process_tree(srv.pid) + + if with_partition_awareness: + ex_class = (ReconnectError, ConnectionResetError) + else: + ex_class = ConnectionResetError + + with pytest.raises(ex_class): + cache.get(1) finally: kill_process_tree(srv.pid) -def test_client_with_recovered_server(request, client_not_connected): +def test_client_with_recovered_server(request, with_partition_awareness): srv = start_ignite(idx=4) try: - client_not_connected.connect([("127.0.0.1", 10804)]) - cache = client_not_connected.get_or_create_cache(request.node.name) - cache.put(1, 1) + with get_client(partition_aware=with_partition_awareness, timeout=CLIENT_SOCKET_TIMEOUT) as client: + client.connect([("127.0.0.1", 10804)]) + cache = client.get_or_create_cache(request.node.name) + cache.put(1, 1) - # Kill and restart server - kill_process_tree(srv.pid) - srv = start_ignite(idx=4) + # Kill and restart server + kill_process_tree(srv.pid) + srv = start_ignite(idx=4) - # First request fails - with pytest.raises(Exception): - cache.put(1, 2) + # First request may fail. 
+ try: + cache.put(1, 2) + except: + pass - # Retry succeeds - cache.put(1, 2) - assert cache.get(1) == 2 + # Retry succeeds + cache.put(1, 2) + assert cache.get(1) == 2 finally: kill_process_tree(srv.pid) From cfc2fe4c795d974164773c33c4db9e60eaaeabb3 Mon Sep 17 00:00:00 2001 From: Ivan Dashchinskiy Date: Tue, 23 Mar 2021 21:01:29 +0300 Subject: [PATCH 20/62] IGNITE-13911 Asyncio version of client This closes #21 --- .gitignore | 1 + .travis.yml | 3 + examples/create_binary.py | 76 +-- examples/sql.py | 2 +- pyignite/__init__.py | 3 + pyignite/aio_cache.py | 600 +++++++++++++++++ pyignite/aio_client.py | 358 +++++++++++ pyignite/api/__init__.py | 76 +-- pyignite/api/affinity.py | 100 +-- pyignite/api/binary.py | 122 ++-- pyignite/api/cache_config.py | 229 +++---- pyignite/api/key_value.py | 604 ++++++++++++------ pyignite/api/result.py | 2 +- pyignite/api/sql.py | 139 ++-- pyignite/binary.py | 93 ++- pyignite/cache.py | 435 +++++-------- pyignite/client.py | 509 +++++++-------- pyignite/connection/__init__.py | 3 +- pyignite/connection/aio_connection.py | 242 +++++++ pyignite/connection/connection.py | 276 ++++---- pyignite/connection/handshake.py | 72 ++- pyignite/connection/ssl.py | 72 ++- pyignite/cursors.py | 319 +++++++++ pyignite/datatypes/__init__.py | 19 - pyignite/datatypes/base.py | 32 +- pyignite/datatypes/cache_properties.py | 19 +- pyignite/datatypes/complex.py | 497 ++++++++++---- pyignite/datatypes/internal.py | 317 ++++++--- pyignite/datatypes/null_object.py | 96 ++- pyignite/datatypes/primitive.py | 3 +- pyignite/datatypes/primitive_arrays.py | 26 +- pyignite/datatypes/primitive_objects.py | 50 +- pyignite/datatypes/standard.py | 72 +-- pyignite/exceptions.py | 2 +- pyignite/queries/__init__.py | 2 +- pyignite/queries/query.py | 129 +++- pyignite/queries/response.py | 238 +++++-- pyignite/stream/__init__.py | 4 +- pyignite/stream/binary_stream.py | 75 ++- pyignite/utils.py | 49 +- requirements/tests.txt | 3 + setup.py | 12 +- tests/affinity/conftest.py | 35 +- tests/affinity/test_affinity.py | 432 ++++++++----- tests/affinity/test_affinity_bad_servers.py | 63 +- .../affinity/test_affinity_request_routing.py | 278 +++++--- .../test_affinity_single_connection.py | 107 +++- tests/common/conftest.py | 38 +- tests/common/test_binary.py | 443 ++++++++----- tests/common/test_cache_class.py | 241 ++++--- tests/common/test_cache_class_sql.py | 103 --- .../test_cache_composite_key_class_sql.py | 122 ---- tests/common/test_cache_config.py | 128 ++-- tests/common/test_datatypes.py | 368 ++++++----- tests/common/test_generic_object.py | 3 +- tests/common/test_get_names.py | 23 +- tests/common/test_key_value.py | 577 ++++++++--------- tests/common/test_scan.py | 170 +++-- tests/common/test_sql.py | 425 ++++++------ tests/common/test_sql_composite_key.py | 168 +++++ tests/conftest.py | 12 +- tests/security/test_auth.py | 39 +- tests/security/test_ssl.py | 61 +- tests/test_cutils.py | 4 +- tests/util.py | 44 +- tox.ini | 10 +- 66 files changed, 6523 insertions(+), 3352 deletions(-) create mode 100644 pyignite/aio_cache.py create mode 100644 pyignite/aio_client.py create mode 100644 pyignite/connection/aio_connection.py create mode 100644 pyignite/cursors.py delete mode 100644 tests/common/test_cache_class_sql.py delete mode 100644 tests/common/test_cache_composite_key_class_sql.py create mode 100644 tests/common/test_sql_composite_key.py diff --git a/.gitignore b/.gitignore index 699c26d..14ec495 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ *.so build distr +docs/generated 
tests/config/*.xml junit*.xml pyignite.egg-info diff --git a/.travis.yml b/.travis.yml index 7e726be..74909b8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -43,6 +43,9 @@ jobs: - python: '3.8' arch: amd64 env: TOXENV=py38 + - python: '3.8' + arch: amd64 + env: TOXENV=codestyle - python: '3.9' arch: amd64 env: TOXENV=py39 diff --git a/examples/create_binary.py b/examples/create_binary.py index c963796..b199527 100644 --- a/examples/create_binary.py +++ b/examples/create_binary.py @@ -23,44 +23,44 @@ client.connect('127.0.0.1', 10800) student_cache = client.create_cache({ - PROP_NAME: 'SQL_PUBLIC_STUDENT', - PROP_SQL_SCHEMA: 'PUBLIC', - PROP_QUERY_ENTITIES: [ - { - 'table_name': 'Student'.upper(), - 'key_field_name': 'SID', - 'key_type_name': 'java.lang.Integer', - 'field_name_aliases': [], - 'query_fields': [ - { - 'name': 'SID', - 'type_name': 'java.lang.Integer', - 'is_key_field': True, - 'is_notnull_constraint_field': True, - }, - { - 'name': 'NAME', - 'type_name': 'java.lang.String', - }, - { - 'name': 'LOGIN', - 'type_name': 'java.lang.String', - }, - { - 'name': 'AGE', - 'type_name': 'java.lang.Integer', - }, - { - 'name': 'GPA', - 'type_name': 'java.math.Double', - }, - ], - 'query_indexes': [], - 'value_type_name': 'SQL_PUBLIC_STUDENT_TYPE', - 'value_field_name': None, - }, - ], - }) + PROP_NAME: 'SQL_PUBLIC_STUDENT', + PROP_SQL_SCHEMA: 'PUBLIC', + PROP_QUERY_ENTITIES: [ + { + 'table_name': 'Student'.upper(), + 'key_field_name': 'SID', + 'key_type_name': 'java.lang.Integer', + 'field_name_aliases': [], + 'query_fields': [ + { + 'name': 'SID', + 'type_name': 'java.lang.Integer', + 'is_key_field': True, + 'is_notnull_constraint_field': True, + }, + { + 'name': 'NAME', + 'type_name': 'java.lang.String', + }, + { + 'name': 'LOGIN', + 'type_name': 'java.lang.String', + }, + { + 'name': 'AGE', + 'type_name': 'java.lang.Integer', + }, + { + 'name': 'GPA', + 'type_name': 'java.math.Double', + }, + ], + 'query_indexes': [], + 'value_type_name': 'SQL_PUBLIC_STUDENT_TYPE', + 'value_field_name': None, + }, + ], +}) class Student( diff --git a/examples/sql.py b/examples/sql.py index 8f0ee7c..0e8c729 100644 --- a/examples/sql.py +++ b/examples/sql.py @@ -280,7 +280,7 @@ field_data = list(*result) print('City info:') -for field_name, field_value in zip(field_names*len(field_data), field_data): +for field_name, field_value in zip(field_names * len(field_data), field_data): print('{}: {}'.format(field_name, field_value)) # City info: # ID: 3802 diff --git a/pyignite/__init__.py b/pyignite/__init__.py index 0ac346f..c26c59a 100644 --- a/pyignite/__init__.py +++ b/pyignite/__init__.py @@ -14,4 +14,7 @@ # limitations under the License. from pyignite.client import Client +from pyignite.aio_client import AioClient from pyignite.binary import GenericObjectMeta + +__version__ = '0.4.0-dev' diff --git a/pyignite/aio_cache.py b/pyignite/aio_cache.py new file mode 100644 index 0000000..b92a14c --- /dev/null +++ b/pyignite/aio_cache.py @@ -0,0 +1,600 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import asyncio +from typing import Any, Dict, Iterable, Optional, Union + +from .constants import AFFINITY_RETRIES, AFFINITY_DELAY +from .connection import AioConnection +from .datatypes import prop_codes +from .datatypes.base import IgniteDataType +from .datatypes.internal import AnyDataObject +from .exceptions import CacheCreationError, CacheError, ParameterError, connection_errors +from .utils import cache_id, status_to_exception +from .api.cache_config import ( + cache_create_async, cache_get_or_create_async, cache_destroy_async, cache_get_configuration_async, + cache_create_with_config_async, cache_get_or_create_with_config_async +) +from .api.key_value import ( + cache_get_async, cache_contains_key_async, cache_clear_key_async, cache_clear_keys_async, cache_clear_async, + cache_replace_async, cache_put_all_async, cache_get_all_async, cache_put_async, cache_contains_keys_async, + cache_get_and_put_async, cache_get_and_put_if_absent_async, cache_put_if_absent_async, cache_get_and_remove_async, + cache_get_and_replace_async, cache_remove_key_async, cache_remove_keys_async, cache_remove_all_async, + cache_remove_if_equals_async, cache_replace_if_equals_async, cache_get_size_async, +) +from .cursors import AioScanCursor +from .api.affinity import cache_get_node_partitions_async +from .cache import __parse_settings, BaseCacheMixin + + +async def get_cache(client: 'AioClient', settings: Union[str, dict]) -> 'AioCache': + name, settings = __parse_settings(settings) + if settings: + raise ParameterError('Only cache name allowed as a parameter') + + return AioCache(client, name) + + +async def create_cache(client: 'AioClient', settings: Union[str, dict]) -> 'AioCache': + name, settings = __parse_settings(settings) + + conn = await client.random_node() + if settings: + result = await cache_create_with_config_async(conn, settings) + else: + result = await cache_create_async(conn, name) + + if result.status != 0: + raise CacheCreationError(result.message) + + return AioCache(client, name) + + +async def get_or_create_cache(client: 'AioClient', settings: Union[str, dict]) -> 'AioCache': + name, settings = __parse_settings(settings) + + conn = await client.random_node() + if settings: + result = await cache_get_or_create_with_config_async(conn, settings) + else: + result = await cache_get_or_create_async(conn, name) + + if result.status != 0: + raise CacheCreationError(result.message) + + return AioCache(client, name) + + +class AioCache(BaseCacheMixin): + """ + Ignite cache abstraction. Users should never use this class directly, + but construct its instances with + :py:meth:`~pyignite.client.Client.create_cache`, + :py:meth:`~pyignite.client.Client.get_or_create_cache` or + :py:meth:`~pyignite.client.Client.get_cache` methods instead. See + :ref:`this example ` on how to do it. + """ + def __init__(self, client: 'AioClient', name: str): + """ + Initialize async cache object. For internal use. + + :param client: Async Ignite client, + :param name: Cache name. 
+ """ + self._client = client + self._name = name + self._cache_id = cache_id(self._name) + self._settings = None + self._affinity_query_mux = asyncio.Lock() + self.affinity = {'version': (0, 0)} + + async def settings(self) -> Optional[dict]: + """ + Lazy Cache settings. See the :ref:`example ` + of reading this property. + + All cache properties are documented here: :ref:`cache_props`. + + :return: dict of cache properties and their values. + """ + if self._settings is None: + conn = await self.get_best_node() + config_result = await cache_get_configuration_async(conn, self._cache_id) + + if config_result.status == 0: + self._settings = config_result.value + else: + raise CacheError(config_result.message) + + return self._settings + + async def name(self) -> str: + """ + Lazy cache name. + + :return: cache name string. + """ + if self._name is None: + settings = await self.settings() + self._name = settings[prop_codes.PROP_NAME] + + return self._name + + @property + def client(self) -> 'AioClient': + """ + Ignite :class:`~pyignite.aio_client.AioClient` object. + + :return: Async client object, through which the cache is accessed. + """ + return self._client + + @property + def cache_id(self) -> int: + """ + Cache ID. + + :return: integer value of the cache ID. + """ + return self._cache_id + + @status_to_exception(CacheError) + async def destroy(self): + """ + Destroys cache with a given name. + """ + conn = await self.get_best_node() + return await cache_destroy_async(conn, self._cache_id) + + @status_to_exception(CacheError) + async def _get_affinity(self, conn: 'AioConnection') -> Dict: + """ + Queries server for affinity mappings. Retries in case + of an intermittent error (most probably “Getting affinity for topology + version earlier than affinity is calculated”). + + :param conn: connection to Igneite server, + :return: OP_CACHE_PARTITIONS operation result value. + """ + for _ in range(AFFINITY_RETRIES or 1): + result = await cache_get_node_partitions_async(conn, self._cache_id) + if result.status == 0 and result.value['partition_mapping']: + break + await asyncio.sleep(AFFINITY_DELAY) + + return result + + async def get_best_node(self, key: Any = None, key_hint: 'IgniteDataType' = None) -> 'AioConnection': + """ + Returns the node from the list of the nodes, opened by client, that + most probably contains the needed key-value pair. See IEP-23. + + This method is not a part of the public API. Unless you wish to + extend the `pyignite` capabilities (with additional testing, logging, + examining connections, et c.) you probably should not use it. + + :param key: (optional) pythonic key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :return: Ignite connection object. 
+ """ + conn = await self._client.random_node() + + if self.client.partition_aware and key is not None: + if self.__should_update_mapping(): + async with self._affinity_query_mux: + while self.__should_update_mapping(): + try: + full_affinity = await self._get_affinity(conn) + self._update_affinity(full_affinity) + + asyncio.ensure_future( + asyncio.gather( + *[conn.reconnect() for conn in self.client._nodes if not conn.alive], + return_exceptions=True + ) + ) + + break + except connection_errors: + # retry if connection failed + conn = await self._client.random_node() + pass + except CacheError: + # server did not create mapping in time + return conn + + parts = self.affinity.get('number_of_partitions') + + if not parts: + return conn + + key, key_hint = self._get_affinity_key(key, key_hint) + + hashcode = await key_hint.hashcode_async(key, self._client) + + best_node = self._get_node_by_hashcode(hashcode, parts) + if best_node: + return best_node + + return conn + + def __should_update_mapping(self): + return self.affinity['version'] < self._client.affinity_version + + @status_to_exception(CacheError) + async def get(self, key, key_hint: object = None) -> Any: + """ + Retrieves a value from cache by key. + + :param key: key for the cache entry. Can be of any supported type, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :return: value retrieved. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self.get_best_node(key, key_hint) + result = await cache_get_async(conn, self._cache_id, key, key_hint=key_hint) + result.value = await self.client.unwrap_binary(result.value) + return result + + @status_to_exception(CacheError) + async def put(self, key, value, key_hint: object = None, value_hint: object = None): + """ + Puts a value with a given key to cache (overwriting existing value + if any). + + :param key: key for the cache entry. Can be of any supported type, + :param value: value for the key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param value_hint: (optional) Ignite data type, for which the given + value should be converted. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self.get_best_node(key, key_hint) + return await cache_put_async(conn, self._cache_id, key, value, key_hint=key_hint, value_hint=value_hint) + + @status_to_exception(CacheError) + async def get_all(self, keys: list) -> list: + """ + Retrieves multiple key-value pairs from cache. + + :param keys: list of keys or tuples of (key, key_hint), + :return: a dict of key-value pairs. + """ + conn = await self.get_best_node() + result = await cache_get_all_async(conn, self._cache_id, keys) + if result.value: + keys = list(result.value.keys()) + values = await asyncio.gather(*[self.client.unwrap_binary(value) for value in result.value.values()]) + + for i, key in enumerate(keys): + result.value[key] = values[i] + return result + + @status_to_exception(CacheError) + async def put_all(self, pairs: dict): + """ + Puts multiple key-value pairs to cache (overwriting existing + associations if any). + + :param pairs: dictionary type parameters, contains key-value pairs + to save. 
Each key or value can be an item of representable + Python type or a tuple of (item, hint), + """ + conn = await self.get_best_node() + return await cache_put_all_async(conn, self._cache_id, pairs) + + @status_to_exception(CacheError) + async def replace(self, key, value, key_hint: object = None, value_hint: object = None): + """ + Puts a value with a given key to cache only if the key already exist. + + :param key: key for the cache entry. Can be of any supported type, + :param value: value for the key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param value_hint: (optional) Ignite data type, for which the given + value should be converted. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self.get_best_node(key, key_hint) + result = await cache_replace_async(conn, self._cache_id, key, value, key_hint=key_hint, value_hint=value_hint) + result.value = await self.client.unwrap_binary(result.value) + return result + + @status_to_exception(CacheError) + async def clear(self, keys: Optional[list] = None): + """ + Clears the cache without notifying listeners or cache writers. + + :param keys: (optional) list of cache keys or (key, key type + hint) tuples to clear (default: clear all). + """ + conn = await self.get_best_node() + if keys: + return await cache_clear_keys_async(conn, self._cache_id, keys) + else: + return await cache_clear_async(conn, self._cache_id) + + @status_to_exception(CacheError) + async def clear_key(self, key, key_hint: object = None): + """ + Clears the cache key without notifying listeners or cache writers. + + :param key: key for the cache entry, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self.get_best_node(key, key_hint) + return await cache_clear_key_async(conn, self._cache_id, key, key_hint=key_hint) + + @status_to_exception(CacheError) + async def clear_keys(self, keys: Iterable): + """ + Clears the cache key without notifying listeners or cache writers. + + :param keys: a list of keys or (key, type hint) tuples + """ + conn = await self.get_best_node() + return await cache_clear_keys_async(conn, self._cache_id, keys) + + @status_to_exception(CacheError) + async def contains_key(self, key, key_hint=None) -> bool: + """ + Returns a value indicating whether given key is present in cache. + + :param key: key for the cache entry. Can be of any supported type, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :return: boolean `True` when key is present, `False` otherwise. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self.get_best_node(key, key_hint) + return await cache_contains_key_async(conn, self._cache_id, key, key_hint=key_hint) + + @status_to_exception(CacheError) + async def contains_keys(self, keys: Iterable) -> bool: + """ + Returns a value indicating whether all given keys are present in cache. + + :param keys: a list of keys or (key, type hint) tuples, + :return: boolean `True` when all keys are present, `False` otherwise. 
+ """ + conn = await self.get_best_node() + return await cache_contains_keys_async(conn, self._cache_id, keys) + + @status_to_exception(CacheError) + async def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: + """ + Puts a value with a given key to cache, and returns the previous value + for that key, or null value if there was not such key. + + :param key: key for the cache entry. Can be of any supported type, + :param value: value for the key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param value_hint: (optional) Ignite data type, for which the given + value should be converted. + :return: old value or None. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self.get_best_node(key, key_hint) + result = await cache_get_and_put_async(conn, self._cache_id, key, value, key_hint, value_hint) + + result.value = await self.client.unwrap_binary(result.value) + return result + + @status_to_exception(CacheError) + async def get_and_put_if_absent(self, key, value, key_hint=None, value_hint=None): + """ + Puts a value with a given key to cache only if the key does not + already exist. + + :param key: key for the cache entry. Can be of any supported type, + :param value: value for the key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param value_hint: (optional) Ignite data type, for which the given + value should be converted, + :return: old value or None. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self.get_best_node(key, key_hint) + result = await cache_get_and_put_if_absent_async(conn, self._cache_id, key, value, key_hint, value_hint) + result.value = await self.client.unwrap_binary(result.value) + return result + + @status_to_exception(CacheError) + async def put_if_absent(self, key, value, key_hint=None, value_hint=None): + """ + Puts a value with a given key to cache only if the key does not + already exist. + + :param key: key for the cache entry. Can be of any supported type, + :param value: value for the key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param value_hint: (optional) Ignite data type, for which the given + value should be converted. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self.get_best_node(key, key_hint) + return await cache_put_if_absent_async(conn, self._cache_id, key, value, key_hint, value_hint) + + @status_to_exception(CacheError) + async def get_and_remove(self, key, key_hint=None) -> Any: + """ + Removes the cache entry with specified key, returning the value. + + :param key: key for the cache entry. Can be of any supported type, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :return: old value or None. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self.get_best_node(key, key_hint) + result = await cache_get_and_remove_async(conn, self._cache_id, key, key_hint) + result.value = await self.client.unwrap_binary(result.value) + return result + + @status_to_exception(CacheError) + async def get_and_replace(self, key, value, key_hint=None, value_hint=None) -> Any: + """ + Puts a value with a given key to cache, returning previous value + for that key, if and only if there is a value currently mapped + for that key. 
+ + :param key: key for the cache entry. Can be of any supported type, + :param value: value for the key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param value_hint: (optional) Ignite data type, for which the given + value should be converted. + :return: old value or None. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self.get_best_node(key, key_hint) + result = await cache_get_and_replace_async(conn, self._cache_id, key, value, key_hint, value_hint) + result.value = await self.client.unwrap_binary(result.value) + return result + + @status_to_exception(CacheError) + async def remove_key(self, key, key_hint=None): + """ + Clears the cache key without notifying listeners or cache writers. + + :param key: key for the cache entry, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self.get_best_node(key, key_hint) + return await cache_remove_key_async(conn, self._cache_id, key, key_hint) + + @status_to_exception(CacheError) + async def remove_keys(self, keys: list): + """ + Removes cache entries by given list of keys, notifying listeners + and cache writers. + + :param keys: list of keys or tuples of (key, key_hint) to remove. + """ + conn = await self.get_best_node() + return await cache_remove_keys_async(conn, self._cache_id, keys) + + @status_to_exception(CacheError) + async def remove_all(self): + """ + Removes all cache entries, notifying listeners and cache writers. + """ + conn = await self.get_best_node() + return await cache_remove_all_async(conn, self._cache_id) + + @status_to_exception(CacheError) + async def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): + """ + Removes an entry with a given key if provided value is equal to + actual value, notifying listeners and cache writers. + + :param key: key for the cache entry, + :param sample: a sample to compare the stored value with, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param sample_hint: (optional) Ignite data type, for whic + the given sample should be converted. + """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self.get_best_node(key, key_hint) + return await cache_remove_if_equals_async(conn, self._cache_id, key, sample, key_hint, sample_hint) + + @status_to_exception(CacheError) + async def replace_if_equals(self, key, sample, value, key_hint=None, sample_hint=None, value_hint=None) -> Any: + """ + Puts a value with a given key to cache only if the key already exists + and value equals provided sample. + + :param key: key for the cache entry, + :param sample: a sample to compare the stored value with, + :param value: new value for the given key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :param sample_hint: (optional) Ignite data type, for whic + the given sample should be converted + :param value_hint: (optional) Ignite data type, for which the given + value should be converted, + :return: boolean `True` when key is present, `False` otherwise. 
+ """ + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + conn = await self.get_best_node(key, key_hint) + result = await cache_replace_if_equals_async(conn, self._cache_id, key, sample, value, key_hint, sample_hint, + value_hint) + result.value = await self.client.unwrap_binary(result.value) + return result + + @status_to_exception(CacheError) + async def get_size(self, peek_modes=0): + """ + Gets the number of entries in cache. + + :param peek_modes: (optional) limit count to near cache partition + (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache + (PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL), + :return: integer number of cache entries. + """ + conn = await self.get_best_node() + return await cache_get_size_async(conn, self._cache_id, peek_modes) + + def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False): + """ + Returns all key-value pairs from the cache, similar to `get_all`, but + with internal pagination, which is slower, but safer. + + :param page_size: (optional) page size. Default size is 1 (slowest + and safest), + :param partitions: (optional) number of partitions to query + (negative to query entire cache), + :param local: (optional) pass True if this query should be executed + on local node only. Defaults to False, + :return: async scan query cursor + """ + return AioScanCursor(self.client, self._cache_id, page_size, partitions, local) diff --git a/pyignite/aio_client.py b/pyignite/aio_client.py new file mode 100644 index 0000000..d882969 --- /dev/null +++ b/pyignite/aio_client.py @@ -0,0 +1,358 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import asyncio +import random +from itertools import chain +from typing import Iterable, Type, Union, Any + +from .api.binary import get_binary_type_async, put_binary_type_async +from .api.cache_config import cache_get_names_async +from .client import BaseClient +from .cursors import AioSqlFieldsCursor +from .aio_cache import AioCache, get_cache, create_cache, get_or_create_cache +from .connection import AioConnection +from .constants import IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT +from .datatypes import BinaryObject +from .exceptions import BinaryTypeError, CacheError, ReconnectError, connection_errors +from .stream import AioBinaryStream, READ_BACKWARD +from .utils import cache_id, entity_id, status_to_exception, is_iterable, is_wrapped + + +__all__ = ['AioClient'] + + +class AioClient(BaseClient): + """ + Asynchronous Client implementation. + """ + + def __init__(self, compact_footer: bool = None, partition_aware: bool = False, **kwargs): + """ + Initialize client. + + :param compact_footer: (optional) use compact (True, recommended) or + full (False) schema approach when serializing Complex objects. 
+ Default is to use the same approach the server is using (None). + Apache Ignite binary protocol documentation on this topic: + https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-schema + :param partition_aware: (optional) try to calculate the exact data + placement from the key before to issue the key operation to the + server node: + https://cwiki.apache.org/confluence/display/IGNITE/IEP-23%3A+Best+Effort+Affinity+for+thin+clients + The feature is in experimental status, so the parameter is `False` + by default. This will be changed later. + """ + super().__init__(compact_footer, partition_aware, **kwargs) + self._registry_mux = asyncio.Lock() + + async def connect(self, *args): + """ + Connect to Ignite cluster node(s). + + :param args: (optional) host(s) and port(s) to connect to. + """ + nodes = self._process_connect_args(*args) + + for i, node in enumerate(nodes): + host, port = node + conn = AioConnection(self, host, port, **self._connection_args) + + if not self.partition_aware: + try: + if self.protocol_version is None: + # open connection before adding to the pool + await conn.connect() + + # do not try to open more nodes + self._current_node = i + + except connection_errors: + conn.failed = True + + self._nodes.append(conn) + + if self.partition_aware: + connect_results = await asyncio.gather( + *[conn.connect() for conn in self._nodes], + return_exceptions=True + ) + + reconnect_coro = [] + for i, res in enumerate(connect_results): + if isinstance(res, Exception): + if isinstance(res, connection_errors): + reconnect_coro.append(self._nodes[i].reconnect()) + else: + raise res + + await asyncio.gather(*reconnect_coro, return_exceptions=True) + + if self.protocol_version is None: + raise ReconnectError('Can not connect.') + + async def close(self): + await asyncio.gather(*[conn.close() for conn in self._nodes], return_exceptions=True) + self._nodes.clear() + + async def random_node(self) -> AioConnection: + """ + Returns random usable node. + + This method is not a part of the public API. Unless you wish to + extend the `pyignite` capabilities (with additional testing, logging, + examining connections, et c.) you probably should not use it. 
+ """ + if self.partition_aware: + # if partition awareness is used just pick a random connected node + return await self._get_random_node() + else: + # if partition awareness is not used then just return the current + # node if it's alive or the next usable node if connection with the + # current is broken + node = self._nodes[self._current_node] + if node.alive: + return node + + # close current (supposedly failed) node + await self._nodes[self._current_node].close() + + # advance the node index + self._current_node += 1 + if self._current_node >= len(self._nodes): + self._current_node = 0 + + # prepare the list of node indexes to try to connect to + for i in chain(range(self._current_node, len(self._nodes)), range(self._current_node)): + node = self._nodes[i] + try: + await node.connect() + except connection_errors: + pass + else: + return node + + # no nodes left + raise ReconnectError('Can not reconnect: out of nodes.') + + async def _get_random_node(self, reconnect=True): + alive_nodes = [n for n in self._nodes if n.alive] + if alive_nodes: + return random.choice(alive_nodes) + elif reconnect: + await asyncio.gather(*[n.reconnect() for n in self._nodes], return_exceptions=True) + return await self._get_random_node(reconnect=False) + else: + # cannot choose from an empty sequence + raise ReconnectError('Can not reconnect: out of nodes.') from None + + @status_to_exception(BinaryTypeError) + async def get_binary_type(self, binary_type: Union[str, int]) -> dict: + """ + Gets the binary type information from the Ignite server. This is quite + a low-level implementation of Ignite thin client protocol's + `OP_GET_BINARY_TYPE` operation. You would probably want to use + :py:meth:`~pyignite.client.Client.query_binary_type` instead. + + :param binary_type: binary type name or ID, + :return: binary type description − a dict with the following fields: + + - `type_exists`: True if the type is registered, False otherwise. In + the latter case all the following fields are omitted, + - `type_id`: Complex object type ID, + - `type_name`: Complex object type name, + - `affinity_key_field`: string value or None, + - `is_enum`: False in case of Complex object registration, + - `schemas`: a list, containing the Complex object schemas in format: + OrderedDict[field name: field type hint]. A schema can be empty. + """ + conn = await self.random_node() + result = await get_binary_type_async(conn, binary_type) + return self._process_get_binary_type_result(result) + + @status_to_exception(BinaryTypeError) + async def put_binary_type(self, type_name: str, affinity_key_field: str = None, is_enum=False, schema: dict = None): + """ + Registers binary type information in cluster. Do not update binary + registry. This is a literal implementation of Ignite thin client + protocol's `OP_PUT_BINARY_TYPE` operation. You would probably want + to use :py:meth:`~pyignite.client.Client.register_binary_type` instead. + + :param type_name: name of the data type being registered, + :param affinity_key_field: (optional) name of the affinity key field, + :param is_enum: (optional) register enum if True, binary object + otherwise. Defaults to False, + :param schema: (optional) when register enum, pass a dict + of enumerated parameter names as keys and an integers as values. + When register binary type, pass a dict of field names: field types. + Binary type with no fields is OK. 
+ """ + conn = await self.random_node() + return await put_binary_type_async(conn, type_name, affinity_key_field, is_enum, schema) + + async def register_binary_type(self, data_class: Type, affinity_key_field: str = None): + """ + Register the given class as a representation of a certain Complex + object type. Discards autogenerated or previously registered class. + + :param data_class: Complex object class, + :param affinity_key_field: (optional) affinity parameter. + """ + if not await self.query_binary_type(data_class.type_id, data_class.schema_id): + await self.put_binary_type(data_class.type_name, affinity_key_field, schema=data_class.schema) + + self._registry[data_class.type_id][data_class.schema_id] = data_class + + async def query_binary_type(self, binary_type: Union[int, str], schema: Union[int, dict] = None): + """ + Queries the registry of Complex object classes. + + :param binary_type: Complex object type name or ID, + :param schema: (optional) Complex object schema or schema ID, + :return: found dataclass or None, if `schema` parameter is provided, + a dict of {schema ID: dataclass} format otherwise. + """ + type_id = entity_id(binary_type) + + result = self._get_from_registry(type_id, schema) + + if not result: + async with self._registry_mux: + result = self._get_from_registry(type_id, schema) + + if not result: + type_info = await self.get_binary_type(type_id) + self._sync_binary_registry(type_id, type_info) + return self._get_from_registry(type_id, schema) + + return result + + async def unwrap_binary(self, value: Any) -> Any: + """ + Detects and recursively unwraps Binary Object. + + :param value: anything that could be a Binary Object, + :return: the result of the Binary Object unwrapping with all other data + left intact. + """ + if is_wrapped(value): + blob, offset = value + with AioBinaryStream(self, blob) as stream: + data_class = await BinaryObject.parse_async(stream) + return await BinaryObject.to_python_async(stream.read_ctype(data_class, direction=READ_BACKWARD), self) + return value + + async def create_cache(self, settings: Union[str, dict]) -> 'AioCache': + """ + Creates Ignite cache by name. Raises `CacheError` if such a cache is + already exists. + + :param settings: cache name or dict of cache properties' codes + and values. All cache properties are documented here: + :ref:`cache_props`. See also the + :ref:`cache creation example `, + :return: :class:`~pyignite.cache.Cache` object. + """ + return await create_cache(self, settings) + + async def get_or_create_cache(self, settings: Union[str, dict]) -> 'AioCache': + """ + Creates Ignite cache, if not exist. + + :param settings: cache name or dict of cache properties' codes + and values. All cache properties are documented here: + :ref:`cache_props`. See also the + :ref:`cache creation example `, + :return: :class:`~pyignite.cache.Cache` object. + """ + return await get_or_create_cache(self, settings) + + async def get_cache(self, settings: Union[str, dict]) -> 'AioCache': + """ + Creates Cache object with a given cache name without checking it up + on server. If such a cache does not exist, some kind of exception + (most probably `CacheError`) may be raised later. + + :param settings: cache name or cache properties (but only `PROP_NAME` + property is allowed), + :return: :class:`~pyignite.cache.Cache` object. + """ + return await get_cache(self, settings) + + @status_to_exception(CacheError) + async def get_cache_names(self) -> list: + """ + Gets existing cache names. + + :return: list of cache names. 
+ """ + conn = await self.random_node() + return await cache_get_names_async(conn) + + def sql( + self, query_str: str, page_size: int = 1024, + query_args: Iterable = None, schema: str = 'PUBLIC', + statement_type: int = 0, distributed_joins: bool = False, + local: bool = False, replicated_only: bool = False, + enforce_join_order: bool = False, collocated: bool = False, + lazy: bool = False, include_field_names: bool = False, + max_rows: int = -1, timeout: int = 0, + cache: Union[int, str, 'AioCache'] = None + ): + """ + Runs an SQL query and returns its result. + + :param query_str: SQL query string, + :param page_size: (optional) cursor page size. Default is 1024, which + means that client makes one server call per 1024 rows, + :param query_args: (optional) query arguments. List of values or + (value, type hint) tuples, + :param schema: (optional) schema for the query. Defaults to `PUBLIC`, + :param statement_type: (optional) statement type. Can be: + + * StatementType.ALL − any type (default), + * StatementType.SELECT − select, + * StatementType.UPDATE − update. + + :param distributed_joins: (optional) distributed joins. Defaults + to False, + :param local: (optional) pass True if this query should be executed + on local node only. Defaults to False, + :param replicated_only: (optional) whether query contains only + replicated tables or not. Defaults to False, + :param enforce_join_order: (optional) enforce join order. Defaults + to False, + :param collocated: (optional) whether your data is co-located or not. + Defaults to False, + :param lazy: (optional) lazy query execution. Defaults to False, + :param include_field_names: (optional) include field names in result. + Defaults to False, + :param max_rows: (optional) query-wide maximum of rows. Defaults to -1 + (all rows), + :param timeout: (optional) non-negative timeout value in ms. + Zero disables timeout (default), + :param cache (optional) Name or ID of the cache to use to infer schema. + If set, 'schema' argument is ignored, + :return: generator with result rows as a lists. If + `include_field_names` was set, the first row will hold field names. + """ + + c_id = cache.cache_id if isinstance(cache, AioCache) else cache_id(cache) + + if c_id != 0: + schema = None + + return AioSqlFieldsCursor(self, c_id, query_str, page_size, query_args, schema, statement_type, + distributed_joins, local, replicated_only, enforce_join_order, collocated, + lazy, include_field_names, max_rows, timeout) diff --git a/pyignite/api/__init__.py b/pyignite/api/__init__.py index 7dbef0a..7deed8c 100644 --- a/pyignite/api/__init__.py +++ b/pyignite/api/__init__.py @@ -23,53 +23,55 @@ stable end user API see :mod:`pyignite.client` module. 
""" +# flake8: noqa + from .affinity import ( - cache_get_node_partitions, + cache_get_node_partitions, cache_get_node_partitions_async, ) from .cache_config import ( - cache_create, - cache_get_names, - cache_get_or_create, - cache_destroy, - cache_get_configuration, - cache_create_with_config, - cache_get_or_create_with_config, + cache_create, cache_create_async, + cache_get_names, cache_get_names_async, + cache_get_or_create, cache_get_or_create_async, + cache_destroy, cache_destroy_async, + cache_get_configuration, cache_get_configuration_async, + cache_create_with_config, cache_create_with_config_async, + cache_get_or_create_with_config, cache_get_or_create_with_config_async, ) from .key_value import ( - cache_get, - cache_put, - cache_get_all, - cache_put_all, - cache_contains_key, - cache_contains_keys, - cache_get_and_put, - cache_get_and_replace, - cache_get_and_remove, - cache_put_if_absent, - cache_get_and_put_if_absent, - cache_replace, - cache_replace_if_equals, - cache_clear, - cache_clear_key, - cache_clear_keys, - cache_remove_key, - cache_remove_if_equals, - cache_remove_keys, - cache_remove_all, - cache_get_size, - cache_local_peek, + cache_get, cache_get_async, + cache_put, cache_put_async, + cache_get_all, cache_get_all_async, + cache_put_all, cache_put_all_async, + cache_contains_key, cache_contains_key_async, + cache_contains_keys, cache_contains_keys_async, + cache_get_and_put, cache_get_and_put_async, + cache_get_and_replace, cache_get_and_replace_async, + cache_get_and_remove, cache_get_and_remove_async, + cache_put_if_absent, cache_put_if_absent_async, + cache_get_and_put_if_absent, cache_get_and_put_if_absent_async, + cache_replace, cache_replace_async, + cache_replace_if_equals, cache_replace_if_equals_async, + cache_clear, cache_clear_async, + cache_clear_key, cache_clear_key_async, + cache_clear_keys, cache_clear_keys_async, + cache_remove_key, cache_remove_key_async, + cache_remove_if_equals, cache_remove_if_equals_async, + cache_remove_keys, cache_remove_keys_async, + cache_remove_all, cache_remove_all_async, + cache_get_size, cache_get_size_async, + cache_local_peek, cache_local_peek_async, ) from .sql import ( - scan, - scan_cursor_get_page, + scan, scan_async, + scan_cursor_get_page, scan_cursor_get_page_async, sql, sql_cursor_get_page, - sql_fields, - sql_fields_cursor_get_page, - resource_close, + sql_fields, sql_fields_async, + sql_fields_cursor_get_page, sql_fields_cursor_get_page_async, + resource_close, resource_close_async ) from .binary import ( - get_binary_type, - put_binary_type, + get_binary_type, get_binary_type_async, + put_binary_type, put_binary_type_async ) from .result import APIResult diff --git a/pyignite/api/affinity.py b/pyignite/api/affinity.py index 7d09517..ddf1e7a 100644 --- a/pyignite/api/affinity.py +++ b/pyignite/api/affinity.py @@ -15,9 +15,10 @@ from typing import Iterable, Union +from pyignite.connection import AioConnection, Connection from pyignite.datatypes import Bool, Int, Long, UUIDObject from pyignite.datatypes.internal import StructArray, Conditional, Struct -from pyignite.queries import Query +from pyignite.queries import Query, query_perform from pyignite.queries.op_codes import OP_CACHE_PARTITIONS from pyignite.utils import is_iterable from .result import APIResult @@ -67,10 +68,7 @@ ]) -def cache_get_node_partitions( - conn: 'Connection', caches: Union[int, Iterable[int]], - query_id: int = None, -) -> APIResult: +def cache_get_node_partitions(conn: 'Connection', caches: Union[int, Iterable[int]], query_id: int 
= None) -> APIResult: """ Gets partition mapping for an Ignite cache or a number of caches. See “IEP-23: Best Effort Affinity for thin clients”. @@ -82,6 +80,62 @@ def cache_get_node_partitions( is generated, :return: API result data object. """ + return __cache_get_node_partitions(conn, caches, query_id) + + +async def cache_get_node_partitions_async(conn: 'AioConnection', caches: Union[int, Iterable[int]], + query_id: int = None) -> APIResult: + """ + Async version of cache_get_node_partitions. + """ + return await __cache_get_node_partitions(conn, caches, query_id) + + +def __post_process_partitions(result): + if result.status == 0: + # tidying up the result + value = { + 'version': ( + result.value['version_major'], + result.value['version_minor'] + ), + 'partition_mapping': {}, + } + for partition_map in result.value['partition_mapping']: + is_applicable = partition_map['is_applicable'] + + node_mapping = None + if is_applicable: + node_mapping = { + p['node_uuid']: set(x['partition_id'] for x in p['node_partitions']) + for p in partition_map['node_mapping'] + } + + for cache_info in partition_map['cache_mapping']: + cache_id = cache_info['cache_id'] + + cache_partition_mapping = { + 'is_applicable': is_applicable, + } + + parts = 0 + if is_applicable: + cache_partition_mapping['cache_config'] = { + a['key_type_id']: a['affinity_key_field_id'] + for a in cache_info['cache_config'] + } + cache_partition_mapping['node_mapping'] = node_mapping + + parts = sum(len(p) for p in cache_partition_mapping['node_mapping'].values()) + + cache_partition_mapping['number_of_partitions'] = parts + + value['partition_mapping'][cache_id] = cache_partition_mapping + result.value = value + return result + + +def __cache_get_node_partitions(conn, caches, query_id): query_struct = Query( OP_CACHE_PARTITIONS, [ @@ -92,7 +146,8 @@ def cache_get_node_partitions( if not is_iterable(caches): caches = [caches] - result = query_struct.perform( + return query_perform( + query_struct, conn, query_params={ 'cache_ids': [{'cache_id': cache} for cache in caches], @@ -102,36 +157,5 @@ def cache_get_node_partitions( ('version_minor', Int), ('partition_mapping', partition_mapping), ], + post_process_fun=__post_process_partitions ) - if result.status == 0: - # tidying up the result - value = { - 'version': ( - result.value['version_major'], - result.value['version_minor'] - ), - 'partition_mapping': [], - } - for i, partition_map in enumerate(result.value['partition_mapping']): - cache_id = partition_map['cache_mapping'][0]['cache_id'] - value['partition_mapping'].insert( - i, - { - 'cache_id': cache_id, - 'is_applicable': partition_map['is_applicable'], - } - ) - if partition_map['is_applicable']: - value['partition_mapping'][i]['cache_config'] = { - a['key_type_id']: a['affinity_key_field_id'] - for a in partition_map['cache_mapping'][0]['cache_config'] - } - value['partition_mapping'][i]['node_mapping'] = { - p['node_uuid']: [ - x['partition_id'] for x in p['node_partitions'] - ] - for p in partition_map['node_mapping'] - } - result.value = value - - return result diff --git a/pyignite/api/binary.py b/pyignite/api/binary.py index 87a5232..345e8e8 100644 --- a/pyignite/api/binary.py +++ b/pyignite/api/binary.py @@ -15,17 +15,15 @@ from typing import Union -from pyignite.constants import * -from pyignite.datatypes.binary import ( - body_struct, enum_struct, schema_struct, binary_fields_struct, -) +from pyignite.connection import Connection, AioConnection +from pyignite.constants import PROTOCOL_BYTE_ORDER +from 
pyignite.datatypes.binary import enum_struct, schema_struct, binary_fields_struct from pyignite.datatypes import String, Int, Bool -from pyignite.queries import Query -from pyignite.queries.op_codes import * +from pyignite.queries import Query, query_perform +from pyignite.queries.op_codes import OP_GET_BINARY_TYPE, OP_PUT_BINARY_TYPE from pyignite.utils import entity_id, schema_id from .result import APIResult -from ..stream import BinaryStream, READ_BACKWARD -from ..queries.response import Response +from ..queries.response import BinaryTypeResponse def get_binary_type(conn: 'Connection', binary_type: Union[str, int], query_id=None) -> APIResult: @@ -39,75 +37,33 @@ def get_binary_type(conn: 'Connection', binary_type: Union[str, int], query_id=N is generated, :return: API result data object. """ + return __get_binary_type(conn, binary_type, query_id) + +async def get_binary_type_async(conn: 'AioConnection', binary_type: Union[str, int], query_id=None) -> APIResult: + """ + Async version of get_binary_type. + """ + return await __get_binary_type(conn, binary_type, query_id) + + +def __get_binary_type(conn, binary_type, query_id): query_struct = Query( OP_GET_BINARY_TYPE, [ ('type_id', Int), ], query_id=query_id, + response_type=BinaryTypeResponse ) - with BinaryStream(conn) as stream: - query_struct.from_python(stream, { - 'type_id': entity_id(binary_type), - }) - conn.send(stream.getbuffer()) - - response_head_struct = Response(protocol_version=conn.get_protocol_version(), - following=[('type_exists', Bool)]) - - with BinaryStream(conn, conn.recv()) as stream: - init_pos = stream.tell() - response_head_type = response_head_struct.parse(stream) - response_head = stream.read_ctype(response_head_type, direction=READ_BACKWARD) - - response_parts = [] - if response_head.type_exists: - resp_body_type = body_struct.parse(stream) - response_parts.append(('body', resp_body_type)) - resp_body = stream.read_ctype(resp_body_type, direction=READ_BACKWARD) - if resp_body.is_enum: - resp_enum = enum_struct.parse(stream) - response_parts.append(('enums', resp_enum)) - - resp_schema_type = schema_struct.parse(stream) - response_parts.append(('schema', resp_schema_type)) - - response_class = type( - 'GetBinaryTypeResponse', - (response_head_type,), - { - '_pack_': 1, - '_fields_': response_parts, - } - ) - response = stream.read_ctype(response_class, position=init_pos) + return query_perform(query_struct, conn, query_params={ + 'type_id': entity_id(binary_type), + }) - result = APIResult(response) - if result.status != 0: - return result - result.value = { - 'type_exists': Bool.to_python(response.type_exists) - } - if hasattr(response, 'body'): - result.value.update(body_struct.to_python(response.body)) - if hasattr(response, 'enums'): - result.value['enums'] = enum_struct.to_python(response.enums) - if hasattr(response, 'schema'): - result.value['schema'] = { - x['schema_id']: [ - z['schema_field_id'] for z in x['schema_fields'] - ] - for x in schema_struct.to_python(response.schema) - } - return result - - -def put_binary_type( - connection: 'Connection', type_name: str, affinity_key_field: str=None, - is_enum=False, schema: dict=None, query_id=None, -) -> APIResult: + +def put_binary_type(connection: 'Connection', type_name: str, affinity_key_field: str = None, + is_enum=False, schema: dict = None, query_id=None) -> APIResult: """ Registers binary type information in cluster. @@ -125,6 +81,29 @@ def put_binary_type( is generated, :return: API result data object. 
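Throughout these hunks the imperative `query_struct.perform(...)` call is replaced by `query_perform(query_struct, conn, ...)`, shared by a sync wrapper and an `*_async` twin that both delegate to one private implementation. A plausible sketch of what such a dispatcher has to do, purely for illustration (the real helper lives in `pyignite.queries`; `perform_async` and the type-based dispatch are assumptions here, not code from the patch):

```
from pyignite.connection import AioConnection

def query_perform_sketch(query_struct, conn, post_process_fun=None, **kwargs):
    def finish(result):
        # apply the optional post-processing hook to the APIResult
        return post_process_fun(result) if post_process_fun else result

    async def run_async():
        return finish(await query_struct.perform_async(conn, **kwargs))

    # sync callers get the post-processed APIResult immediately,
    # async callers get an awaitable instead
    if isinstance(conn, AioConnection):
        return run_async()
    return finish(query_struct.perform(conn, **kwargs))
```

This is why the private `__`-prefixed implementations can be shared verbatim: only the connection type decides whether the caller must `await` the result.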
""" + return __put_binary_type(connection, type_name, affinity_key_field, is_enum, schema, query_id) + + +async def put_binary_type_async(connection: 'AioConnection', type_name: str, affinity_key_field: str = None, + is_enum=False, schema: dict = None, query_id=None) -> APIResult: + """ + Async version of put_binary_type. + """ + return await __put_binary_type(connection, type_name, affinity_key_field, is_enum, schema, query_id) + + +def __post_process_put_binary(type_id): + def internal(result): + if result.status == 0: + result.value = { + 'type_id': type_id, + 'schema_id': schema_id, + } + return result + return internal + + +def __put_binary_type(connection, type_name, affinity_key_field, is_enum, schema, query_id): # prepare data if schema is None: schema = {} @@ -195,10 +174,5 @@ def put_binary_type( ], query_id=query_id, ) - result = query_struct.perform(connection, query_params=data) - if result.status == 0: - result.value = { - 'type_id': type_id, - 'schema_id': schema_id, - } - return result + return query_perform(query_struct, connection, query_params=data, + post_process_fun=__post_process_put_binary(type_id)) diff --git a/pyignite/api/cache_config.py b/pyignite/api/cache_config.py index cfea416..0adb549 100644 --- a/pyignite/api/cache_config.py +++ b/pyignite/api/cache_config.py @@ -25,15 +25,19 @@ from typing import Union +from pyignite.connection import Connection, AioConnection from pyignite.datatypes.cache_config import cache_config_struct from pyignite.datatypes.cache_properties import prop_map -from pyignite.datatypes import ( - Int, Byte, prop_codes, Short, String, StringArray, +from pyignite.datatypes import Int, Byte, prop_codes, Short, String, StringArray +from pyignite.queries import Query, ConfigQuery, query_perform +from pyignite.queries.op_codes import ( + OP_CACHE_GET_CONFIGURATION, OP_CACHE_CREATE_WITH_NAME, OP_CACHE_GET_OR_CREATE_WITH_NAME, OP_CACHE_DESTROY, + OP_CACHE_GET_NAMES, OP_CACHE_CREATE_WITH_CONFIGURATION, OP_CACHE_GET_OR_CREATE_WITH_CONFIGURATION ) -from pyignite.queries import Query, ConfigQuery -from pyignite.queries.op_codes import * from pyignite.utils import cache_id +from .result import APIResult + def compact_cache_config(cache_config: dict) -> dict: """ @@ -48,14 +52,13 @@ def compact_cache_config(cache_config: dict) -> dict: for k, v in cache_config.items(): if k == 'length': continue - prop_code = getattr(prop_codes, 'PROP_{}'.format(k.upper())) + prop_code = getattr(prop_codes, f'PROP_{k.upper()}') result[prop_code] = v return result -def cache_get_configuration( - connection: 'Connection', cache: Union[str, int], flags: int=0, query_id=None, -) -> 'APIResult': +def cache_get_configuration(connection: 'Connection', cache: Union[str, int], + flags: int = 0, query_id=None) -> 'APIResult': """ Gets configuration for the given cache. @@ -68,7 +71,24 @@ def cache_get_configuration( :return: API result data object. Result value is OrderedDict with the cache configuration parameters. """ + return __cache_get_configuration(connection, cache, flags, query_id) + + +async def cache_get_configuration_async(connection: 'AioConnection', cache: Union[str, int], + flags: int = 0, query_id=None) -> 'APIResult': + """ + Async version of cache_get_configuration. 
+ """ + return await __cache_get_configuration(connection, cache, flags, query_id) + + +def __post_process_cache_config(result): + if result.status == 0: + result.value = compact_cache_config(result.value['cache_config']) + return result + +def __cache_get_configuration(connection, cache, flags, query_id): query_struct = Query( OP_CACHE_GET_CONFIGURATION, [ @@ -77,24 +97,19 @@ def cache_get_configuration( ], query_id=query_id, ) - result = query_struct.perform( - connection, - query_params={ - 'hash_code': cache_id(cache), - 'flags': flags, - }, - response_config=[ - ('cache_config', cache_config_struct), - ], - ) - if result.status == 0: - result.value = compact_cache_config(result.value['cache_config']) - return result - - -def cache_create( - connection: 'Connection', name: str, query_id=None, -) -> 'APIResult': + return query_perform(query_struct, connection, + query_params={ + 'hash_code': cache_id(cache), + 'flags': flags + }, + response_config=[ + ('cache_config', cache_config_struct) + ], + post_process_fun=__post_process_cache_config + ) + + +def cache_create(connection: 'Connection', name: str, query_id=None) -> 'APIResult': """ Creates a cache with a given name. Returns error if a cache with specified name already exists. @@ -108,24 +123,18 @@ def cache_create( created successfully, non-zero status and an error description otherwise. """ - query_struct = Query( - OP_CACHE_CREATE_WITH_NAME, - [ - ('cache_name', String), - ], - query_id=query_id, - ) - return query_struct.perform( - connection, - query_params={ - 'cache_name': name, - }, - ) + return __cache_create_with_name(OP_CACHE_CREATE_WITH_NAME, connection, name, query_id) -def cache_get_or_create( - connection: 'Connection', name: str, query_id=None, -) -> 'APIResult': +async def cache_create_async(connection: 'AioConnection', name: str, query_id=None) -> 'APIResult': + """ + Async version of cache_create. + """ + + return await __cache_create_with_name(OP_CACHE_CREATE_WITH_NAME, connection, name, query_id) + + +def cache_get_or_create(connection: 'Connection', name: str, query_id=None) -> 'APIResult': """ Creates a cache with a given name. Does nothing if the cache exists. @@ -138,24 +147,22 @@ def cache_get_or_create( created successfully, non-zero status and an error description otherwise. """ - query_struct = Query( - OP_CACHE_GET_OR_CREATE_WITH_NAME, - [ - ('cache_name', String), - ], - query_id=query_id, - ) - return query_struct.perform( - connection, - query_params={ - 'cache_name': name, - }, - ) + return __cache_create_with_name(OP_CACHE_GET_OR_CREATE_WITH_NAME, connection, name, query_id) + + +async def cache_get_or_create_async(connection: 'AioConnection', name: str, query_id=None) -> 'APIResult': + """ + Async version of cache_get_or_create. + """ + return await __cache_create_with_name(OP_CACHE_GET_OR_CREATE_WITH_NAME, connection, name, query_id) + +def __cache_create_with_name(op_code, conn, name, query_id): + query_struct = Query(op_code, [('cache_name', String)], query_id=query_id) + return query_perform(query_struct, conn, query_params={'cache_name': name}) -def cache_destroy( - connection: 'Connection', cache: Union[str, int], query_id=None, -) -> 'APIResult': + +def cache_destroy(connection: 'Connection', cache: Union[str, int], query_id=None) -> 'APIResult': """ Destroys cache with a given name. @@ -166,19 +173,20 @@ def cache_destroy( is generated, :return: API result data object. 
""" + return __cache_destroy(connection, cache, query_id) - query_struct = Query( - OP_CACHE_DESTROY,[ - ('hash_code', Int), - ], - query_id=query_id, - ) - return query_struct.perform( - connection, - query_params={ - 'hash_code': cache_id(cache), - }, - ) + +async def cache_destroy_async(connection: 'AioConnection', cache: Union[str, int], query_id=None) -> 'APIResult': + """ + Async version of cache_destroy. + """ + return await __cache_destroy(connection, cache, query_id) + + +def __cache_destroy(connection, cache, query_id): + query_struct = Query(OP_CACHE_DESTROY, [('hash_code', Int)], query_id=query_id) + + return query_perform(query_struct, connection, query_params={'hash_code': cache_id(cache)}) def cache_get_names(connection: 'Connection', query_id=None) -> 'APIResult': @@ -193,21 +201,30 @@ def cache_get_names(connection: 'Connection', query_id=None) -> 'APIResult': names, non-zero status and an error description otherwise. """ - query_struct = Query(OP_CACHE_GET_NAMES, query_id=query_id) - result = query_struct.perform( - connection, - response_config=[ - ('cache_names', StringArray), - ], - ) + return __cache_get_names(connection, query_id) + + +async def cache_get_names_async(connection: 'AioConnection', query_id=None) -> 'APIResult': + """ + Async version of cache_get_names. + """ + return await __cache_get_names(connection, query_id) + + +def __post_process_cache_names(result): if result.status == 0: result.value = result.value['cache_names'] return result -def cache_create_with_config( - connection: 'Connection', cache_props: dict, query_id=None, -) -> 'APIResult': +def __cache_get_names(connection, query_id): + query_struct = Query(OP_CACHE_GET_NAMES, query_id=query_id) + return query_perform(query_struct, connection, + response_config=[('cache_names', StringArray)], + post_process_fun=__post_process_cache_names) + + +def cache_create_with_config(connection: 'Connection', cache_props: dict, query_id=None) -> 'APIResult': """ Creates cache with provided configuration. An error is returned if the name is already in use. @@ -222,29 +239,17 @@ def cache_create_with_config( :return: API result data object. Contains zero status if cache was created, non-zero status and an error description otherwise. """ + return __cache_create_with_config(OP_CACHE_CREATE_WITH_CONFIGURATION, connection, cache_props, query_id) - prop_types = {} - prop_values = {} - for i, prop_item in enumerate(cache_props.items()): - prop_code, prop_value = prop_item - prop_name = 'property_{}'.format(i) - prop_types[prop_name] = prop_map(prop_code) - prop_values[prop_name] = prop_value - prop_values['param_count'] = len(cache_props) - query_struct = ConfigQuery( - OP_CACHE_CREATE_WITH_CONFIGURATION, - [ - ('param_count', Short), - ] + list(prop_types.items()), - query_id=query_id, - ) - return query_struct.perform(connection, query_params=prop_values) +async def cache_create_with_config_async(connection: 'AioConnection', cache_props: dict, query_id=None) -> 'APIResult': + """ + Async version of cache_create_with_config. + """ + return await __cache_create_with_config(OP_CACHE_CREATE_WITH_CONFIGURATION, connection, cache_props, query_id) -def cache_get_or_create_with_config( - connection: 'Connection', cache_props: dict, query_id=None, -) -> 'APIResult': +def cache_get_or_create_with_config(connection: 'Connection', cache_props: dict, query_id=None) -> 'APIResult': """ Creates cache with provided configuration. Does nothing if the name is already in use. 
@@ -259,9 +264,20 @@ def cache_get_or_create_with_config( :return: API result data object. Contains zero status if cache was created, non-zero status and an error description otherwise. """ + return __cache_create_with_config(OP_CACHE_GET_OR_CREATE_WITH_CONFIGURATION, connection, cache_props, query_id) + + +async def cache_get_or_create_with_config_async(connection: 'AioConnection', cache_props: dict, + query_id=None) -> 'APIResult': + """ + Async version of cache_get_or_create_with_config. + """ + return await __cache_create_with_config(OP_CACHE_GET_OR_CREATE_WITH_CONFIGURATION, connection, cache_props, + query_id) + - prop_types = {} - prop_values = {} +def __cache_create_with_config(op_code, connection, cache_props, query_id): + prop_types, prop_values = {}, {} for i, prop_item in enumerate(cache_props.items()): prop_code, prop_value = prop_item prop_name = 'property_{}'.format(i) @@ -269,11 +285,6 @@ def cache_get_or_create_with_config( prop_values[prop_name] = prop_value prop_values['param_count'] = len(cache_props) - query_struct = ConfigQuery( - OP_CACHE_GET_OR_CREATE_WITH_CONFIGURATION, - [ - ('param_count', Short), - ] + list(prop_types.items()), - query_id=query_id, - ) - return query_struct.perform(connection, query_params=prop_values) + following = [('param_count', Short)] + list(prop_types.items()) + query_struct = ConfigQuery(op_code, following, query_id=query_id) + return query_perform(query_struct, connection, query_params=prop_values) diff --git a/pyignite/api/key_value.py b/pyignite/api/key_value.py index 25601e9..6d5663c 100644 --- a/pyignite/api/key_value.py +++ b/pyignite/api/key_value.py @@ -15,20 +15,26 @@ from typing import Any, Iterable, Optional, Union -from pyignite.queries.op_codes import * -from pyignite.datatypes import ( - Map, Bool, Byte, Int, Long, AnyDataArray, AnyDataObject, +from pyignite.connection import AioConnection, Connection +from pyignite.queries.op_codes import ( + OP_CACHE_PUT, OP_CACHE_GET, OP_CACHE_GET_ALL, OP_CACHE_PUT_ALL, OP_CACHE_CONTAINS_KEY, OP_CACHE_CONTAINS_KEYS, + OP_CACHE_GET_AND_PUT, OP_CACHE_GET_AND_REPLACE, OP_CACHE_GET_AND_REMOVE, OP_CACHE_PUT_IF_ABSENT, + OP_CACHE_GET_AND_PUT_IF_ABSENT, OP_CACHE_REPLACE, OP_CACHE_REPLACE_IF_EQUALS, OP_CACHE_CLEAR, OP_CACHE_CLEAR_KEY, + OP_CACHE_CLEAR_KEYS, OP_CACHE_REMOVE_KEY, OP_CACHE_REMOVE_IF_EQUALS, OP_CACHE_REMOVE_KEYS, OP_CACHE_REMOVE_ALL, + OP_CACHE_GET_SIZE, OP_CACHE_LOCAL_PEEK ) +from pyignite.datatypes import Map, Bool, Byte, Int, Long, AnyDataArray, AnyDataObject +from pyignite.datatypes.base import IgniteDataType from pyignite.datatypes.key_value import PeekModes -from pyignite.queries import Query +from pyignite.queries import Query, query_perform from pyignite.utils import cache_id +from .result import APIResult -def cache_put( - connection: 'Connection', cache: Union[str, int], key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': + +def cache_put(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache (overwriting existing value if any). @@ -48,7 +54,19 @@ def cache_put( :return: API result data object. Contains zero status if a value is written, non-zero status and an error description otherwise. 
""" + return __cache_put(connection, cache, key, value, key_hint, value_hint, binary, query_id) + + +async def cache_put_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_put + """ + return await __cache_put(connection, cache, key, value, key_hint, value_hint, binary, query_id) + +def __cache_put(connection, cache, key, value, key_hint, value_hint, binary, query_id): query_struct = Query( OP_CACHE_PUT, [ @@ -59,19 +77,19 @@ def cache_put( ], query_id=query_id, ) - return query_struct.perform(connection, { - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, - 'key': key, - 'value': value, - }) + return query_perform( + query_struct, connection, + query_params={ + 'hash_code': cache_id(cache), + 'flag': 1 if binary else 0, + 'key': key, + 'value': value + } + ) -def cache_get( - connection: 'Connection', cache: Union[str, int], key: Any, - key_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_get(connection: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': """ Retrieves a value from cache by key. @@ -88,7 +106,19 @@ def cache_get( :return: API result data object. Contains zero status and a value retrieved on success, non-zero status and an error description on failure. """ + return __cache_get(connection, cache, key, key_hint, binary, query_id) + + +async def cache_get_async(connection: 'AioConnection', cache: Union[str, int], key: Any, + key_hint: 'IgniteDataType' = None, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_get + """ + return await __cache_get(connection, cache, key, key_hint, binary, query_id) + +def __cache_get(connection, cache, key, key_hint, binary, query_id): query_struct = Query( OP_CACHE_GET, [ @@ -98,27 +128,22 @@ def cache_get( ], query_id=query_id, ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'key': key, }, response_config=[ - ('value', AnyDataObject), + ('value', AnyDataObject), ], + post_process_fun=__post_process_value_by_key('value') ) - if result.status != 0: - return result - result.value = result.value['value'] - return result -def cache_get_all( - connection: 'Connection', cache: Union[str, int], keys: Iterable, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_get_all(connection: 'Connection', cache: Union[str, int], keys: Iterable, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': """ Retrieves multiple key-value pairs from cache. @@ -134,7 +159,18 @@ def cache_get_all( retrieved key-value pairs, non-zero status and an error description on failure. """ + return __cache_get_all(connection, cache, keys, binary, query_id) + + +async def cache_get_all_async(connection: 'AioConnection', cache: Union[str, int], keys: Iterable, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_get_all. 
+ """ + return await __cache_get_all(connection, cache, keys, binary, query_id) + +def __cache_get_all(connection, cache, keys, binary, query_id): query_struct = Query( OP_CACHE_GET_ALL, [ @@ -144,8 +180,8 @@ def cache_get_all( ], query_id=query_id, ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -154,16 +190,12 @@ def cache_get_all( response_config=[ ('data', Map), ], + post_process_fun=__post_process_value_by_key('data') ) - if result.status == 0: - result.value = dict(result.value)['data'] - return result -def cache_put_all( - connection: 'Connection', cache: Union[str, int], pairs: dict, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_put_all(connection: 'Connection', cache: Union[str, int], pairs: dict, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': """ Puts multiple key-value pairs to cache (overwriting existing associations if any). @@ -181,7 +213,18 @@ def cache_put_all( :return: API result data object. Contains zero status if key-value pairs are written, non-zero status and an error description otherwise. """ + return __cache_put_all(connection, cache, pairs, binary, query_id) + +async def cache_put_all_async(connection: 'AioConnection', cache: Union[str, int], pairs: dict, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_put_all. + """ + return await __cache_put_all(connection, cache, pairs, binary, query_id) + + +def __cache_put_all(connection, cache, pairs, binary, query_id): query_struct = Query( OP_CACHE_PUT_ALL, [ @@ -191,8 +234,8 @@ def cache_put_all( ], query_id=query_id, ) - return query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -201,11 +244,8 @@ def cache_put_all( ) -def cache_contains_key( - connection: 'Connection', cache: Union[str, int], key: Any, - key_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_contains_key(connection: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': """ Returns a value indicating whether given key is present in cache. @@ -223,7 +263,19 @@ def cache_contains_key( retrieved on success: `True` when key is present, `False` otherwise, non-zero status and an error description on failure. """ + return __cache_contains_key(connection, cache, key, key_hint, binary, query_id) + + +async def cache_contains_key_async(connection: 'AioConnection', cache: Union[str, int], key: Any, + key_hint: 'IgniteDataType' = None, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_contains_key. 
+ """ + return await __cache_contains_key(connection, cache, key, key_hint, binary, query_id) + +def __cache_contains_key(connection, cache, key, key_hint, binary, query_id): query_struct = Query( OP_CACHE_CONTAINS_KEY, [ @@ -233,9 +285,9 @@ def cache_contains_key( ], query_id=query_id, ) - result = query_struct.perform( - connection, - query_params={ + return query_perform( + query_struct, connection, + query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, 'key': key, @@ -243,16 +295,12 @@ def cache_contains_key( response_config=[ ('value', Bool), ], + post_process_fun=__post_process_value_by_key('value') ) - if result.status == 0: - result.value = result.value['value'] - return result -def cache_contains_keys( - connection: 'Connection', cache: Union[str, int], keys: Iterable, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_contains_keys(connection: 'Connection', cache: Union[str, int], keys: Iterable, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': """ Returns a value indicating whether all given keys are present in cache. @@ -268,7 +316,18 @@ def cache_contains_keys( retrieved on success: `True` when all keys are present, `False` otherwise, non-zero status and an error description on failure. """ + return __cache_contains_keys(connection, cache, keys, binary, query_id) + + +async def cache_contains_keys_async(connection: 'AioConnection', cache: Union[str, int], keys: Iterable, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_contains_keys. + """ + return await __cache_contains_keys(connection, cache, keys, binary, query_id) + +def __cache_contains_keys(connection, cache, keys, binary, query_id): query_struct = Query( OP_CACHE_CONTAINS_KEYS, [ @@ -278,8 +337,8 @@ def cache_contains_keys( ], query_id=query_id, ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -288,17 +347,13 @@ def cache_contains_keys( response_config=[ ('value', Bool), ], + post_process_fun=__post_process_value_by_key('value') ) - if result.status == 0: - result.value = result.value['value'] - return result -def cache_get_and_put( - connection: 'Connection', cache: Union[str, int], key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_get_and_put(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache, and returns the previous value for that key, or null value if there was not such key. @@ -320,7 +375,19 @@ def cache_get_and_put( or None if a value is written, non-zero status and an error description in case of error. """ + return __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, binary, query_id) + +async def cache_get_and_put_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_get_and_put. 
+ """ + return await __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, binary, query_id) + + +def __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, binary, query_id): query_struct = Query( OP_CACHE_GET_AND_PUT, [ @@ -331,8 +398,8 @@ def cache_get_and_put( ], query_id=query_id, ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -342,17 +409,13 @@ def cache_get_and_put( response_config=[ ('value', AnyDataObject), ], + post_process_fun=__post_process_value_by_key('value') ) - if result.status == 0: - result.value = result.value['value'] - return result -def cache_get_and_replace( - connection: 'Connection', cache: Union[str, int], key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_get_and_replace(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache, returning previous value for that key, if and only if there is a value currently mapped @@ -374,7 +437,19 @@ def cache_get_and_replace( :return: API result data object. Contains zero status and an old value or None on success, non-zero status and an error description otherwise. """ + return __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, binary, query_id) + + +async def cache_get_and_replace_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_get_and_replace. + """ + return await __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, binary, query_id) + +def __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, binary, query_id): query_struct = Query( OP_CACHE_GET_AND_REPLACE, [ ('hash_code', Int), @@ -384,8 +459,8 @@ def cache_get_and_replace( ], query_id=query_id, ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -395,17 +470,12 @@ def cache_get_and_replace( response_config=[ ('value', AnyDataObject), ], + post_process_fun=__post_process_value_by_key('value') ) - if result.status == 0: - result.value = result.value['value'] - return result -def cache_get_and_remove( - connection: 'Connection', cache: Union[str, int], key: Any, - key_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_get_and_remove(connection: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': """ Removes the cache entry with specified key, returning the value. @@ -422,7 +492,16 @@ def cache_get_and_remove( :return: API result data object. Contains zero status and an old value or None, non-zero status and an error description otherwise. 
""" + return __cache_get_and_remove(connection, cache, key, key_hint, binary, query_id) + +async def cache_get_and_remove_async(connection: 'AioConnection', cache: Union[str, int], key: Any, + key_hint: 'IgniteDataType' = None, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': + return await __cache_get_and_remove(connection, cache, key, key_hint, binary, query_id) + + +def __cache_get_and_remove(connection, cache, key, key_hint, binary, query_id): query_struct = Query( OP_CACHE_GET_AND_REMOVE, [ ('hash_code', Int), @@ -431,8 +510,8 @@ def cache_get_and_remove( ], query_id=query_id, ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -441,17 +520,13 @@ def cache_get_and_remove( response_config=[ ('value', AnyDataObject), ], + post_process_fun=__post_process_value_by_key('value') ) - if result.status == 0: - result.value = result.value['value'] - return result -def cache_put_if_absent( - connection: 'Connection', cache: Union[str, int], key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_put_if_absent(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key does not already exist. @@ -472,7 +547,19 @@ def cache_put_if_absent( :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ + return __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id) + +async def cache_put_if_absent_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_put_if_absent. + """ + return await __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id) + + +def __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id): query_struct = Query( OP_CACHE_PUT_IF_ABSENT, [ @@ -483,8 +570,8 @@ def cache_put_if_absent( ], query_id=query_id, ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -494,17 +581,13 @@ def cache_put_if_absent( response_config=[ ('success', Bool), ], + post_process_fun=__post_process_value_by_key('success') ) - if result.status == 0: - result.value = result.value['success'] - return result -def cache_get_and_put_if_absent( - connection: 'Connection', cache: Union[str, int], key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_get_and_put_if_absent(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key does not already exist. 
@@ -525,7 +608,19 @@ def cache_get_and_put_if_absent( :return: API result data object. Contains zero status and an old value or None on success, non-zero status and an error description otherwise. """ + return __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id) + + +async def cache_get_and_put_if_absent_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_get_and_put_if_absent. + """ + return await __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id) + +def __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id): query_struct = Query( OP_CACHE_GET_AND_PUT_IF_ABSENT, [ @@ -536,8 +631,8 @@ def cache_get_and_put_if_absent( ], query_id=query_id, ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -547,17 +642,13 @@ def cache_get_and_put_if_absent( response_config=[ ('value', AnyDataObject), ], + post_process_fun=__post_process_value_by_key('value') ) - if result.status == 0: - result.value = result.value['value'] - return result -def cache_replace( - connection: 'Connection', cache: Union[str, int], key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_replace(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key already exist. @@ -578,7 +669,19 @@ def cache_replace( success code, or non-zero status and an error description if something has gone wrong. """ + return __cache_replace(connection, cache, key, value, key_hint, value_hint, binary, query_id) + + +async def cache_replace_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_replace. 
+ """ + return await __cache_replace(connection, cache, key, value, key_hint, value_hint, binary, query_id) + +def __cache_replace(connection, cache, key, value, key_hint, value_hint, binary, query_id): query_struct = Query( OP_CACHE_REPLACE, [ @@ -589,8 +692,8 @@ def cache_replace( ], query_id=query_id, ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -600,18 +703,14 @@ def cache_replace( response_config=[ ('success', Bool), ], + post_process_fun=__post_process_value_by_key('success') ) - if result.status == 0: - result.value = result.value['success'] - return result -def cache_replace_if_equals( - connection: 'Connection', cache: Union[str, int], - key: Any, sample: Any, value: Any, key_hint: 'IgniteDatatType' = None, - sample_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_replace_if_equals(connection: 'Connection', cache: Union[str, int], key: Any, sample: Any, value: Any, + key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, + value_hint: 'IgniteDataType' = None, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key already exists and value equals provided sample. @@ -636,7 +735,23 @@ def cache_replace_if_equals( success code, or non-zero status and an error description if something has gone wrong. """ + return __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, sample_hint, value_hint, binary, + query_id) + +async def cache_replace_if_equals_async( + connection: 'AioConnection', cache: Union[str, int], key: Any, sample: Any, value: Any, + key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_replace_if_equals. + """ + return await __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, sample_hint, value_hint, + binary, query_id) + + +def __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, sample_hint, value_hint, binary, + query_id): query_struct = Query( OP_CACHE_REPLACE_IF_EQUALS, [ @@ -648,8 +763,8 @@ def cache_replace_if_equals( ], query_id=query_id, ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -660,16 +775,12 @@ def cache_replace_if_equals( response_config=[ ('success', Bool), ], + post_process_fun=__post_process_value_by_key('success') ) - if result.status == 0: - result.value = result.value['success'] - return result -def cache_clear( - connection: 'Connection', cache: Union[str, int], - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_clear(connection: 'Connection', cache: Union[str, int], binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': """ Clears the cache without notifying listeners or cache writers. @@ -683,7 +794,18 @@ def cache_clear( :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. 
""" + return __cache_clear(connection, cache, binary, query_id) + + +async def cache_clear_async(connection: 'AioConnection', cache: Union[str, int], binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_clear. + """ + return await __cache_clear(connection, cache, binary, query_id) + +def __cache_clear(connection, cache, binary, query_id): query_struct = Query( OP_CACHE_CLEAR, [ @@ -692,8 +814,8 @@ def cache_clear( ], query_id=query_id, ) - return query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -701,11 +823,8 @@ def cache_clear( ) -def cache_clear_key( - connection: 'Connection', cache: Union[str, int], key: Any, - key_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_clear_key(connection: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': """ Clears the cache key without notifying listeners or cache writers. @@ -722,7 +841,19 @@ def cache_clear_key( :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ + return __cache_clear_key(connection, cache, key, key_hint, binary, query_id) + + +async def cache_clear_key_async(connection: 'AioConnection', cache: Union[str, int], key: Any, + key_hint: 'IgniteDataType' = None, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_clear_key. + """ + return await __cache_clear_key(connection, cache, key, key_hint, binary, query_id) + +def __cache_clear_key(connection, cache, key, key_hint, binary, query_id): query_struct = Query( OP_CACHE_CLEAR_KEY, [ @@ -732,8 +863,8 @@ def cache_clear_key( ], query_id=query_id, ) - return query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -742,10 +873,8 @@ def cache_clear_key( ) -def cache_clear_keys( - connection: 'Connection', cache: Union[str, int], keys: list, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_clear_keys(connection: 'Connection', cache: Union[str, int], keys: Iterable, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': """ Clears the cache keys without notifying listeners or cache writers. @@ -760,7 +889,18 @@ def cache_clear_keys( :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ + return __cache_clear_keys(connection, cache, keys, binary, query_id) + +async def cache_clear_keys_async(connection: 'AioConnection', cache: Union[str, int], keys: Iterable, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_clear_keys. 
+ """ + return await __cache_clear_keys(connection, cache, keys, binary, query_id) + + +def __cache_clear_keys(connection, cache, keys, binary, query_id): query_struct = Query( OP_CACHE_CLEAR_KEYS, [ @@ -770,8 +910,8 @@ def cache_clear_keys( ], query_id=query_id, ) - return query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -780,11 +920,8 @@ def cache_clear_keys( ) -def cache_remove_key( - connection: 'Connection', cache: Union[str, int], key: Any, - key_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_remove_key(connection: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': """ Clears the cache key without notifying listeners or cache writers. @@ -802,7 +939,19 @@ def cache_remove_key( success code, or non-zero status and an error description if something has gone wrong. """ + return __cache_remove_key(connection, cache, key, key_hint, binary, query_id) + + +async def cache_remove_key_async(connection: 'AioConnection', cache: Union[str, int], key: Any, + key_hint: 'IgniteDataType' = None, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_remove_key. + """ + return await __cache_remove_key(connection, cache, key, key_hint, binary, query_id) + +def __cache_remove_key(connection, cache, key, key_hint, binary, query_id): query_struct = Query( OP_CACHE_REMOVE_KEY, [ @@ -812,8 +961,8 @@ def cache_remove_key( ], query_id=query_id, ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -822,17 +971,13 @@ def cache_remove_key( response_config=[ ('success', Bool), ], + post_process_fun=__post_process_value_by_key('success') ) - if result.status == 0: - result.value = result.value['success'] - return result -def cache_remove_if_equals( - connection: 'Connection', cache: Union[str, int], key: Any, sample: Any, - key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_remove_if_equals(connection: 'Connection', cache: Union[str, int], key: Any, sample: Any, + key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': """ Removes an entry with a given key if provided value is equal to actual value, notifying listeners and cache writers. @@ -854,7 +999,19 @@ def cache_remove_if_equals( success code, or non-zero status and an error description if something has gone wrong. """ + return __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hint, binary, query_id) + + +async def cache_remove_if_equals_async( + connection: 'AioConnection', cache: Union[str, int], key: Any, sample: Any, key_hint: 'IgniteDataType' = None, + sample_hint: 'IgniteDataType' = None, binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_remove_if_equals. 
+ """ + return await __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hint, binary, query_id) + +def __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hint, binary, query_id): query_struct = Query( OP_CACHE_REMOVE_IF_EQUALS, [ @@ -865,8 +1022,8 @@ def cache_remove_if_equals( ], query_id=query_id, ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -876,16 +1033,12 @@ def cache_remove_if_equals( response_config=[ ('success', Bool), ], + post_process_fun=__post_process_value_by_key('success') ) - if result.status == 0: - result.value = result.value['success'] - return result -def cache_remove_keys( - connection: 'Connection', cache: Union[str, int], keys: Iterable, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_remove_keys(connection: 'Connection', cache: Union[str, int], keys: Iterable, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': """ Removes entries with given keys, notifying listeners and cache writers. @@ -900,7 +1053,18 @@ def cache_remove_keys( :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ + return __cache_remove_keys(connection, cache, keys, binary, query_id) + +async def cache_remove_keys_async(connection: 'AioConnection', cache: Union[str, int], keys: Iterable, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_remove_keys. + """ + return await __cache_remove_keys(connection, cache, keys, binary, query_id) + + +def __cache_remove_keys(connection, cache, keys, binary, query_id): query_struct = Query( OP_CACHE_REMOVE_KEYS, [ @@ -910,8 +1074,8 @@ def cache_remove_keys( ], query_id=query_id, ) - return query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -920,10 +1084,8 @@ def cache_remove_keys( ) -def cache_remove_all( - connection: 'Connection', cache: Union[str, int], - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_remove_all(connection: 'Connection', cache: Union[str, int], binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': """ Removes all entries from cache, notifying listeners and cache writers. @@ -937,7 +1099,18 @@ def cache_remove_all( :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ + return __cache_remove_all(connection, cache, binary, query_id) + + +async def cache_remove_all_async(connection: 'AioConnection', cache: Union[str, int], binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_remove_all. 
+ """ + return await __cache_remove_all(connection, cache, binary, query_id) + +def __cache_remove_all(connection, cache, binary, query_id): query_struct = Query( OP_CACHE_REMOVE_ALL, [ @@ -946,8 +1119,8 @@ def cache_remove_all( ], query_id=query_id, ) - return query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -955,10 +1128,8 @@ def cache_remove_all( ) -def cache_get_size( - connection: 'Connection', cache: Union[str, int], peek_modes: int = 0, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_get_size(connection: 'Connection', cache: Union[str, int], peek_modes: Union[int, list, tuple] = 0, + binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': """ Gets the number of entries in cache. @@ -976,6 +1147,16 @@ def cache_get_size( cache entries on success, non-zero status and an error description otherwise. """ + return __cache_get_size(connection, cache, peek_modes, binary, query_id) + + +async def cache_get_size_async(connection: 'AioConnection', cache: Union[str, int], + peek_modes: Union[int, list, tuple] = 0, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': + return await __cache_get_size(connection, cache, peek_modes, binary, query_id) + + +def __cache_get_size(connection, cache, peek_modes, binary, query_id): if not isinstance(peek_modes, (list, tuple)): peek_modes = [peek_modes] if peek_modes else [] @@ -988,8 +1169,8 @@ def cache_get_size( ], query_id=query_id, ) - result = query_struct.perform( - connection, + return query_perform( + query_struct, connection, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -998,21 +1179,17 @@ def cache_get_size( response_config=[ ('count', Long), ], + post_process_fun=__post_process_value_by_key('count') ) - if result.status == 0: - result.value = result.value['count'] - return result -def cache_local_peek( - conn: 'Connection', cache: Union[str, int], - key: Any, key_hint: 'IgniteDataType' = None, peek_modes: int = 0, - binary: bool = False, query_id: Optional[int] = None, -) -> 'APIResult': +def cache_local_peek(conn: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, + peek_modes: Union[int, list, tuple] = 0, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': """ Peeks at in-memory cached value using default optional peek mode. - This method will not load value from any persistent store or from a remote + This method will not load value from any cache store or from a remote node. :param conn: connection: connection to Ignite server, @@ -1031,6 +1208,19 @@ def cache_local_peek( :return: API result data object. Contains zero status and a peeked value (null if not found). """ + return __cache_local_peek(conn, cache, key, key_hint, peek_modes, binary, query_id) + + +async def cache_local_peek_async( + conn: 'AioConnection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, + peek_modes: Union[int, list, tuple] = 0, binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + """ + Async version of cache_local_peek. 
+ """ + return await __cache_local_peek(conn, cache, key, key_hint, peek_modes, binary, query_id) + + +def __cache_local_peek(conn, cache, key, key_hint, peek_modes, binary, query_id): if not isinstance(peek_modes, (list, tuple)): peek_modes = [peek_modes] if peek_modes else [] @@ -1044,8 +1234,8 @@ def cache_local_peek( ], query_id=query_id, ) - result = query_struct.perform( - conn, + return query_perform( + query_struct, conn, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -1055,8 +1245,14 @@ def cache_local_peek( response_config=[ ('value', AnyDataObject), ], + post_process_fun=__post_process_value_by_key('value') ) - if result.status != 0: + + +def __post_process_value_by_key(key): + def internal(result): + if result.status == 0: + result.value = result.value[key] + return result - result.value = result.value['value'] - return result + return internal diff --git a/pyignite/api/result.py b/pyignite/api/result.py index f60a437..f134be9 100644 --- a/pyignite/api/result.py +++ b/pyignite/api/result.py @@ -32,7 +32,7 @@ class APIResult: message = 'Success' value = None - def __init__(self, response: 'Response'): + def __init__(self, response): self.status = getattr(response, 'status_code', OP_SUCCESS) self.query_id = response.query_id if hasattr(response, 'error_message'): diff --git a/pyignite/api/sql.py b/pyignite/api/sql.py index dc470d1..b10cc7d 100644 --- a/pyignite/api/sql.py +++ b/pyignite/api/sql.py @@ -15,23 +15,21 @@ from typing import Union -from pyignite.constants import * -from pyignite.datatypes import ( - AnyDataArray, AnyDataObject, Bool, Byte, Int, Long, Map, Null, String, - StructArray, -) +from pyignite.connection import AioConnection, Connection +from pyignite.datatypes import AnyDataArray, AnyDataObject, Bool, Byte, Int, Long, Map, Null, String, StructArray from pyignite.datatypes.sql import StatementType -from pyignite.queries import Query -from pyignite.queries.op_codes import * +from pyignite.queries import Query, query_perform +from pyignite.queries.op_codes import ( + OP_QUERY_SCAN, OP_QUERY_SCAN_CURSOR_GET_PAGE, OP_QUERY_SQL, OP_QUERY_SQL_CURSOR_GET_PAGE, OP_QUERY_SQL_FIELDS, + OP_QUERY_SQL_FIELDS_CURSOR_GET_PAGE, OP_RESOURCE_CLOSE +) from pyignite.utils import cache_id, deprecated from .result import APIResult +from ..queries.response import SQLResponse -def scan( - conn: 'Connection', cache: Union[str, int], page_size: int, - partitions: int = -1, local: bool = False, binary: bool = False, - query_id: int = None, -) -> APIResult: +def scan(conn: 'Connection', cache: Union[str, int], page_size: int, partitions: int = -1, local: bool = False, + binary: bool = False, query_id: int = None) -> APIResult: """ Performs scan query. @@ -58,7 +56,24 @@ def scan( * `more`: bool, True if more data is available for subsequent ‘scan_cursor_get_page’ calls. """ + return __scan(conn, cache, page_size, partitions, local, binary, query_id) + + +async def scan_async(conn: 'AioConnection', cache: Union[str, int], page_size: int, partitions: int = -1, + local: bool = False, binary: bool = False, query_id: int = None) -> APIResult: + """ + Async version of scan. 
+ """ + return await __scan(conn, cache, page_size, partitions, local, binary, query_id) + + +def __query_result_post_process(result): + if result.status == 0: + result.value = dict(result.value) + return result + +def __scan(conn, cache, page_size, partitions, local, binary, query_id): query_struct = Query( OP_QUERY_SCAN, [ @@ -71,8 +86,8 @@ def scan( ], query_id=query_id, ) - result = query_struct.perform( - conn, + return query_perform( + query_struct, conn, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -86,15 +101,11 @@ def scan( ('data', Map), ('more', Bool), ], + post_process_fun=__query_result_post_process ) - if result.status == 0: - result.value = dict(result.value) - return result -def scan_cursor_get_page( - conn: 'Connection', cursor: int, query_id: int = None, -) -> APIResult: +def scan_cursor_get_page(conn: 'Connection', cursor: int, query_id: int = None) -> APIResult: """ Fetches the next scan query cursor page by cursor ID that is obtained from `scan` function. @@ -114,7 +125,14 @@ def scan_cursor_get_page( * `more`: bool, True if more data is available for subsequent ‘scan_cursor_get_page’ calls. """ + return __scan_cursor_get_page(conn, cursor, query_id) + +async def scan_cursor_get_page_async(conn: 'AioConnection', cursor: int, query_id: int = None) -> APIResult: + return await __scan_cursor_get_page(conn, cursor, query_id) + + +def __scan_cursor_get_page(conn, cursor, query_id): query_struct = Query( OP_QUERY_SCAN_CURSOR_GET_PAGE, [ @@ -122,8 +140,8 @@ def scan_cursor_get_page( ], query_id=query_id, ) - result = query_struct.perform( - conn, + return query_perform( + query_struct, conn, query_params={ 'cursor': cursor, }, @@ -131,10 +149,8 @@ def scan_cursor_get_page( ('data', Map), ('more', Bool), ], + post_process_fun=__query_result_post_process ) - if result.status == 0: - result.value = dict(result.value) - return result @deprecated(version='1.2.0', reason="This API is deprecated and will be removed in the following major release. " @@ -322,6 +338,31 @@ def sql_fields( * `more`: bool, True if more data is available for subsequent ‘sql_fields_cursor_get_page’ calls. """ + return __sql_fields(conn, cache, query_str, page_size, query_args, schema, statement_type, distributed_joins, + local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, + timeout, binary, query_id) + + +async def sql_fields_async( + conn: 'AioConnection', cache: Union[str, int], + query_str: str, page_size: int, query_args=None, schema: str = None, + statement_type: int = StatementType.ANY, distributed_joins: bool = False, + local: bool = False, replicated_only: bool = False, + enforce_join_order: bool = False, collocated: bool = False, + lazy: bool = False, include_field_names: bool = False, max_rows: int = -1, + timeout: int = 0, binary: bool = False, query_id: int = None +) -> APIResult: + """ + Async version of sql_fields. 
+ """ + return await __sql_fields(conn, cache, query_str, page_size, query_args, schema, statement_type, distributed_joins, + local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, + max_rows, timeout, binary, query_id) + + +def __sql_fields(conn, cache, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, + replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, timeout, + binary, query_id): if query_args is None: query_args = [] @@ -346,10 +387,11 @@ def sql_fields( ('include_field_names', Bool), ], query_id=query_id, + response_type=SQLResponse ) - return query_struct.perform( - conn, + return query_perform( + query_struct, conn, query_params={ 'hash_code': cache_id(cache), 'flag': 1 if binary else 0, @@ -368,15 +410,12 @@ def sql_fields( 'timeout': timeout, 'include_field_names': include_field_names, }, - sql=True, include_field_names=include_field_names, has_cursor=True, ) -def sql_fields_cursor_get_page( - conn: 'Connection', cursor: int, field_count: int, query_id: int = None, -) -> APIResult: +def sql_fields_cursor_get_page(conn: 'Connection', cursor: int, field_count: int, query_id: int = None) -> APIResult: """ Retrieves the next query result page by cursor ID from `sql_fields`. @@ -396,7 +435,18 @@ def sql_fields_cursor_get_page( * `more`: bool, True if more data is available for subsequent ‘sql_fields_cursor_get_page’ calls. """ + return __sql_fields_cursor_get_page(conn, cursor, field_count, query_id) + + +async def sql_fields_cursor_get_page_async(conn: 'AioConnection', cursor: int, field_count: int, + query_id: int = None) -> APIResult: + """ + Async version sql_fields_cursor_get_page. + """ + return await __sql_fields_cursor_get_page(conn, cursor, field_count, query_id) + +def __sql_fields_cursor_get_page(conn, cursor, field_count, query_id): query_struct = Query( OP_QUERY_SQL_FIELDS_CURSOR_GET_PAGE, [ @@ -404,16 +454,20 @@ def sql_fields_cursor_get_page( ], query_id=query_id, ) - result = query_struct.perform( - conn, + return query_perform( + query_struct, conn, query_params={ 'cursor': cursor, }, response_config=[ ('data', StructArray([(f'field_{i}', AnyDataObject) for i in range(field_count)])), ('more', Bool), - ] + ], + post_process_fun=__post_process_sql_fields_cursor ) + + +def __post_process_sql_fields_cursor(result): if result.status != 0: return result @@ -427,9 +481,7 @@ def sql_fields_cursor_get_page( return result -def resource_close( - conn: 'Connection', cursor: int, query_id: int = None -) -> APIResult: +def resource_close(conn: 'Connection', cursor: int, query_id: int = None) -> APIResult: """ Closes a resource, such as query cursor. @@ -441,7 +493,14 @@ def resource_close( :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. 
""" + return __resource_close(conn, cursor, query_id) + + +async def resource_close_async(conn: 'AioConnection', cursor: int, query_id: int = None) -> APIResult: + return await __resource_close(conn, cursor, query_id) + +def __resource_close(conn, cursor, query_id): query_struct = Query( OP_RESOURCE_CLOSE, [ @@ -449,9 +508,9 @@ def resource_close( ], query_id=query_id, ) - return query_struct.perform( - conn, + return query_perform( + query_struct, conn, query_params={ 'cursor': cursor, - }, + } ) diff --git a/pyignite/binary.py b/pyignite/binary.py index da62bb5..4e34267 100644 --- a/pyignite/binary.py +++ b/pyignite/binary.py @@ -27,15 +27,22 @@ from collections import OrderedDict import ctypes +from io import SEEK_CUR from typing import Any import attr -from pyignite.constants import * -from .datatypes import * +from .constants import PROTOCOL_BYTE_ORDER +from .datatypes import ( + Null, ByteObject, ShortObject, IntObject, LongObject, FloatObject, DoubleObject, CharObject, BoolObject, UUIDObject, + DateObject, TimestampObject, TimeObject, EnumObject, BinaryEnumObject, ByteArrayObject, ShortArrayObject, + IntArrayObject, LongArrayObject, FloatArrayObject, DoubleArrayObject, CharArrayObject, BoolArrayObject, + UUIDArrayObject, DateArrayObject, TimestampArrayObject, TimeArrayObject, EnumArrayObject, String, StringArrayObject, + DecimalObject, DecimalArrayObject, ObjectArrayObject, CollectionObject, MapObject, BinaryObject, WrappedDataObject +) from .datatypes.base import IgniteDataTypeProps from .exceptions import ParseError -from .utils import entity_id, hashcode, schema_id +from .utils import entity_id, schema_id ALLOWED_FIELD_TYPES = [ @@ -69,12 +76,14 @@ def schema_id(self) -> int: def __new__(cls, *args, **kwargs) -> Any: # allow all items in Binary Object schema to be populated as optional # arguments to `__init__()` with sensible defaults. - attributes = {} - for k, v in cls.schema.items(): - attributes[k] = attr.ib(type=getattr(v, 'pythonic', type(None)), default=getattr(v, 'default', None)) - - attributes.update({'version': attr.ib(type=int, default=1)}) - cls = attr.s(cls, these=attributes) + if not attr.has(cls): + attributes = { + k: attr.ib(type=getattr(v, 'pythonic', type(None)), default=getattr(v, 'default', None)) + for k, v in cls.schema.items() + } + + attributes.update({'version': attr.ib(type=int, default=1)}) + cls = attr.s(cls, these=attributes) # skip parameters return super().__new__(cls) @@ -99,7 +108,7 @@ def __new__( """ Sort out class creation arguments. """ result = super().__new__( - mcs, name, (GenericObjectProps, )+base_classes, namespace + mcs, name, (GenericObjectProps, ) + base_classes, namespace ) def _from_python(self, stream, save_to_buf=False): @@ -111,10 +120,37 @@ def _from_python(self, stream, save_to_buf=False): :param stream: BinaryStream :param save_to_buf: Optional. If True, save serialized data to buffer. 
""" + initial_pos = stream.tell() + header, header_class = write_header(self, stream) + + offsets = [ctypes.sizeof(header_class)] + schema_items = list(self.schema.items()) + for field_name, field_type in schema_items: + val = getattr(self, field_name, getattr(field_type, 'default', None)) + field_start_pos = stream.tell() + field_type.from_python(stream, val) + offsets.append(max(offsets) + stream.tell() - field_start_pos) + + write_footer(self, stream, header, header_class, schema_items, offsets, initial_pos, save_to_buf) - compact_footer = stream.compact_footer + async def _from_python_async(self, stream, save_to_buf=False): + """ + Async version of _from_python + """ + initial_pos = stream.tell() + header, header_class = write_header(self, stream) - # prepare header + offsets = [ctypes.sizeof(header_class)] + schema_items = list(self.schema.items()) + for field_name, field_type in schema_items: + val = getattr(self, field_name, getattr(field_type, 'default', None)) + field_start_pos = stream.tell() + await field_type.from_python_async(stream, val) + offsets.append(max(offsets) + stream.tell() - field_start_pos) + + write_footer(self, stream, header, header_class, schema_items, offsets, initial_pos, save_to_buf) + + def write_header(obj, stream): header_class = BinaryObject.build_header() header = header_class() header.type_code = int.from_bytes( @@ -122,36 +158,30 @@ def _from_python(self, stream, save_to_buf=False): byteorder=PROTOCOL_BYTE_ORDER ) header.flags = BinaryObject.USER_TYPE | BinaryObject.HAS_SCHEMA - if compact_footer: + if stream.compact_footer: header.flags |= BinaryObject.COMPACT_FOOTER - header.version = self.version - header.type_id = self.type_id - header.schema_id = self.schema_id + header.version = obj.version + header.type_id = obj.type_id + header.schema_id = obj.schema_id - header_len = ctypes.sizeof(header_class) - initial_pos = stream.tell() + stream.seek(ctypes.sizeof(header_class), SEEK_CUR) - # create fields and calculate offsets - offsets = [ctypes.sizeof(header_class)] - schema_items = list(self.schema.items()) - - stream.seek(initial_pos + header_len) - for field_name, field_type in schema_items: - val = getattr(self, field_name, getattr(field_type, 'default', None)) - field_start_pos = stream.tell() - field_type.from_python(stream, val) - offsets.append(max(offsets) + stream.tell() - field_start_pos) + return header, header_class + def write_footer(obj, stream, header, header_class, schema_items, offsets, initial_pos, save_to_buf): offsets = offsets[:-1] + header_len = ctypes.sizeof(header_class) # create footer if max(offsets, default=0) < 255: header.flags |= BinaryObject.OFFSET_ONE_BYTE elif max(offsets) < 65535: header.flags |= BinaryObject.OFFSET_TWO_BYTES + schema_class = BinaryObject.schema_type(header.flags) * len(offsets) schema = schema_class() - if compact_footer: + + if stream.compact_footer: for i, offset in enumerate(offsets): schema[i] = offset else: @@ -171,8 +201,8 @@ def _from_python(self, stream, save_to_buf=False): stream.write(schema) if save_to_buf: - self._buffer = bytes(stream.mem_view(initial_pos, stream.tell() - initial_pos)) - self._hashcode = header.hash_code + obj._buffer = bytes(stream.mem_view(initial_pos, stream.tell() - initial_pos)) + obj._hashcode = header.hash_code def _setattr(self, attr_name: str, attr_value: Any): # reset binary representation, if any field is changed @@ -184,6 +214,7 @@ def _setattr(self, attr_name: str, attr_value: Any): super(result, self).__setattr__(attr_name, attr_value) setattr(result, 
_from_python.__name__, _from_python) + setattr(result, _from_python_async.__name__, _from_python_async) setattr(result, '__setattr__', _setattr) setattr(result, '_buffer', None) setattr(result, '_hashcode', None) diff --git a/pyignite/cache.py b/pyignite/cache.py index a91a3cf..5fba6fb 100644 --- a/pyignite/cache.py +++ b/pyignite/cache.py @@ -16,54 +16,145 @@ import time from typing import Any, Dict, Iterable, Optional, Tuple, Union -from .constants import * -from .binary import GenericObjectMeta, unwrap_binary +from .constants import AFFINITY_RETRIES, AFFINITY_DELAY +from .binary import GenericObjectMeta from .datatypes import prop_codes from .datatypes.internal import AnyDataObject -from .exceptions import ( - CacheCreationError, CacheError, ParameterError, SQLError, - connection_errors, -) -from .utils import ( - cache_id, get_field_by_id, is_wrapped, - status_to_exception, unsigned -) +from .exceptions import CacheCreationError, CacheError, ParameterError, SQLError, connection_errors +from .utils import cache_id, get_field_by_id, status_to_exception, unsigned from .api.cache_config import ( - cache_create, cache_create_with_config, - cache_get_or_create, cache_get_or_create_with_config, - cache_destroy, cache_get_configuration, + cache_create, cache_create_with_config, cache_get_or_create, cache_get_or_create_with_config, cache_destroy, + cache_get_configuration ) from .api.key_value import ( - cache_get, cache_put, cache_get_all, cache_put_all, cache_replace, - cache_clear, cache_clear_key, cache_clear_keys, - cache_contains_key, cache_contains_keys, - cache_get_and_put, cache_get_and_put_if_absent, cache_put_if_absent, - cache_get_and_remove, cache_get_and_replace, - cache_remove_key, cache_remove_keys, cache_remove_all, - cache_remove_if_equals, cache_replace_if_equals, cache_get_size, + cache_get, cache_put, cache_get_all, cache_put_all, cache_replace, cache_clear, cache_clear_key, cache_clear_keys, + cache_contains_key, cache_contains_keys, cache_get_and_put, cache_get_and_put_if_absent, cache_put_if_absent, + cache_get_and_remove, cache_get_and_replace, cache_remove_key, cache_remove_keys, cache_remove_all, + cache_remove_if_equals, cache_replace_if_equals, cache_get_size ) -from .api.sql import scan, scan_cursor_get_page, sql, sql_cursor_get_page +from .cursors import ScanCursor, SqlCursor from .api.affinity import cache_get_node_partitions - PROP_CODES = set([ getattr(prop_codes, x) for x in dir(prop_codes) if x.startswith('PROP_') ]) -CACHE_CREATE_FUNCS = { - True: { - True: cache_get_or_create_with_config, - False: cache_create_with_config, - }, - False: { - True: cache_get_or_create, - False: cache_create, - }, -} - - -class Cache: + + +def get_cache(client: 'Client', settings: Union[str, dict]) -> 'Cache': + name, settings = __parse_settings(settings) + if settings: + raise ParameterError('Only cache name allowed as a parameter') + + return Cache(client, name) + + +def create_cache(client: 'Client', settings: Union[str, dict]) -> 'Cache': + name, settings = __parse_settings(settings) + + conn = client.random_node + if settings: + result = cache_create_with_config(conn, settings) + else: + result = cache_create(conn, name) + + if result.status != 0: + raise CacheCreationError(result.message) + + return Cache(client, name) + + +def get_or_create_cache(client: 'Client', settings: Union[str, dict]) -> 'Cache': + name, settings = __parse_settings(settings) + + conn = client.random_node + if settings: + result = cache_get_or_create_with_config(conn, settings) + else: + result = 
cache_get_or_create(conn, name) + + if result.status != 0: + raise CacheCreationError(result.message) + + return Cache(client, name) + + +def __parse_settings(settings: Union[str, dict]) -> Tuple[Optional[str], Optional[dict]]: + if isinstance(settings, str): + return settings, None + elif isinstance(settings, dict) and prop_codes.PROP_NAME in settings: + name = settings[prop_codes.PROP_NAME] + if len(settings) == 1: + return name, None + + if not set(settings).issubset(PROP_CODES): + raise ParameterError('One or more settings was not recognized') + + return name, settings + else: + raise ParameterError('You should supply at least cache name') + + +class BaseCacheMixin: + def _get_affinity_key(self, key, key_hint=None): + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + if self.affinity.get('is_applicable'): + config = self.affinity.get('cache_config') + if config: + affinity_key_id = config.get(key_hint.type_id) + + if affinity_key_id and isinstance(key, GenericObjectMeta): + return get_field_by_id(key, affinity_key_id) + + return key, key_hint + + def _update_affinity(self, full_affinity): + self.affinity['version'] = full_affinity['version'] + + full_mapping = full_affinity.get('partition_mapping') + if full_mapping and self.cache_id in full_mapping: + self.affinity.update(full_mapping[self.cache_id]) + + def _get_node_by_hashcode(self, hashcode, parts): + """ + Get node by key hashcode. Calculate partition and return node on that it is primary. + (algorithm is taken from `RendezvousAffinityFunction.java`) + """ + + # calculate partition for key or affinity key + # (algorithm is taken from `RendezvousAffinityFunction.java`) + mask = parts - 1 + + if parts & mask == 0: + part = (hashcode ^ (unsigned(hashcode) >> 16)) & mask + else: + part = abs(hashcode // parts) + + assert 0 <= part < parts, 'Partition calculation has failed' + + node_mapping = self.affinity.get('node_mapping') + if not node_mapping: + return None + + node_uuid, best_conn = None, None + for u, p in node_mapping.items(): + if part in p: + node_uuid = u + break + + if node_uuid: + for n in self.client._nodes: + if n.uuid == node_uuid: + best_conn = n + break + if best_conn and best_conn.alive: + return best_conn + + +class Cache(BaseCacheMixin): """ Ignite cache abstraction. Users should never use this class directly, but construct its instances with @@ -73,77 +164,18 @@ class Cache: :ref:`this example ` on how to do it. """ - affinity = None - _cache_id = None - _name = None - _client = None - _settings = None - - @staticmethod - def _validate_settings( - settings: Union[str, dict] = None, get_only: bool = False, - ): - if any([ - not settings, - type(settings) not in (str, dict), - type(settings) is dict and prop_codes.PROP_NAME not in settings, - ]): - raise ParameterError('You should supply at least cache name') - - if all([ - type(settings) is dict, - not set(settings).issubset(PROP_CODES), - ]): - raise ParameterError('One or more settings was not recognized') - - if get_only and type(settings) is dict and len(settings) != 1: - raise ParameterError('Only cache name allowed as a parameter') - - def __init__( - self, client: 'Client', settings: Union[str, dict] = None, - with_get: bool = False, get_only: bool = False, - ): + def __init__(self, client: 'Client', name: str): """ - Initialize cache object. + Initialize cache object. For internal use. :param client: Ignite client, - :param settings: cache settings. Can be a string (cache name) or a dict - of cache properties and their values. 
In this case PROP_NAME is - mandatory, - :param with_get: (optional) do not raise exception, if the cache - is already exists. Defaults to False, - :param get_only: (optional) do not communicate with Ignite server - at all, only create Cache instance. Defaults to False. + :param name: Cache name. """ self._client = client - self._validate_settings(settings) - if type(settings) == str: - self._name = settings - else: - self._name = settings[prop_codes.PROP_NAME] - - if not get_only: - func = CACHE_CREATE_FUNCS[type(settings) is dict][with_get] - result = func(client.random_node, settings) - if result.status != 0: - raise CacheCreationError(result.message) - + self._name = name + self._settings = None self._cache_id = cache_id(self._name) - self.affinity = { - 'version': (0, 0), - } - - def get_protocol_version(self) -> Optional[Tuple]: - """ - Returns the tuple of major, minor, and revision numbers of the used - thin protocol version, or None, if no connection to the Ignite cluster - was not yet established. - - This method is not a part of the public API. Unless you wish to - extend the `pyignite` capabilities (with additional testing, logging, - examining connections, et c.) you probably should not use it. - """ - return self.client.protocol_version + self.affinity = {'version': (0, 0)} @property def settings(self) -> Optional[dict]: @@ -197,18 +229,6 @@ def cache_id(self) -> int: """ return self._cache_id - def _process_binary(self, value: Any) -> Any: - """ - Detects and recursively unwraps Binary Object. - - :param value: anything that could be a Binary Object, - :return: the result of the Binary Object unwrapping with all other data - left intact. - """ - if is_wrapped(value): - return unwrap_binary(self._client, value) - return value - @status_to_exception(CacheError) def destroy(self): """ @@ -234,9 +254,7 @@ def _get_affinity(self, conn: 'Connection') -> Dict: return result - def get_best_node( - self, key: Any = None, key_hint: 'IgniteDataType' = None, - ) -> 'Connection': + def get_best_node(self, key: Any = None, key_hint: 'IgniteDataType' = None) -> 'Connection': """ Returns the node from the list of the nodes, opened by client, that most probably contains the needed key-value pair. See IEP-23. 
@@ -253,14 +271,11 @@ def get_best_node( conn = self._client.random_node if self.client.partition_aware and key is not None: - if key_hint is None: - key_hint = AnyDataObject.map_python_type(key) - if self.affinity['version'] < self._client.affinity_version: # update partition mapping while True: try: - self.affinity = self._get_affinity(conn) + full_affinity = self._get_affinity(conn) break except connection_errors: # retry if connection failed @@ -270,68 +285,23 @@ def get_best_node( # server did not create mapping in time return conn - # flatten it a bit - try: - self.affinity.update(self.affinity['partition_mapping'][0]) - except IndexError: - return conn - del self.affinity['partition_mapping'] - - # calculate the number of partitions - parts = 0 - if 'node_mapping' in self.affinity: - for p in self.affinity['node_mapping'].values(): - parts += len(p) - - self.affinity['number_of_partitions'] = parts + self._update_affinity(full_affinity) for conn in self.client._nodes: if not conn.alive: conn.reconnect() - else: - # get number of partitions - parts = self.affinity.get('number_of_partitions') + + parts = self.affinity.get('number_of_partitions') if not parts: return conn - if self.affinity['is_applicable']: - affinity_key_id = self.affinity['cache_config'].get( - key_hint.type_id, - None - ) - if affinity_key_id and isinstance(key, GenericObjectMeta): - key, key_hint = get_field_by_id(key, affinity_key_id) + key, key_hint = self._get_affinity_key(key, key_hint) + hashcode = key_hint.hashcode(key, self._client) - # calculate partition for key or affinity key - # (algorithm is taken from `RendezvousAffinityFunction.java`) - base_value = key_hint.hashcode(key, self._client) - mask = parts - 1 - - if parts & mask == 0: - part = (base_value ^ (unsigned(base_value) >> 16)) & mask - else: - part = abs(base_value // parts) - - assert 0 <= part < parts, 'Partition calculation has failed' - - # search for connection - try: - node_uuid, best_conn = None, None - for u, p in self.affinity['node_mapping'].items(): - if part in p: - node_uuid = u - break - - if node_uuid: - for n in conn.client._nodes: - if n.uuid == node_uuid: - best_conn = n - break - if best_conn and best_conn.alive: - conn = best_conn - except KeyError: - pass + best_node = self._get_node_by_hashcode(hashcode, parts) + if best_node: + return best_node return conn @@ -354,12 +324,12 @@ def get(self, key, key_hint: object = None) -> Any: key, key_hint=key_hint ) - result.value = self._process_binary(result.value) + result.value = self.client.unwrap_binary(result.value) return result @status_to_exception(CacheError) def put( - self, key, value, key_hint: object = None, value_hint: object = None + self, key, value, key_hint: object = None, value_hint: object = None ): """ Puts a value with a given key to cache (overwriting existing value @@ -392,7 +362,7 @@ def get_all(self, keys: list) -> list: result = cache_get_all(self.get_best_node(), self._cache_id, keys) if result.value: for key, value in result.value.items(): - result.value[key] = self._process_binary(value) + result.value[key] = self.client.unwrap_binary(value) return result @status_to_exception(CacheError) @@ -409,7 +379,7 @@ def put_all(self, pairs: dict): @status_to_exception(CacheError) def replace( - self, key, value, key_hint: object = None, value_hint: object = None + self, key, value, key_hint: object = None, value_hint: object = None ): """ Puts a value with a given key to cache only if the key already exist. 
@@ -429,7 +399,7 @@ def replace( self._cache_id, key, value, key_hint=key_hint, value_hint=value_hint ) - result.value = self._process_binary(result.value) + result.value = self.client.unwrap_binary(result.value) return result @status_to_exception(CacheError) @@ -465,6 +435,16 @@ def clear_key(self, key, key_hint: object = None): key_hint=key_hint ) + @status_to_exception(CacheError) + def clear_keys(self, keys: Iterable): + """ + Clears the cache key without notifying listeners or cache writers. + + :param keys: a list of keys or (key, type hint) tuples + """ + + return cache_clear_keys(self.get_best_node(), self._cache_id, keys) + @status_to_exception(CacheError) def contains_key(self, key, key_hint=None) -> bool: """ @@ -493,7 +473,7 @@ def contains_keys(self, keys: Iterable) -> bool: :param keys: a list of keys or (key, type hint) tuples, :return: boolean `True` when all keys are present, `False` otherwise. """ - return cache_contains_keys(self._client, self._cache_id, keys) + return cache_contains_keys(self.get_best_node(), self._cache_id, keys) @status_to_exception(CacheError) def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: @@ -518,12 +498,12 @@ def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: key, value, key_hint, value_hint ) - result.value = self._process_binary(result.value) + result.value = self.client.unwrap_binary(result.value) return result @status_to_exception(CacheError) def get_and_put_if_absent( - self, key, value, key_hint=None, value_hint=None + self, key, value, key_hint=None, value_hint=None ): """ Puts a value with a given key to cache only if the key does not @@ -546,7 +526,7 @@ def get_and_put_if_absent( key, value, key_hint, value_hint ) - result.value = self._process_binary(result.value) + result.value = self.client.unwrap_binary(result.value) return result @status_to_exception(CacheError) @@ -591,12 +571,12 @@ def get_and_remove(self, key, key_hint=None) -> Any: key, key_hint ) - result.value = self._process_binary(result.value) + result.value = self.client.unwrap_binary(result.value) return result @status_to_exception(CacheError) def get_and_replace( - self, key, value, key_hint=None, value_hint=None + self, key, value, key_hint=None, value_hint=None ) -> Any: """ Puts a value with a given key to cache, returning previous value @@ -620,7 +600,7 @@ def get_and_replace( key, value, key_hint, value_hint ) - result.value = self._process_binary(result.value) + result.value = self.client.unwrap_binary(result.value) return result @status_to_exception(CacheError) @@ -683,8 +663,8 @@ def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): @status_to_exception(CacheError) def replace_if_equals( - self, key, sample, value, - key_hint=None, sample_hint=None, value_hint=None + self, key, sample, value, + key_hint=None, sample_hint=None, value_hint=None ) -> Any: """ Puts a value with a given key to cache only if the key already exists @@ -710,7 +690,7 @@ def replace_if_equals( key, sample, value, key_hint, sample_hint, value_hint ) - result.value = self._process_binary(result.value) + result.value = self.client.unwrap_binary(result.value) return result @status_to_exception(CacheError) @@ -727,9 +707,7 @@ def get_size(self, peek_modes=0): self.get_best_node(), self._cache_id, peek_modes ) - def scan( - self, page_size: int = 1, partitions: int = -1, local: bool = False - ): + def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False): """ Returns all key-value pairs from the cache, 
similar to `get_all`, but with internal pagination, which is slower, but safer. @@ -740,40 +718,14 @@ def scan( (negative to query entire cache), :param local: (optional) pass True if this query should be executed on local node only. Defaults to False, - :return: generator with key-value pairs. + :return: Scan query cursor. """ - node = self.get_best_node() - - result = scan( - node, - self._cache_id, - page_size, - partitions, - local - ) - if result.status != 0: - raise CacheError(result.message) - - cursor = result.value['cursor'] - for k, v in result.value['data'].items(): - k = self._process_binary(k) - v = self._process_binary(v) - yield k, v - - while result.value['more']: - result = scan_cursor_get_page(node, cursor) - if result.status != 0: - raise CacheError(result.message) - - for k, v in result.value['data'].items(): - k = self._process_binary(k) - v = self._process_binary(v) - yield k, v + return ScanCursor(self.client, self._cache_id, page_size, partitions, local) def select_row( - self, query_str: str, page_size: int = 1, - query_args: Optional[list] = None, distributed_joins: bool = False, - replicated_only: bool = False, local: bool = False, timeout: int = 0 + self, query_str: str, page_size: int = 1, + query_args: Optional[list] = None, distributed_joins: bool = False, + replicated_only: bool = False, local: bool = False, timeout: int = 0 ): """ Executes a simplified SQL SELECT query over data stored in the cache. @@ -791,46 +743,13 @@ def select_row( on local node only. Defaults to False, :param timeout: (optional) non-negative timeout value in ms. Zero disables timeout (default), - :return: generator with key-value pairs. - """ - node = self.get_best_node() - - def generate_result(value): - cursor = value['cursor'] - more = value['more'] - for k, v in value['data'].items(): - k = self._process_binary(k) - v = self._process_binary(v) - yield k, v - - while more: - inner_result = sql_cursor_get_page(node, cursor) - if result.status != 0: - raise SQLError(result.message) - more = inner_result.value['more'] - for k, v in inner_result.value['data'].items(): - k = self._process_binary(k) - v = self._process_binary(v) - yield k, v - + :return: Sql cursor. 
+ """ type_name = self.settings[ prop_codes.PROP_QUERY_ENTITIES ][0]['value_type_name'] if not type_name: raise SQLError('Value type is unknown') - result = sql( - node, - self._cache_id, - type_name, - query_str, - page_size, - query_args, - distributed_joins, - replicated_only, - local, - timeout - ) - if result.status != 0: - raise SQLError(result.message) - return generate_result(result.value) + return SqlCursor(self.client, self._cache_id, type_name, query_str, page_size, query_args, + distributed_joins, replicated_only, local, timeout) diff --git a/pyignite/client.py b/pyignite/client.py index 9416474..e4eef6a 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -44,22 +44,20 @@ import random import re from itertools import chain -from typing import Dict, Iterable, List, Optional, Tuple, Type, Union +from typing import Iterable, Type, Union, Any from .api.binary import get_binary_type, put_binary_type from .api.cache_config import cache_get_names -from .api.sql import sql_fields, sql_fields_cursor_get_page -from .cache import Cache +from .cursors import SqlFieldsCursor +from .cache import Cache, create_cache, get_cache, get_or_create_cache from .connection import Connection -from .constants import * +from .constants import IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT, PROTOCOL_BYTE_ORDER from .datatypes import BinaryObject from .datatypes.internal import tc_map -from .exceptions import ( - BinaryTypeError, CacheError, ReconnectError, SQLError, connection_errors, -) +from .exceptions import BinaryTypeError, CacheError, ReconnectError, connection_errors +from .stream import BinaryStream, READ_BACKWARD from .utils import ( - cache_id, capitalize, entity_id, schema_id, process_delimiter, - status_to_exception, is_iterable, + cache_id, capitalize, entity_id, schema_id, process_delimiter, status_to_exception, is_iterable, is_wrapped ) from .binary import GenericObjectMeta @@ -67,58 +65,24 @@ __all__ = ['Client'] -class Client: - """ - This is a main `pyignite` class, that is build upon the - :class:`~pyignite.connection.Connection`. In addition to the attributes, - properties and methods of its parent class, `Client` implements - the following features: - - * cache factory. Cache objects are used for key-value operations, - * Ignite SQL endpoint, - * binary types registration endpoint. - """ - - _registry = defaultdict(dict) - _compact_footer: bool = None - _connection_args: Dict = None - _current_node: int = None - _nodes: List[Connection] = None - +class BaseClient: # used for Complex object data class names sanitizing _identifier = re.compile(r'[^0-9a-zA-Z_.+$]', re.UNICODE) _ident_start = re.compile(r'^[^a-zA-Z_]+', re.UNICODE) - affinity_version: Optional[Tuple] = None - protocol_version: Optional[Tuple] = None - - def __init__( - self, compact_footer: bool = None, partition_aware: bool = False, - **kwargs - ): - """ - Initialize client. - - :param compact_footer: (optional) use compact (True, recommended) or - full (False) schema approach when serializing Complex objects. - Default is to use the same approach the server is using (None). 
- Apache Ignite binary protocol documentation on this topic: - https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-schema - :param partition_aware: (optional) try to calculate the exact data - placement from the key before to issue the key operation to the - server node: - https://cwiki.apache.org/confluence/display/IGNITE/IEP-23%3A+Best+Effort+Affinity+for+thin+clients - The feature is in experimental status, so the parameter is `False` - by default. This will be changed later. - """ + def __init__(self, compact_footer: bool = None, partition_aware: bool = False, **kwargs): self._compact_footer = compact_footer + self._partition_aware = partition_aware self._connection_args = kwargs + self._registry = defaultdict(dict) self._nodes = [] self._current_node = 0 self._partition_aware = partition_aware self.affinity_version = (0, 0) + self._protocol_version = None - def get_protocol_version(self) -> Optional[Tuple]: + @property + def protocol_version(self): """ Returns the tuple of major, minor, and revision numbers of the used thin protocol version, or None, if no connection to the Ignite cluster @@ -128,7 +92,11 @@ def get_protocol_version(self) -> Optional[Tuple]: extend the `pyignite` capabilities (with additional testing, logging, examining connections, et c.) you probably should not use it. """ - return self.protocol_version + return self._protocol_version + + @protocol_version.setter + def protocol_version(self, value): + self._protocol_version = value @property def partition_aware(self): @@ -136,32 +104,182 @@ def partition_aware(self): @property def partition_awareness_supported_by_protocol(self): - # TODO: Need to re-factor this. I believe, we need separate class or - # set of functions to work with protocol versions without manually - # comparing versions with just some random tuples return self.protocol_version is not None and self.protocol_version >= (1, 4, 0) - def connect(self, *args): + @property + def compact_footer(self) -> bool: """ - Connect to Ignite cluster node(s). + This property remembers Complex object schema encoding approach when + decoding any Complex object, to use the same approach on Complex + object encoding. - :param args: (optional) host(s) and port(s) to connect to. + :return: True if compact schema was used by server or no Complex + object decoding has yet taken place, False if full schema was used. 
""" + # this is an ordinary object property, but its backing storage + # is a class attribute + + # use compact schema by default, but leave initial (falsy) backing + # value unchanged + return self._compact_footer or self._compact_footer is None + + @compact_footer.setter + def compact_footer(self, value: bool): + # normally schema approach should not change + if self._compact_footer not in (value, None): + raise Warning('Can not change client schema approach.') + else: + self._compact_footer = value + + @staticmethod + def _process_connect_args(*args): if len(args) == 0: # no parameters − use default Ignite host and port - nodes = [(IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT)] - elif len(args) == 1 and is_iterable(args[0]): + return [(IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT)] + if len(args) == 1 and is_iterable(args[0]): # iterable of host-port pairs is given - nodes = args[0] - elif ( - len(args) == 2 - and isinstance(args[0], str) - and isinstance(args[1], int) - ): + return args[0] + if len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], int): # host and port are given - nodes = [args] - else: - raise ConnectionError('Connection parameters are not valid.') + return [args] + + raise ConnectionError('Connection parameters are not valid.') + + def _process_get_binary_type_result(self, result): + if result.status != 0 or not result.value['type_exists']: + return result + + binary_fields = result.value.pop('binary_fields') + old_format_schemas = result.value.pop('schema') + result.value['schemas'] = [] + for s_id, field_ids in old_format_schemas.items(): + result.value['schemas'].append(self._convert_schema(field_ids, binary_fields)) + return result + + @staticmethod + def _convert_type(tc_type: int): + try: + return tc_map(tc_type.to_bytes(1, PROTOCOL_BYTE_ORDER)) + except (KeyError, OverflowError): + # if conversion to char or type lookup failed, + # we probably have a binary object type ID + return BinaryObject + + def _convert_schema(self, field_ids: list, binary_fields: list) -> OrderedDict: + converted_schema = OrderedDict() + for field_id in field_ids: + binary_field = next(x for x in binary_fields if x['field_id'] == field_id) + converted_schema[binary_field['field_name']] = self._convert_type(binary_field['type_id']) + return converted_schema + + @staticmethod + def _create_dataclass(type_name: str, schema: OrderedDict = None) -> Type: + """ + Creates default (generic) class for Ignite Complex object. + + :param type_name: Complex object type name, + :param schema: Complex object schema, + :return: the resulting class. + """ + schema = schema or {} + return GenericObjectMeta(type_name, (), {}, schema=schema) + + @classmethod + def _create_type_name(cls, type_name: str) -> str: + """ + Creates Python data class name from Ignite binary type name. + + Handles all the special cases found in + `java.org.apache.ignite.binary.BinaryBasicNameMapper.simpleName()`. + Tries to adhere to PEP8 along the way. 
+ """ + + # general sanitizing + type_name = cls._identifier.sub('', type_name) + + # - name ending with '$' (Scala) + # - name + '$' + some digits (anonymous class) + # - '$$Lambda$' in the middle + type_name = process_delimiter(type_name, '$') + + # .NET outer/inner class delimiter + type_name = process_delimiter(type_name, '+') + + # Java fully qualified class name + type_name = process_delimiter(type_name, '.') + + # start chars sanitizing + type_name = capitalize(cls._ident_start.sub('', type_name)) + + return type_name + + def _sync_binary_registry(self, type_id: int, type_info: dict): + """ + Sync binary registry + :param type_id: Complex object type ID. + :param type_info: Complex object type info. + """ + if type_info['type_exists']: + for schema in type_info['schemas']: + if not self._registry[type_id].get(schema_id(schema), None): + data_class = self._create_dataclass( + self._create_type_name(type_info['type_name']), + schema, + ) + self._registry[type_id][schema_id(schema)] = data_class + + def _get_from_registry(self, type_id, schema): + """ + Get binary type info from registry. + + :param type_id: Complex object type ID. + :param schema: Complex object schema. + """ + if schema: + try: + return self._registry[type_id][schema_id(schema)] + except KeyError: + return None + return self._registry[type_id] + + +class Client(BaseClient): + """ + This is a main `pyignite` class, that is build upon the + :class:`~pyignite.connection.Connection`. In addition to the attributes, + properties and methods of its parent class, `Client` implements + the following features: + + * cache factory. Cache objects are used for key-value operations, + * Ignite SQL endpoint, + * binary types registration endpoint. + """ + + def __init__(self, compact_footer: bool = None, partition_aware: bool = False, **kwargs): + """ + Initialize client. + + :param compact_footer: (optional) use compact (True, recommended) or + full (False) schema approach when serializing Complex objects. + Default is to use the same approach the server is using (None). + Apache Ignite binary protocol documentation on this topic: + https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-schema + :param partition_aware: (optional) try to calculate the exact data + placement from the key before to issue the key operation to the + server node: + https://cwiki.apache.org/confluence/display/IGNITE/IEP-23%3A+Best+Effort+Affinity+for+thin+clients + The feature is in experimental status, so the parameter is `False` + by default. This will be changed later. + """ + super().__init__(compact_footer, partition_aware, **kwargs) + + def connect(self, *args): + """ + Connect to Ignite cluster node(s). + + :param args: (optional) host(s) and port(s) to connect to. 
+ """ + nodes = self._process_connect_args(*args) # the following code is quite twisted, because the protocol version # is initially unknown @@ -169,14 +287,12 @@ def connect(self, *args): # TODO: open first node in foreground, others − in background for i, node in enumerate(nodes): host, port = node - conn = Connection(self, **self._connection_args) - conn.host = host - conn.port = port + conn = Connection(self, host, port, **self._connection_args) try: if self.protocol_version is None or self.partition_aware: # open connection before adding to the pool - conn.connect(host, port) + conn.connect() # now we have the protocol version if not self.partition_aware: @@ -210,13 +326,7 @@ def random_node(self) -> Connection: """ if self.partition_aware: # if partition awareness is used just pick a random connected node - try: - return random.choice( - list(n for n in self._nodes if n.alive) - ) - except IndexError: - # cannot choose from an empty sequence - raise ReconnectError('Can not reconnect: out of nodes.') from None + return self._get_random_node() else: # if partition awareness is not used then just return the current # node if it's alive or the next usable node if connection with the @@ -238,7 +348,7 @@ def random_node(self) -> Connection: for i in chain(range(self._current_node, num_nodes), range(self._current_node)): node = self._nodes[i] try: - node.connect(node.host, node.port) + node.connect() except connection_errors: pass else: @@ -247,6 +357,19 @@ def random_node(self) -> Connection: # no nodes left raise ReconnectError('Can not reconnect: out of nodes.') + def _get_random_node(self, reconnect=True): + alive_nodes = [n for n in self._nodes if n.alive] + if alive_nodes: + return random.choice(alive_nodes) + elif reconnect: + for n in self._nodes: + n.reconnect() + + return self._get_random_node(reconnect=False) + else: + # cannot choose from an empty sequence + raise ReconnectError('Can not reconnect: out of nodes.') from None + @status_to_exception(BinaryTypeError) def get_binary_type(self, binary_type: Union[str, int]) -> dict: """ @@ -267,71 +390,8 @@ def get_binary_type(self, binary_type: Union[str, int]) -> dict: - `schemas`: a list, containing the Complex object schemas in format: OrderedDict[field name: field type hint]. A schema can be empty. 
""" - def convert_type(tc_type: int): - try: - return tc_map(tc_type.to_bytes(1, PROTOCOL_BYTE_ORDER)) - except (KeyError, OverflowError): - # if conversion to char or type lookup failed, - # we probably have a binary object type ID - return BinaryObject - - def convert_schema( - field_ids: list, binary_fields: list - ) -> OrderedDict: - converted_schema = OrderedDict() - for field_id in field_ids: - binary_field = [ - x - for x in binary_fields - if x['field_id'] == field_id - ][0] - converted_schema[binary_field['field_name']] = convert_type( - binary_field['type_id'] - ) - return converted_schema - - conn = self.random_node - - result = get_binary_type(conn, binary_type) - if result.status != 0 or not result.value['type_exists']: - return result - - binary_fields = result.value.pop('binary_fields') - old_format_schemas = result.value.pop('schema') - result.value['schemas'] = [] - for s_id, field_ids in old_format_schemas.items(): - result.value['schemas'].append( - convert_schema(field_ids, binary_fields) - ) - return result - - @property - def compact_footer(self) -> bool: - """ - This property remembers Complex object schema encoding approach when - decoding any Complex object, to use the same approach on Complex - object encoding. - - :return: True if compact schema was used by server or no Complex - object decoding has yet taken place, False if full schema was used. - """ - # this is an ordinary object property, but its backing storage - # is a class attribute - - # use compact schema by default, but leave initial (falsy) backing - # value unchanged - return ( - self.__class__._compact_footer - or self.__class__._compact_footer is None - ) - - @compact_footer.setter - def compact_footer(self, value: bool): - # normally schema approach should not change - if self.__class__._compact_footer not in (value, None): - raise Warning('Can not change client schema approach.') - else: - self.__class__._compact_footer = value + result = get_binary_type(self.random_node, binary_type) + return self._process_get_binary_type_result(result) @status_to_exception(BinaryTypeError) def put_binary_type( @@ -353,71 +413,9 @@ def put_binary_type( When register binary type, pass a dict of field names: field types. Binary type with no fields is OK. """ - return put_binary_type( - self.random_node, type_name, affinity_key_field, is_enum, schema - ) + return put_binary_type(self.random_node, type_name, affinity_key_field, is_enum, schema) - @staticmethod - def _create_dataclass(type_name: str, schema: OrderedDict = None) -> Type: - """ - Creates default (generic) class for Ignite Complex object. - - :param type_name: Complex object type name, - :param schema: Complex object schema, - :return: the resulting class. - """ - schema = schema or {} - return GenericObjectMeta(type_name, (), {}, schema=schema) - - def _sync_binary_registry(self, type_id: int): - """ - Reads Complex object description from Ignite server. Creates default - Complex object classes and puts in registry, if not already there. - - :param type_id: Complex object type ID. 
- """ - type_info = self.get_binary_type(type_id) - if type_info['type_exists']: - for schema in type_info['schemas']: - if not self._registry[type_id].get(schema_id(schema), None): - data_class = self._create_dataclass( - self._create_type_name(type_info['type_name']), - schema, - ) - self._registry[type_id][schema_id(schema)] = data_class - - @classmethod - def _create_type_name(cls, type_name: str) -> str: - """ - Creates Python data class name from Ignite binary type name. - - Handles all the special cases found in - `java.org.apache.ignite.binary.BinaryBasicNameMapper.simpleName()`. - Tries to adhere to PEP8 along the way. - """ - - # general sanitizing - type_name = cls._identifier.sub('', type_name) - - # - name ending with '$' (Scala) - # - name + '$' + some digits (anonymous class) - # - '$$Lambda$' in the middle - type_name = process_delimiter(type_name, '$') - - # .NET outer/inner class delimiter - type_name = process_delimiter(type_name, '+') - - # Java fully qualified class name - type_name = process_delimiter(type_name, '.') - - # start chars sanitizing - type_name = capitalize(cls._ident_start.sub('', type_name)) - - return type_name - - def register_binary_type( - self, data_class: Type, affinity_key_field: str = None, - ): + def register_binary_type(self, data_class: Type, affinity_key_field: str = None): """ Register the given class as a representation of a certain Complex object type. Discards autogenerated or previously registered class. @@ -425,47 +423,44 @@ def register_binary_type( :param data_class: Complex object class, :param affinity_key_field: (optional) affinity parameter. """ - if not self.query_binary_type( - data_class.type_id, data_class.schema_id - ): - self.put_binary_type( - data_class.type_name, - affinity_key_field, - schema=data_class.schema, - ) + if not self.query_binary_type(data_class.type_id, data_class.schema_id): + self.put_binary_type(data_class.type_name, affinity_key_field, schema=data_class.schema) self._registry[data_class.type_id][data_class.schema_id] = data_class - def query_binary_type( - self, binary_type: Union[int, str], schema: Union[int, dict] = None, - sync: bool = True - ): + def query_binary_type(self, binary_type: Union[int, str], schema: Union[int, dict] = None): """ Queries the registry of Complex object classes. :param binary_type: Complex object type name or ID, - :param schema: (optional) Complex object schema or schema ID, - :param sync: (optional) look up the Ignite server for registered - Complex objects and create data classes for them if needed, + :param schema: (optional) Complex object schema or schema ID :return: found dataclass or None, if `schema` parameter is provided, a dict of {schema ID: dataclass} format otherwise. """ type_id = entity_id(binary_type) - s_id = schema_id(schema) - - if schema: - try: - result = self._registry[type_id][s_id] - except KeyError: - result = None - else: - result = self._registry[type_id] - if sync and not result: - self._sync_binary_registry(type_id) - return self.query_binary_type(type_id, s_id, sync=False) + result = self._get_from_registry(type_id, schema) + if not result: + type_info = self.get_binary_type(type_id) + self._sync_binary_registry(type_id, type_info) + return self._get_from_registry(type_id, schema) return result + def unwrap_binary(self, value: Any) -> Any: + """ + Detects and recursively unwraps Binary Object. + + :param value: anything that could be a Binary Object, + :return: the result of the Binary Object unwrapping with all other data + left intact. 
+ """ + if is_wrapped(value): + blob, offset = value + with BinaryStream(self, blob) as stream: + data_class = BinaryObject.parse(stream) + return BinaryObject.to_python(stream.read_ctype(data_class, direction=READ_BACKWARD), self) + return value + def create_cache(self, settings: Union[str, dict]) -> 'Cache': """ Creates Ignite cache by name. Raises `CacheError` if such a cache is @@ -477,7 +472,7 @@ def create_cache(self, settings: Union[str, dict]) -> 'Cache': :ref:`cache creation example `, :return: :class:`~pyignite.cache.Cache` object. """ - return Cache(self, settings) + return create_cache(self, settings) def get_or_create_cache(self, settings: Union[str, dict]) -> 'Cache': """ @@ -489,7 +484,7 @@ def get_or_create_cache(self, settings: Union[str, dict]) -> 'Cache': :ref:`cache creation example `, :return: :class:`~pyignite.cache.Cache` object. """ - return Cache(self, settings, with_get=True) + return get_or_create_cache(self, settings) def get_cache(self, settings: Union[str, dict]) -> 'Cache': """ @@ -501,7 +496,7 @@ def get_cache(self, settings: Union[str, dict]) -> 'Cache': property is allowed), :return: :class:`~pyignite.cache.Cache` object. """ - return Cache(self, settings, get_only=True) + return get_cache(self, settings) @status_to_exception(CacheError) def get_cache_names(self) -> list: @@ -559,42 +554,12 @@ def sql( :return: generator with result rows as a lists. If `include_field_names` was set, the first row will hold field names. """ - def generate_result(value): - cursor = value['cursor'] - more = value['more'] - - if include_field_names: - yield value['fields'] - field_count = len(value['fields']) - else: - field_count = value['field_count'] - for line in value['data']: - yield line - - while more: - inner_result = sql_fields_cursor_get_page( - conn, cursor, field_count - ) - if inner_result.status != 0: - raise SQLError(result.message) - more = inner_result.value['more'] - for line in inner_result.value['data']: - yield line - - conn = self.random_node c_id = cache.cache_id if isinstance(cache, Cache) else cache_id(cache) if c_id != 0: schema = None - result = sql_fields( - conn, c_id, query_str, page_size, query_args, schema, - statement_type, distributed_joins, local, replicated_only, - enforce_join_order, collocated, lazy, include_field_names, - max_rows, timeout, - ) - if result.status != 0: - raise SQLError(result.message) - - return generate_result(result.value) + return SqlFieldsCursor(self, c_id, query_str, page_size, query_args, schema, statement_type, distributed_joins, + local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, + max_rows, timeout) diff --git a/pyignite/connection/__init__.py b/pyignite/connection/__init__.py index 1114594..14e820a 100644 --- a/pyignite/connection/__init__.py +++ b/pyignite/connection/__init__.py @@ -34,5 +34,6 @@ """ from .connection import Connection +from .aio_connection import AioConnection -__all__ = ['Connection'] +__all__ = ['Connection', 'AioConnection'] diff --git a/pyignite/connection/aio_connection.py b/pyignite/connection/aio_connection.py new file mode 100644 index 0000000..e5c11da --- /dev/null +++ b/pyignite/connection/aio_connection.py @@ -0,0 +1,242 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +from asyncio import Lock +from collections import OrderedDict +from io import BytesIO +from typing import Union + +from pyignite.constants import PROTOCOLS, PROTOCOL_BYTE_ORDER +from pyignite.exceptions import HandshakeError, SocketError, connection_errors +from .connection import BaseConnection + +from .handshake import HandshakeRequest, HandshakeResponse +from .ssl import create_ssl_context +from ..stream import AioBinaryStream + + +class AioConnection(BaseConnection): + """ + Asyncio connection to Ignite node. It serves multiple purposes: + + * wrapper of asyncio streams. See also https://docs.python.org/3/library/asyncio-stream.html + * encapsulates handshake and reconnection. + """ + + def __init__(self, client: 'AioClient', host: str, port: int, username: str = None, password: str = None, + **ssl_params): + """ + Initialize connection. + + For the use of the SSL-related parameters see + https://docs.python.org/3/library/ssl.html#ssl-certificates. + + :param client: Ignite client object, + :param host: Ignite server node's host name or IP, + :param port: Ignite server node's port number, + :param use_ssl: (optional) set to True if Ignite server uses SSL + on its binary connector. Defaults to use SSL when username + and password has been supplied, not to use SSL otherwise, + :param ssl_version: (optional) SSL version constant from standard + `ssl` module. Defaults to TLS v1.1, as in Ignite 2.5, + :param ssl_ciphers: (optional) ciphers to use. 
If not provided, + `ssl` default ciphers are used, + :param ssl_cert_reqs: (optional) determines how the remote side + certificate is treated: + + * `ssl.CERT_NONE` − remote certificate is ignored (default), + * `ssl.CERT_OPTIONAL` − remote certificate will be validated, + if provided, + * `ssl.CERT_REQUIRED` − valid remote certificate is required, + + :param ssl_keyfile: (optional) a path to SSL key file to identify + local (client) party, + :param ssl_keyfile_password: (optional) password for SSL key file, + can be provided when key file is encrypted to prevent OpenSSL + password prompt, + :param ssl_certfile: (optional) a path to ssl certificate file + to identify local (client) party, + :param ssl_ca_certfile: (optional) a path to a trusted certificate + or a certificate chain. Required to check the validity of the remote + (server-side) certificate, + :param username: (optional) user name to authenticate to Ignite + cluster, + :param password: (optional) password to authenticate to Ignite cluster. + """ + super().__init__(client, host, port, username, password, **ssl_params) + self._mux = Lock() + self._reader = None + self._writer = None + + @property + def closed(self) -> bool: + """ Tells if socket is closed. """ + return self._writer is None + + async def connect(self) -> Union[dict, OrderedDict]: + """ + Connect to the given server node with protocol version fallback. + """ + async with self._mux: + return await self._connect() + + async def _connect(self) -> Union[dict, OrderedDict]: + detecting_protocol = False + + # choose highest version first + if self.client.protocol_version is None: + detecting_protocol = True + self.client.protocol_version = max(PROTOCOLS) + + try: + result = await self._connect_version() + except HandshakeError as e: + if e.expected_version in PROTOCOLS: + self.client.protocol_version = e.expected_version + result = await self._connect_version() + else: + raise e + except connection_errors: + # restore undefined protocol version + if detecting_protocol: + self.client.protocol_version = None + raise + + # connection is ready for end user + self.uuid = result.get('node_uuid', None) # version-specific (1.4+) + + self.failed = False + return result + + async def _connect_version(self) -> Union[dict, OrderedDict]: + """ + Connect to the given server node using protocol version + defined on client. + """ + + ssl_context = create_ssl_context(self.ssl_params) + self._reader, self._writer = await asyncio.open_connection(self.host, self.port, ssl=ssl_context) + + protocol_version = self.client.protocol_version + + hs_request = HandshakeRequest( + protocol_version, + self.username, + self.password + ) + + with AioBinaryStream(self.client) as stream: + await hs_request.from_python_async(stream) + await self._send(stream.getbuffer(), reconnect=False) + + with AioBinaryStream(self.client, await self._recv(reconnect=False)) as stream: + hs_response = await HandshakeResponse.parse_async(stream, self.protocol_version) + + if hs_response.op_code == 0: + self._close() + self._process_handshake_error(hs_response) + + return hs_response + + async def reconnect(self): + async with self._mux: + await self._reconnect() + + async def _reconnect(self): + if self.alive: + return + + self._close() + + # connect and silence the connection errors + try: + await self._connect() + except connection_errors: + pass + + async def request(self, data: Union[bytes, bytearray, memoryview]) -> bytearray: + """ + Perform request. + + :param data: bytes to send. 
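+        :return: bytes of the server response as a ``bytearray``.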
+ """ + async with self._mux: + await self._send(data) + return await self._recv() + + async def _send(self, data: Union[bytes, bytearray, memoryview], reconnect=True): + if self.closed: + raise SocketError('Attempt to use closed connection.') + + try: + self._writer.write(data) + await self._writer.drain() + except connection_errors: + self.failed = True + if reconnect: + await self._reconnect() + raise + + async def _recv(self, reconnect=True) -> bytearray: + if self.closed: + raise SocketError('Attempt to use closed connection.') + + with BytesIO() as stream: + try: + buf = await self._reader.readexactly(4) + response_len = int.from_bytes(buf, PROTOCOL_BYTE_ORDER) + + stream.write(buf) + + stream.write(await self._reader.readexactly(response_len)) + except connection_errors: + self.failed = True + if reconnect: + await self._reconnect() + raise + + return bytearray(stream.getbuffer()) + + async def close(self): + async with self._mux: + self._close() + + def _close(self): + """ + Close connection. + """ + if self._writer: + try: + self._writer.close() + except connection_errors: + pass + + self._writer, self._reader = None, None diff --git a/pyignite/connection/connection.py b/pyignite/connection/connection.py index 8db304e..901cb56 100644 --- a/pyignite/connection/connection.py +++ b/pyignite/connection/connection.py @@ -32,64 +32,94 @@ import socket from typing import Union -from pyignite.constants import * -from pyignite.exceptions import ( - HandshakeError, ParameterError, SocketError, connection_errors, AuthenticationError, -) -from pyignite.datatypes import Byte, Int, Short, String, UUIDObject -from pyignite.datatypes.internal import Struct +from pyignite.constants import PROTOCOLS, IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT, PROTOCOL_BYTE_ORDER +from pyignite.exceptions import HandshakeError, SocketError, connection_errors, AuthenticationError -from .handshake import HandshakeRequest -from .ssl import wrap -from ..stream import BinaryStream, READ_BACKWARD +from .handshake import HandshakeRequest, HandshakeResponse +from .ssl import wrap, check_ssl_params +from ..stream import BinaryStream CLIENT_STATUS_AUTH_FAILURE = 2000 -class Connection: +class BaseConnection: + def __init__(self, client, host: str = None, port: int = None, username: str = None, password: str = None, + **ssl_params): + self.client = client + self.host = host if host else IGNITE_DEFAULT_HOST + self.port = port if port else IGNITE_DEFAULT_PORT + self.username = username + self.password = password + self.uuid = None + + check_ssl_params(ssl_params) + + if self.username and self.password and 'use_ssl' not in ssl_params: + ssl_params['use_ssl'] = True + + self.ssl_params = ssl_params + self._failed = False + + @property + def closed(self) -> bool: + """ Tells if socket is closed. """ + raise NotImplementedError + + @property + def failed(self) -> bool: + """ Tells if connection is failed. """ + return self._failed + + @failed.setter + def failed(self, value): + self._failed = value + + @property + def alive(self) -> bool: + """ Tells if connection is up and no failure detected. """ + return not self.failed and not self.closed + + def __repr__(self) -> str: + return '{}:{}'.format(self.host or '?', self.port or '?') + + @property + def protocol_version(self): + """ + Returns the tuple of major, minor, and revision numbers of the used + thin protocol version, or None, if no connection to the Ignite cluster + was yet established. 
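+
+        For example, ``(1, 4, 0)`` stands for binary protocol version 1.4.0.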
+ """ + return self.client.protocol_version + + def _process_handshake_error(self, response): + error_text = f'Handshake error: {response.message}' + # if handshake fails for any reason other than protocol mismatch + # (i.e. authentication error), server version is 0.0.0 + protocol_version = self.client.protocol_version + server_version = (response.version_major, response.version_minor, response.version_patch) + + if any(server_version): + error_text += f' Server expects binary protocol version ' \ + f'{server_version[0]}.{server_version[1]}.{server_version[2]}. ' \ + f'Client provides ' \ + f'{protocol_version[0]}.{protocol_version[1]}.{protocol_version[2]}.' + elif response.client_status == CLIENT_STATUS_AUTH_FAILURE: + raise AuthenticationError(error_text) + raise HandshakeError(server_version, error_text) + + +class Connection(BaseConnection): """ This is a `pyignite` class, that represents a connection to Ignite node. It serves multiple purposes: * socket wrapper. Detects fragmentation and network errors. See also https://docs.python.org/3/howto/sockets.html, - * binary protocol connector. Incapsulates handshake and failover reconnection. + * binary protocol connector. Encapsulates handshake and failover reconnection. """ - _socket = None - _failed = None - - client = None - host = None - port = None - timeout = None - username = None - password = None - ssl_params = {} - uuid = None - - @staticmethod - def _check_ssl_params(params): - expected_args = [ - 'use_ssl', - 'ssl_version', - 'ssl_ciphers', - 'ssl_cert_reqs', - 'ssl_keyfile', - 'ssl_keyfile_password', - 'ssl_certfile', - 'ssl_ca_certfile', - ] - for param in params: - if param not in expected_args: - raise ParameterError(( - 'Unexpected parameter for connection initialization: `{}`' - ).format(param)) - - def __init__( - self, client: 'Client', timeout: float = 2.0, - username: str = None, password: str = None, **ssl_params - ): + def __init__(self, client: 'Client', host: str, port: int, timeout: float = 2.0, + username: str = None, password: str = None, **ssl_params): """ Initialize connection. @@ -97,6 +127,8 @@ def __init__( https://docs.python.org/3/library/ssl.html#ssl-certificates. :param client: Ignite client object, + :param host: Ignite server node's host name or IP, + :param port: Ignite server node's port number, :param timeout: (optional) sets timeout (in seconds) for each socket operation including `connect`. 0 means non-blocking mode, which is virtually guaranteed to fail. Can accept integer or float value. @@ -130,84 +162,15 @@ def __init__( cluster, :param password: (optional) password to authenticate to Ignite cluster. """ - self.client = client + super().__init__(client, host, port, username, password, **ssl_params) self.timeout = timeout - self.username = username - self.password = password - self._check_ssl_params(ssl_params) - if self.username and self.password and 'use_ssl' not in ssl_params: - ssl_params['use_ssl'] = True - self.ssl_params = ssl_params - self._failed = False + self._socket = None @property def closed(self) -> bool: - """ Tells if socket is closed. """ return self._socket is None - @property - def failed(self) -> bool: - """ Tells if connection is failed. """ - return self._failed - - @failed.setter - def failed(self, value): - self._failed = value - - @property - def alive(self) -> bool: - """ Tells if connection is up and no failure detected. 
""" - return not self.failed and not self.closed - - def __repr__(self) -> str: - return '{}:{}'.format(self.host or '?', self.port or '?') - - _wrap = wrap - - def get_protocol_version(self): - """ - Returns the tuple of major, minor, and revision numbers of the used - thin protocol version, or None, if no connection to the Ignite cluster - was yet established. - """ - return self.client.protocol_version - - def read_response(self) -> Union[dict, OrderedDict]: - """ - Processes server's response to the handshake request. - - :return: handshake data. - """ - response_start = Struct([ - ('length', Int), - ('op_code', Byte), - ]) - with BinaryStream(self, self.recv(reconnect=False)) as stream: - start_class = response_start.parse(stream) - start = stream.read_ctype(start_class, direction=READ_BACKWARD) - data = response_start.to_python(start) - response_end = None - if data['op_code'] == 0: - response_end = Struct([ - ('version_major', Short), - ('version_minor', Short), - ('version_patch', Short), - ('message', String), - ('client_status', Int) - ]) - elif self.get_protocol_version() >= (1, 4, 0): - response_end = Struct([ - ('node_uuid', UUIDObject), - ]) - if response_end: - end_class = response_end.parse(stream) - end = stream.read_ctype(end_class, direction=READ_BACKWARD) - data.update(response_end.to_python(end)) - return data - - def connect( - self, host: str = None, port: int = None - ) -> Union[dict, OrderedDict]: + def connect(self) -> Union[dict, OrderedDict]: """ Connect to the given server node with protocol version fallback. @@ -222,11 +185,11 @@ def connect( self.client.protocol_version = max(PROTOCOLS) try: - result = self._connect_version(host, port) + result = self._connect_version() except HandshakeError as e: if e.expected_version in PROTOCOLS: self.client.protocol_version = e.expected_version - result = self._connect_version(host, port) + result = self._connect_version() else: raise e except connection_errors: @@ -237,28 +200,19 @@ def connect( # connection is ready for end user self.uuid = result.get('node_uuid', None) # version-specific (1.4+) - self.failed = False return result - def _connect_version( - self, host: str = None, port: int = None, - ) -> Union[dict, OrderedDict]: + def _connect_version(self) -> Union[dict, OrderedDict]: """ Connect to the given server node using protocol version defined on client. - - :param host: Ignite server node's host name or IP, - :param port: Ignite server node's port number. """ - host = host or IGNITE_DEFAULT_HOST - port = port or IGNITE_DEFAULT_PORT - self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._socket.settimeout(self.timeout) - self._socket = self._wrap(self._socket) - self._socket.connect((host, port)) + self._socket = wrap(self._socket, self.ssl_params) + self._socket.connect((self.host, self.port)) protocol_version = self.client.protocol_version @@ -268,56 +222,41 @@ def _connect_version( self.password ) - with BinaryStream(self) as stream: + with BinaryStream(self.client) as stream: hs_request.from_python(stream) self.send(stream.getbuffer(), reconnect=False) - hs_response = self.read_response() - if hs_response['op_code'] == 0: - self.close() - - error_text = 'Handshake error: {}'.format(hs_response['message']) - # if handshake fails for any reason other than protocol mismatch - # (i.e. 
authentication error), server version is 0.0.0 - if any([ - hs_response['version_major'], - hs_response['version_minor'], - hs_response['version_patch'], - ]): - error_text += ( - ' Server expects binary protocol version ' - '{version_major}.{version_minor}.{version_patch}. Client ' - 'provides {client_major}.{client_minor}.{client_patch}.' - ).format( - client_major=protocol_version[0], - client_minor=protocol_version[1], - client_patch=protocol_version[2], - **hs_response - ) - elif hs_response['client_status'] == CLIENT_STATUS_AUTH_FAILURE: - raise AuthenticationError(error_text) - raise HandshakeError(( - hs_response['version_major'], - hs_response['version_minor'], - hs_response['version_patch'], - ), error_text) - self.host, self.port = host, port - return hs_response + with BinaryStream(self.client, self.recv(reconnect=False)) as stream: + hs_response = HandshakeResponse.parse(stream, self.protocol_version) + + if hs_response.op_code == 0: + self.close() + self._process_handshake_error(hs_response) + + return hs_response def reconnect(self): - # do not reconnect if connection is already working - # or was closed on purpose - if not self.failed: + if self.alive: return self.close() # connect and silence the connection errors try: - self.connect(self.host, self.port) + self.connect() except connection_errors: pass + def request(self, data: Union[bytes, bytearray, memoryview], flags=None) -> bytearray: + """ + Perform request. + + :param data: bytes to send, + :param flags: (optional) OS-specific flags. + """ + self.send(data, flags=flags) + return self.recv() + def send(self, data: Union[bytes, bytearray, memoryview], flags=None, reconnect=True): """ Send data down the socket. @@ -337,7 +276,8 @@ def send(self, data: Union[bytes, bytearray, memoryview], flags=None, reconnect= self._socket.sendall(data, **kwargs) except connection_errors: self.failed = True - self.reconnect() + if reconnect: + self.reconnect() raise def recv(self, flags=None, reconnect=True) -> bytearray: diff --git a/pyignite/connection/handshake.py b/pyignite/connection/handshake.py index 3315c4e..0b0fe50 100644 --- a/pyignite/connection/handshake.py +++ b/pyignite/connection/handshake.py @@ -15,8 +15,9 @@ from typing import Optional, Tuple -from pyignite.datatypes import Byte, Int, Short, String +from pyignite.datatypes import Byte, Int, Short, String, UUIDObject from pyignite.datatypes.internal import Struct +from pyignite.stream import READ_BACKWARD OP_HANDSHAKE = 1 @@ -51,6 +52,12 @@ def __init__( self.handshake_struct = Struct(fields) def from_python(self, stream): + self.handshake_struct.from_python(stream, self.__create_handshake_data()) + + async def from_python_async(self, stream): + await self.handshake_struct.from_python_async(stream, self.__create_handshake_data()) + + def __create_handshake_data(self): handshake_data = { 'length': 8, 'op_code': OP_HANDSHAKE, @@ -69,5 +76,66 @@ def from_python(self, stream): len(self.username), len(self.password), ]) + return handshake_data + + +class HandshakeResponse(dict): + """ + Handshake response. 
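+
+    Behaves as a ``dict`` of handshake fields with attribute-style read access;
+    missing fields resolve to ``None``.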
+ """ + __response_start = Struct([ + ('length', Int), + ('op_code', Byte), + ]) + + def __init__(self, data): + super().__init__() + self.update(data) + + def __getattr__(self, item): + return self.get(item) + + @classmethod + def parse(cls, stream, protocol_version): + start_class = cls.__response_start.parse(stream) + start = stream.read_ctype(start_class, direction=READ_BACKWARD) + data = cls.__response_start.to_python(start) - self.handshake_struct.from_python(stream, handshake_data) + response_end = cls.__create_response_end(data, protocol_version) + if response_end: + end_class = response_end.parse(stream) + end = stream.read_ctype(end_class, direction=READ_BACKWARD) + data.update(response_end.to_python(end)) + + return cls(data) + + @classmethod + async def parse_async(cls, stream, protocol_version): + start_class = cls.__response_start.parse(stream) + start = stream.read_ctype(start_class, direction=READ_BACKWARD) + data = await cls.__response_start.to_python_async(start) + + response_end = cls.__create_response_end(data, protocol_version) + if response_end: + end_class = await response_end.parse_async(stream) + end = stream.read_ctype(end_class, direction=READ_BACKWARD) + data.update(await response_end.to_python_async(end)) + + return cls(data) + + @classmethod + def __create_response_end(cls, start_data, protocol_version): + response_end = None + if start_data['op_code'] == 0: + response_end = Struct([ + ('version_major', Short), + ('version_minor', Short), + ('version_patch', Short), + ('message', String), + ('client_status', Int) + ]) + elif protocol_version >= (1, 4, 0): + response_end = Struct([ + ('node_uuid', UUIDObject), + ]) + return response_end diff --git a/pyignite/connection/ssl.py b/pyignite/connection/ssl.py index 9773860..385b414 100644 --- a/pyignite/connection/ssl.py +++ b/pyignite/connection/ssl.py @@ -16,34 +16,62 @@ import ssl from ssl import SSLContext -from pyignite.constants import * +from pyignite.constants import SSL_DEFAULT_CIPHERS, SSL_DEFAULT_VERSION +from pyignite.exceptions import ParameterError -def wrap(conn: 'Connection', _socket): +def wrap(socket, ssl_params): """ Wrap socket in SSL wrapper. 
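+
+    Returns the socket unchanged when ``use_ssl`` is not enabled in
+    ``ssl_params``.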
""" - if conn.ssl_params.get('use_ssl', None): - keyfile = conn.ssl_params.get('ssl_keyfile', None) - certfile = conn.ssl_params.get('ssl_certfile', None) + if not ssl_params.get('use_ssl'): + return socket - if keyfile and not certfile: - raise ValueError("certfile must be specified") + context = create_ssl_context(ssl_params) - password = conn.ssl_params.get('ssl_keyfile_password', None) - ssl_version = conn.ssl_params.get('ssl_version', SSL_DEFAULT_VERSION) - ciphers = conn.ssl_params.get('ssl_ciphers', SSL_DEFAULT_CIPHERS) - cert_reqs = conn.ssl_params.get('ssl_cert_reqs', ssl.CERT_NONE) - ca_certs = conn.ssl_params.get('ssl_ca_certfile', None) + return context.wrap_socket(sock=socket) - context = SSLContext(ssl_version) - context.verify_mode = cert_reqs - if ca_certs: - context.load_verify_locations(ca_certs) - if certfile: - context.load_cert_chain(certfile, keyfile, password) - if ciphers: - context.set_ciphers(ciphers) +def check_ssl_params(params): + expected_args = [ + 'use_ssl', + 'ssl_version', + 'ssl_ciphers', + 'ssl_cert_reqs', + 'ssl_keyfile', + 'ssl_keyfile_password', + 'ssl_certfile', + 'ssl_ca_certfile', + ] + for param in params: + if param not in expected_args: + raise ParameterError(( + 'Unexpected parameter for connection initialization: `{}`' + ).format(param)) - _socket = context.wrap_socket(sock=_socket) - return _socket +def create_ssl_context(ssl_params): + if not ssl_params.get('use_ssl'): + return None + + keyfile = ssl_params.get('ssl_keyfile', None) + certfile = ssl_params.get('ssl_certfile', None) + + if keyfile and not certfile: + raise ValueError("certfile must be specified") + + password = ssl_params.get('ssl_keyfile_password', None) + ssl_version = ssl_params.get('ssl_version', SSL_DEFAULT_VERSION) + ciphers = ssl_params.get('ssl_ciphers', SSL_DEFAULT_CIPHERS) + cert_reqs = ssl_params.get('ssl_cert_reqs', ssl.CERT_NONE) + ca_certs = ssl_params.get('ssl_ca_certfile', None) + + context = SSLContext(ssl_version) + context.verify_mode = cert_reqs + + if ca_certs: + context.load_verify_locations(ca_certs) + if certfile: + context.load_cert_chain(certfile, keyfile, password) + if ciphers: + context.set_ciphers(ciphers) + + return context diff --git a/pyignite/cursors.py b/pyignite/cursors.py new file mode 100644 index 0000000..c699556 --- /dev/null +++ b/pyignite/cursors.py @@ -0,0 +1,319 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module contains sync and async cursors for different types of queries. 
+""" + +import asyncio + +from pyignite.api import ( + scan, scan_cursor_get_page, resource_close, scan_async, scan_cursor_get_page_async, resource_close_async, sql, + sql_cursor_get_page, sql_fields, sql_fields_cursor_get_page, sql_fields_cursor_get_page_async, sql_fields_async +) +from pyignite.exceptions import CacheError, SQLError + + +__all__ = ['ScanCursor', 'SqlCursor', 'SqlFieldsCursor', 'AioScanCursor', 'AioSqlFieldsCursor'] + + +class BaseCursorMixin: + @property + def connection(self): + return getattr(self, '_conn', None) + + @connection.setter + def connection(self, value): + setattr(self, '_conn', value) + + @property + def cursor_id(self): + return getattr(self, '_cursor_id', None) + + @cursor_id.setter + def cursor_id(self, value): + setattr(self, '_cursor_id', value) + + @property + def more(self): + return getattr(self, '_more', None) + + @more.setter + def more(self, value): + setattr(self, '_more', value) + + @property + def cache_id(self): + return getattr(self, '_cache_id', None) + + @cache_id.setter + def cache_id(self, value): + setattr(self, '_cache_id', value) + + @property + def client(self): + return getattr(self, '_client', None) + + @client.setter + def client(self, value): + setattr(self, '_client', value) + + @property + def data(self): + return getattr(self, '_data', None) + + @data.setter + def data(self, value): + setattr(self, '_data', value) + + +class CursorMixin(BaseCursorMixin): + def __enter__(self): + return self + + def __iter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + def close(self): + if self.connection and self.cursor_id and self.more: + resource_close(self.connection, self.cursor_id) + + +class AioCursorMixin(BaseCursorMixin): + def __await__(self): + return (yield from self.__aenter__().__await__()) + + def __aiter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close() + + async def close(self): + if self.connection and self.cursor_id and self.more: + await resource_close_async(self.connection, self.cursor_id) + + +class AbstractScanCursor: + def __init__(self, client, cache_id, page_size, partitions, local): + self.client = client + self.cache_id = cache_id + self._page_size = page_size + self._partitions = partitions + self._local = local + + def _finalize_init(self, result): + if result.status != 0: + raise CacheError(result.message) + + self.cursor_id, self.more = result.value['cursor'], result.value['more'] + self.data = iter(result.value['data'].items()) + + def _process_page_response(self, result): + if result.status != 0: + raise CacheError(result.message) + + self.data, self.more = iter(result.value['data'].items()), result.value['more'] + + +class ScanCursor(AbstractScanCursor, CursorMixin): + def __init__(self, client, cache_id, page_size, partitions, local): + super().__init__(client, cache_id, page_size, partitions, local) + + self.connection = self.client.random_node + result = scan(self.connection, self.cache_id, self._page_size, self._partitions, self._local) + self._finalize_init(result) + + def __next__(self): + if not self.data: + raise StopIteration + + try: + k, v = next(self.data) + except StopIteration: + if self.more: + self._process_page_response(scan_cursor_get_page(self.connection, self.cursor_id)) + k, v = next(self.data) + else: + raise StopIteration + + return self.client.unwrap_binary(k), self.client.unwrap_binary(v) + + +class AioScanCursor(AbstractScanCursor, AioCursorMixin): + def __init__(self, client, cache_id, 
page_size, partitions, local): + super().__init__(client, cache_id, page_size, partitions, local) + + async def __aenter__(self): + if not self.connection: + self.connection = await self.client.random_node() + result = await scan_async(self.connection, self.cache_id, self._page_size, self._partitions, self._local) + self._finalize_init(result) + return self + + async def __anext__(self): + if not self.connection: + raise CacheError("Using uninitialized cursor, initialize it using async with expression.") + + if not self.data: + raise StopAsyncIteration + + try: + k, v = next(self.data) + except StopIteration: + if self.more: + self._process_page_response(await scan_cursor_get_page_async(self.connection, self.cursor_id)) + try: + k, v = next(self.data) + except StopIteration: + raise StopAsyncIteration + else: + raise StopAsyncIteration + + return await asyncio.gather( + *[self.client.unwrap_binary(k), self.client.unwrap_binary(v)] + ) + + +class SqlCursor(CursorMixin): + def __init__(self, client, cache_id, *args, **kwargs): + self.client = client + self.cache_id = cache_id + self.connection = self.client.random_node + result = sql(self.connection, self.cache_id, *args, **kwargs) + if result.status != 0: + raise SQLError(result.message) + + self.cursor_id, self.more = result.value['cursor'], result.value['more'] + self.data = iter(result.value['data'].items()) + + def __next__(self): + if not self.data: + raise StopIteration + + try: + k, v = next(self.data) + except StopIteration: + if self.more: + result = sql_cursor_get_page(self.connection, self.cursor_id) + if result.status != 0: + raise SQLError(result.message) + self.data, self.more = iter(result.value['data'].items()), result.value['more'] + + k, v = next(self.data) + else: + raise StopIteration + + return self.client.unwrap_binary(k), self.client.unwrap_binary(v) + + +class AbstractSqlFieldsCursor: + def __init__(self, client, cache_id): + self.client = client + self.cache_id = cache_id + + def _finalize_init(self, result): + if result.status != 0: + raise SQLError(result.message) + + self.cursor_id, self.more = result.value['cursor'], result.value['more'] + self.data = iter(result.value['data']) + self._field_names = result.value.get('fields', None) + if self._field_names: + self._field_count = len(self._field_names) + else: + self._field_count = result.value['field_count'] + + +class SqlFieldsCursor(AbstractSqlFieldsCursor, CursorMixin): + def __init__(self, client, cache_id, *args, **kwargs): + super().__init__(client, cache_id) + self.connection = self.client.random_node + self._finalize_init(sql_fields(self.connection, self.cache_id, *args, **kwargs)) + + def __next__(self): + if not self.data: + raise StopIteration + + if self._field_names: + result = self._field_names + self._field_names = None + return result + + try: + row = next(self.data) + except StopIteration: + if self.more: + result = sql_fields_cursor_get_page(self.connection, self.cursor_id, self._field_count) + if result.status != 0: + raise SQLError(result.message) + + self.data, self.more = iter(result.value['data']), result.value['more'] + + row = next(self.data) + else: + raise StopIteration + + return [self.client.unwrap_binary(v) for v in row] + + +class AioSqlFieldsCursor(AbstractSqlFieldsCursor, AioCursorMixin): + def __init__(self, client, cache_id, *args, **kwargs): + super().__init__(client, cache_id) + self._params = (args, kwargs) + + async def __aenter__(self): + await self._initialize(*self._params[0], *self._params[1]) + return self + + async def 
__anext__(self): + if not self.connection: + raise SQLError("Attempting to use uninitialized aio cursor, please await on it or use with expression.") + + if not self.data: + raise StopAsyncIteration + + if self._field_names: + result = self._field_names + self._field_names = None + return result + + try: + row = next(self.data) + except StopIteration: + if self.more: + result = await sql_fields_cursor_get_page_async(self.connection, self.cursor_id, self._field_count) + if result.status != 0: + raise SQLError(result.message) + + self.data, self.more = iter(result.value['data']), result.value['more'] + try: + row = next(self.data) + except StopIteration: + raise StopAsyncIteration + else: + raise StopAsyncIteration + + return await asyncio.gather(*[self.client.unwrap_binary(v) for v in row]) + + async def _initialize(self, *args, **kwargs): + if self.connection and self.cursor_id: + return + + self.connection = await self.client.random_node() + self._finalize_init(await sql_fields_async(self.connection, self.cache_id, *args, **kwargs)) diff --git a/pyignite/datatypes/__init__.py b/pyignite/datatypes/__init__.py index 49860bd..5024f79 100644 --- a/pyignite/datatypes/__init__.py +++ b/pyignite/datatypes/__init__.py @@ -25,22 +25,3 @@ from .primitive_arrays import * from .primitive_objects import * from .standard import * -from ..stream import BinaryStream, READ_BACKWARD - - -def unwrap_binary(client: 'Client', wrapped: tuple) -> object: - """ - Unwrap wrapped BinaryObject and convert it to Python data. - - :param client: connection to Ignite cluster, - :param wrapped: `WrappedDataObject` value, - :return: dict representing wrapped BinaryObject. - """ - from pyignite.datatypes.complex import BinaryObject - - blob, offset = wrapped - with BinaryStream(client.random_node, blob) as stream: - data_class = BinaryObject.parse(stream) - result = BinaryObject.to_python(stream.read_ctype(data_class, direction=READ_BACKWARD), client) - - return result diff --git a/pyignite/datatypes/base.py b/pyignite/datatypes/base.py index 25b5b1e..fbd798b 100644 --- a/pyignite/datatypes/base.py +++ b/pyignite/datatypes/base.py @@ -47,4 +47,34 @@ class IgniteDataType(metaclass=IgniteDataTypeMeta): This is a base class for all Ignite data types, a.k.a. parser/constructor classes, both object and payload varieties. 
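+
+    The ``*_async`` class methods below default to delegating to their
+    synchronous counterparts; concrete data types override them when truly
+    asynchronous parsing or serialization is needed.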
""" - pass + @classmethod + async def hashcode_async(cls, value, *args, **kwargs): + return cls.hashcode(value, *args, **kwargs) + + @classmethod + def hashcode(cls, value, *args, **kwargs): + return 0 + + @classmethod + def parse(cls, stream): + raise NotImplementedError + + @classmethod + async def parse_async(cls, stream): + return cls.parse(stream) + + @classmethod + def from_python(cls, stream, value, **kwargs): + raise NotImplementedError + + @classmethod + async def from_python_async(cls, stream, value, **kwargs): + cls.from_python(stream, value, **kwargs) + + @classmethod + def to_python(cls, ctype_object, *args, **kwargs): + raise NotImplementedError + + @classmethod + async def to_python_async(cls, ctype_object, *args, **kwargs): + return cls.to_python(ctype_object, *args, **kwargs) diff --git a/pyignite/datatypes/cache_properties.py b/pyignite/datatypes/cache_properties.py index eadaef9..127b6f3 100644 --- a/pyignite/datatypes/cache_properties.py +++ b/pyignite/datatypes/cache_properties.py @@ -23,7 +23,6 @@ from .primitive import * from .standard import * - __all__ = [ 'PropName', 'PropCacheMode', 'PropCacheAtomicityMode', 'PropBackupsNumber', 'PropWriteSynchronizationMode', 'PropCopyOnRead', 'PropReadFromBackup', @@ -81,7 +80,7 @@ class PropBase: @classmethod def build_header(cls): return type( - cls.__name__+'Header', + cls.__name__ + 'Header', (ctypes.LittleEndianStructure,), { '_pack_': 1, @@ -111,11 +110,17 @@ def parse(cls, stream): stream.seek(init_pos + ctypes.sizeof(prop_class)) return prop_class + @classmethod + async def parse_async(cls, stream): + return cls.parse(stream) + @classmethod def to_python(cls, ctype_object, *args, **kwargs): - return cls.prop_data_class.to_python( - ctype_object.data, *args, **kwargs - ) + return cls.prop_data_class.to_python(ctype_object.data, *args, **kwargs) + + @classmethod + async def to_python_async(cls, ctype_object, *args, **kwargs): + return cls.to_python(ctype_object, *args, **kwargs) @classmethod def from_python(cls, stream, value): @@ -125,6 +130,10 @@ def from_python(cls, stream, value): stream.write(bytes(header)) cls.prop_data_class.from_python(stream, value) + @classmethod + async def from_python_async(cls, stream, value): + return cls.from_python(stream, value) + class PropName(PropBase): prop_code = PROP_NAME diff --git a/pyignite/datatypes/complex.py b/pyignite/datatypes/complex.py index b8d9c02..5cb6160 100644 --- a/pyignite/datatypes/complex.py +++ b/pyignite/datatypes/complex.py @@ -12,30 +12,25 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- +import asyncio from collections import OrderedDict import ctypes from io import SEEK_CUR -from typing import Iterable, Dict +from typing import Optional from pyignite.constants import * from pyignite.exceptions import ParseError -from .base import IgniteDataType -from .internal import AnyDataObject, infer_from_python +from .internal import AnyDataObject, Struct, infer_from_python, infer_from_python_async from .type_codes import * from .type_ids import * from .type_names import * from .null_object import Null, Nullable +from ..stream import AioBinaryStream, BinaryStream -__all__ = [ - 'Map', 'ObjectArrayObject', 'CollectionObject', 'MapObject', - 'WrappedDataObject', 'BinaryObject', -] - -from ..stream import BinaryStream +__all__ = ['Map', 'ObjectArrayObject', 'CollectionObject', 'MapObject', 'WrappedDataObject', 'BinaryObject'] -class ObjectArrayObject(IgniteDataType, Nullable): +class ObjectArrayObject(Nullable): """ Array of Ignite objects of any consistent type. Its Python representation is tuple(type_id, iterable of any type). The only type ID that makes sense @@ -48,15 +43,10 @@ class ObjectArrayObject(IgniteDataType, Nullable): _type_id = TYPE_OBJ_ARR type_code = TC_OBJECT_ARRAY - @staticmethod - def hashcode(value: Iterable) -> int: - # Arrays are not supported as keys at the moment. - return 0 - @classmethod def build_header(cls): return type( - cls.__name__+'Header', + cls.__name__ + 'Header', (ctypes.LittleEndianStructure,), { '_pack_': 1, @@ -70,16 +60,36 @@ def build_header(cls): @classmethod def parse_not_null(cls, stream): - header_class = cls.build_header() - header = stream.read_ctype(header_class) - stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + header, header_class = cls.__parse_header(stream) fields = [] for i in range(header.length): c_type = AnyDataObject.parse(stream) fields.append(('element_{}'.format(i), c_type)) - final_class = type( + return cls.__build_final_class(header_class, fields) + + @classmethod + async def parse_not_null_async(cls, stream): + header, header_class = cls.__parse_header(stream) + + fields = [] + for i in range(header.length): + c_type = await AnyDataObject.parse_async(stream) + fields.append(('element_{}'.format(i), c_type)) + + return cls.__build_final_class(header_class, fields) + + @classmethod + def __parse_header(cls, stream): + header_class = cls.build_header() + header = stream.read_ctype(header_class) + stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + return header, header_class + + @classmethod + def __build_final_class(cls, header_class, fields): + return type( cls.__name__, (header_class,), { @@ -88,8 +98,6 @@ def parse_not_null(cls, stream): } ) - return final_class - @classmethod def to_python_not_null(cls, ctype_object, *args, **kwargs): result = [] @@ -103,28 +111,55 @@ def to_python_not_null(cls, ctype_object, *args, **kwargs): return ctype_object.type_id, result @classmethod - def from_python_not_null(cls, stream, value): + async def to_python_not_null_async(cls, ctype_object, *args, **kwargs): + result = [ + await AnyDataObject.to_python_async( + getattr(ctype_object, 'element_{}'.format(i)), *args, **kwargs + ) + for i in range(ctype_object.length)] + return ctype_object.type_id, result + + @classmethod + def from_python_not_null(cls, stream, value, *args, **kwargs): + type_or_id, value = value + try: + length = len(value) + except TypeError: + value = [value] + length = 1 + + cls.__write_header(stream, type_or_id, length) + for x in value: + infer_from_python(stream, x) + + @classmethod + async def 
from_python_not_null_async(cls, stream, value, *args, **kwargs): type_or_id, value = value + try: + length = len(value) + except TypeError: + value = [value] + length = 1 + + cls.__write_header(stream, type_or_id, length) + for x in value: + await infer_from_python_async(stream, x) + + @classmethod + def __write_header(cls, stream, type_or_id, length): header_class = cls.build_header() header = header_class() header.type_code = int.from_bytes( cls.type_code, byteorder=PROTOCOL_BYTE_ORDER ) - try: - length = len(value) - except TypeError: - value = [value] - length = 1 header.length = length header.type_id = type_or_id stream.write(header) - for x in value: - infer_from_python(stream, x) -class WrappedDataObject(IgniteDataType, Nullable): +class WrappedDataObject(Nullable): """ One or more binary objects can be wrapped in an array. This allows reading, storing, passing and writing objects efficiently without understanding @@ -138,7 +173,7 @@ class WrappedDataObject(IgniteDataType, Nullable): @classmethod def build_header(cls): return type( - cls.__name__+'Header', + cls.__name__ + 'Header', (ctypes.LittleEndianStructure,), { '_pack_': 1, @@ -160,7 +195,7 @@ def parse_not_null(cls, stream): { '_pack_': 1, '_fields_': [ - ('payload', ctypes.c_byte*header.length), + ('payload', ctypes.c_byte * header.length), ('offset', ctypes.c_int), ], } @@ -170,15 +205,15 @@ def parse_not_null(cls, stream): return final_class @classmethod - def to_python(cls, ctype_object, *args, **kwargs): + def to_python_not_null(cls, ctype_object, *args, **kwargs): return bytes(ctype_object.payload), ctype_object.offset @classmethod - def from_python(cls, stream, value): + def from_python(cls, stream, value, *args, **kwargs): raise ParseError('Send unwrapped data.') -class CollectionObject(IgniteDataType, Nullable): +class CollectionObject(Nullable): """ Similar to object array, but contains platform-agnostic deserialization type hint instead of type ID. @@ -220,15 +255,10 @@ class CollectionObject(IgniteDataType, Nullable): pythonic = list default = [] - @staticmethod - def hashcode(value: Iterable) -> int: - # Collections are not supported as keys at the moment. 
- return 0 - @classmethod def build_header(cls): return type( - cls.__name__+'Header', + cls.__name__ + 'Header', (ctypes.LittleEndianStructure,), { '_pack_': 1, @@ -242,16 +272,36 @@ def build_header(cls): @classmethod def parse_not_null(cls, stream): - header_class = cls.build_header() - header = stream.read_ctype(header_class) - stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + header, header_class = cls.__parse_header(stream) fields = [] for i in range(header.length): c_type = AnyDataObject.parse(stream) fields.append(('element_{}'.format(i), c_type)) - final_class = type( + return cls.__build_final_class(header_class, fields) + + @classmethod + async def parse_not_null_async(cls, stream): + header, header_class = cls.__parse_header(stream) + + fields = [] + for i in range(header.length): + c_type = await AnyDataObject.parse_async(stream) + fields.append(('element_{}'.format(i), c_type)) + + return cls.__build_final_class(header_class, fields) + + @classmethod + def __parse_header(cls, stream): + header_class = cls.build_header() + header = stream.read_ctype(header_class) + stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + return header, header_class + + @classmethod + def __build_final_class(cls, header_class, fields): + return type( cls.__name__, (header_class,), { @@ -259,46 +309,78 @@ def parse_not_null(cls, stream): '_fields_': fields, } ) - return final_class @classmethod def to_python(cls, ctype_object, *args, **kwargs): - result = [] - length = getattr(ctype_object, "length", None) + length = cls.__get_length(ctype_object) if length is None: return None - for i in range(length): - result.append( - AnyDataObject.to_python( - getattr(ctype_object, 'element_{}'.format(i)), - *args, **kwargs - ) - ) + + result = [ + AnyDataObject.to_python(getattr(ctype_object, f'element_{i}'), *args, **kwargs) + for i in range(length) + ] return ctype_object.type, result @classmethod - def from_python_not_null(cls, stream, value): + async def to_python_async(cls, ctype_object, *args, **kwargs): + length = cls.__get_length(ctype_object) + if length is None: + return None + + result_coro = [ + AnyDataObject.to_python_async(getattr(ctype_object, f'element_{i}'), *args, **kwargs) + for i in range(length) + ] + + return ctype_object.type, await asyncio.gather(*result_coro) + + @classmethod + def __get_length(cls, ctype_object): + return getattr(ctype_object, "length", None) + + @classmethod + def from_python_not_null(cls, stream, value, *args, **kwargs): type_or_id, value = value + try: + length = len(value) + except TypeError: + value = [value] + length = 1 + + cls.__write_header(stream, type_or_id, length) + for x in value: + infer_from_python(stream, x) + + @classmethod + async def from_python_not_null_async(cls, stream, value, *args, **kwargs): + type_or_id, value = value + try: + length = len(value) + except TypeError: + value = [value] + length = 1 + + cls.__write_header(stream, type_or_id, length) + for x in value: + await infer_from_python_async(stream, x) + + @classmethod + def __write_header(cls, stream, type_or_id, length): header_class = cls.build_header() header = header_class() header.type_code = int.from_bytes( cls.type_code, byteorder=PROTOCOL_BYTE_ORDER ) - try: - length = len(value) - except TypeError: - value = [value] - length = 1 + header.length = length header.type = type_or_id stream.write(header) - for x in value: - infer_from_python(stream, x) -class Map(IgniteDataType, Nullable): +class Map(Nullable): """ Dictionary type, payload-only. 
@@ -310,15 +392,10 @@ class Map(IgniteDataType, Nullable): HASH_MAP = 1 LINKED_HASH_MAP = 2 - @staticmethod - def hashcode(value: Dict) -> int: - # Maps are not supported as keys at the moment. - return 0 - @classmethod def build_header(cls): return type( - cls.__name__+'Header', + cls.__name__ + 'Header', (ctypes.LittleEndianStructure,), { '_pack_': 1, @@ -330,16 +407,36 @@ def build_header(cls): @classmethod def parse_not_null(cls, stream): - header_class = cls.build_header() - header = stream.read_ctype(header_class) - stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + header, header_class = cls.__parse_header(stream) fields = [] for i in range(header.length << 1): c_type = AnyDataObject.parse(stream) fields.append(('element_{}'.format(i), c_type)) - final_class = type( + return cls.__build_final_class(header_class, fields) + + @classmethod + async def parse_not_null_async(cls, stream): + header, header_class = cls.__parse_header(stream) + + fields = [] + for i in range(header.length << 1): + c_type = await AnyDataObject.parse_async(stream) + fields.append(('element_{}'.format(i), c_type)) + + return cls.__build_final_class(header_class, fields) + + @classmethod + def __parse_header(cls, stream): + header_class = cls.build_header() + header = stream.read_ctype(header_class) + stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + return header, header_class + + @classmethod + def __build_final_class(cls, header_class, fields): + return type( cls.__name__, (header_class,), { @@ -347,43 +444,75 @@ def parse_not_null(cls, stream): '_fields_': fields, } ) - return final_class @classmethod def to_python(cls, ctype_object, *args, **kwargs): - map_type = getattr(ctype_object, 'type', cls.HASH_MAP) - result = OrderedDict() if map_type == cls.LINKED_HASH_MAP else {} + map_cls = cls.__get_map_class(ctype_object) + result = map_cls() for i in range(0, ctype_object.length << 1, 2): k = AnyDataObject.to_python( + getattr(ctype_object, 'element_{}'.format(i)), + *args, **kwargs + ) + v = AnyDataObject.to_python( + getattr(ctype_object, 'element_{}'.format(i + 1)), + *args, **kwargs + ) + result[k] = v + return result + + @classmethod + async def to_python_async(cls, ctype_object, *args, **kwargs): + map_cls = cls.__get_map_class(ctype_object) + + kv_pairs_coro = [ + asyncio.gather( + AnyDataObject.to_python_async( getattr(ctype_object, 'element_{}'.format(i)), *args, **kwargs - ) - v = AnyDataObject.to_python( + ), + AnyDataObject.to_python_async( getattr(ctype_object, 'element_{}'.format(i + 1)), *args, **kwargs ) - result[k] = v - return result + ) for i in range(0, ctype_object.length << 1, 2) + ] + + return map_cls(await asyncio.gather(*kv_pairs_coro)) + + @classmethod + def __get_map_class(cls, ctype_object): + map_type = getattr(ctype_object, 'type', cls.HASH_MAP) + return OrderedDict if map_type == cls.LINKED_HASH_MAP else dict @classmethod def from_python(cls, stream, value, type_id=None): + cls.__write_header(stream, type_id, len(value)) + for k, v in value.items(): + infer_from_python(stream, k) + infer_from_python(stream, v) + + @classmethod + async def from_python_async(cls, stream, value, type_id=None): + cls.__write_header(stream, type_id, len(value)) + for k, v in value.items(): + await infer_from_python_async(stream, k) + await infer_from_python_async(stream, v) + + @classmethod + def __write_header(cls, stream, type_id, length): header_class = cls.build_header() header = header_class() - length = len(value) header.length = length + if hasattr(header, 'type_code'): - header.type_code 
= int.from_bytes( - cls.type_code, - byteorder=PROTOCOL_BYTE_ORDER - ) + header.type_code = int.from_bytes(cls.type_code, byteorder=PROTOCOL_BYTE_ORDER) + if hasattr(header, 'type'): header.type = type_id stream.write(header) - for k, v in value.items(): - infer_from_python(stream, k) - infer_from_python(stream, v) class MapObject(Map): @@ -404,7 +533,7 @@ class MapObject(Map): @classmethod def build_header(cls): return type( - cls.__name__+'Header', + cls.__name__ + 'Header', (ctypes.LittleEndianStructure,), { '_pack_': 1, @@ -419,23 +548,43 @@ def build_header(cls): @classmethod def to_python(cls, ctype_object, *args, **kwargs): obj_type = getattr(ctype_object, "type", None) - if obj_type is None: - return None - return obj_type, super().to_python( - ctype_object, *args, **kwargs - ) + if obj_type: + return obj_type, super().to_python(ctype_object, *args, **kwargs) + return None + + @classmethod + async def to_python_async(cls, ctype_object, *args, **kwargs): + obj_type = getattr(ctype_object, "type", None) + if obj_type: + return obj_type, await super().to_python_async(ctype_object, *args, **kwargs) + return None + + @classmethod + def __get_obj_type(cls, ctype_object): + return getattr(ctype_object, "type", None) + + @classmethod + def from_python(cls, stream, value, **kwargs): + type_id, value = cls.__unpack_value(stream, value) + if value: + super().from_python(stream, value, type_id) @classmethod - def from_python(cls, stream, value): + async def from_python_async(cls, stream, value, **kwargs): + type_id, value = cls.__unpack_value(stream, value) + if value: + await super().from_python_async(stream, value, type_id) + + @classmethod + def __unpack_value(cls, stream, value): if value is None: Null.from_python(stream) - return + return None, None - type_id, value = value - super().from_python(stream, value, type_id) + return value -class BinaryObject(IgniteDataType, Nullable): +class BinaryObject(Nullable): _type_id = TYPE_BINARY_OBJ type_code = TC_COMPLEX_OBJECT @@ -446,18 +595,25 @@ class BinaryObject(IgniteDataType, Nullable): OFFSET_TWO_BYTES = 0x0010 COMPACT_FOOTER = 0x0020 - @staticmethod - def hashcode(value: object, client: None) -> int: + @classmethod + def hashcode(cls, value: object, client: Optional['Client']) -> int: # binary objects's hashcode implementation is special in the sense # that you need to fully serialize the object to calculate # its hashcode - if not value._hashcode and client : - - with BinaryStream(client.random_node) as stream: + if not value._hashcode and client: + with BinaryStream(client) as stream: value._from_python(stream, save_to_buf=True) return value._hashcode + @classmethod + async def hashcode_async(cls, value: object, client: Optional['AioClient']) -> int: + if not value._hashcode and client: + with AioBinaryStream(client) as stream: + await value._from_python_async(stream, save_to_buf=True) + + return value._hashcode + @classmethod def build_header(cls): return type( @@ -504,22 +660,47 @@ def schema_type(cls, flags: int): @classmethod def parse_not_null(cls, stream): - from pyignite.datatypes import Struct + header, header_class = cls.__parse_header(stream) + + # ignore full schema, always retrieve fields' types and order + # from complex types registry + data_class = stream.get_dataclass(header) + object_fields_struct = cls.__build_object_fields_struct(data_class) + object_fields = object_fields_struct.parse(stream) + + return cls.__build_final_class(stream, header, header_class, object_fields, + len(object_fields_struct.fields)) + 
@classmethod + async def parse_not_null_async(cls, stream): + header, header_class = cls.__parse_header(stream) + + # ignore full schema, always retrieve fields' types and order + # from complex types registry + data_class = await stream.get_dataclass(header) + object_fields_struct = cls.__build_object_fields_struct(data_class) + object_fields = await object_fields_struct.parse_async(stream) + + return cls.__build_final_class(stream, header, header_class, object_fields, + len(object_fields_struct.fields)) + + @classmethod + def __parse_header(cls, stream): header_class = cls.build_header() header = stream.read_ctype(header_class) stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + return header, header_class - # ignore full schema, always retrieve fields' types and order - # from complex types registry - data_class = stream.get_dataclass(header) + @staticmethod + def __build_object_fields_struct(data_class): fields = data_class.schema.items() - object_fields_struct = Struct(fields) - object_fields = object_fields_struct.parse(stream) - final_class_fields = [('object_fields', object_fields)] + return Struct(fields) + @classmethod + def __build_final_class(cls, stream, header, header_class, object_fields, fields_len): + final_class_fields = [('object_fields', object_fields)] if header.flags & cls.HAS_SCHEMA: - schema = cls.schema_type(header.flags) * len(fields) + schema = cls.schema_type(header.flags) * fields_len stream.seek(ctypes.sizeof(schema), SEEK_CUR) final_class_fields.append(('schema', schema)) @@ -537,35 +718,71 @@ def parse_not_null(cls, stream): @classmethod def to_python(cls, ctype_object, client: 'Client' = None, *args, **kwargs): - type_id = getattr(ctype_object, "type_id", None) - if type_id is None: - return None + type_id = cls.__get_type_id(ctype_object, client) + if type_id: + data_class = client.query_binary_type(type_id, ctype_object.schema_id) + + result = data_class() + result.version = ctype_object.version + for field_name, field_type in data_class.schema.items(): + setattr( + result, field_name, field_type.to_python( + getattr(ctype_object.object_fields, field_name), + client, *args, **kwargs + ) + ) + return result - if not client: - raise ParseError( - 'Can not query binary type {}'.format(type_id) - ) + return None - data_class = client.query_binary_type( - type_id, - ctype_object.schema_id - ) - result = data_class() - - result.version = ctype_object.version - for field_name, field_type in data_class.schema.items(): - setattr( - result, field_name, field_type.to_python( - getattr(ctype_object.object_fields, field_name), - client, *args, **kwargs - ) + @classmethod + async def to_python_async(cls, ctype_object, client: 'AioClient' = None, *args, **kwargs): + type_id = cls.__get_type_id(ctype_object, client) + if type_id: + data_class = await client.query_binary_type(type_id, ctype_object.schema_id) + + result = data_class() + result.version = ctype_object.version + + field_values = await asyncio.gather( + *[ + field_type.to_python_async( + getattr(ctype_object.object_fields, field_name), client, *args, **kwargs + ) + for field_name, field_type in data_class.schema.items() + ] ) - return result + + for i, field_name in enumerate(data_class.schema.keys()): + setattr(result, field_name, field_values[i]) + + return result + return None @classmethod - def from_python_not_null(cls, stream, value): - if getattr(value, '_buffer', None): - stream.write(value._buffer) - else: + def __get_type_id(cls, ctype_object, client): + type_id = getattr(ctype_object, "type_id", 
None) + if type_id: + if not client: + raise ParseError(f'Can not query binary type {type_id}') + return type_id + return None + + @classmethod + def from_python_not_null(cls, stream, value, **kwargs): + if cls.__write_fast_path(stream, value): stream.register_binary_type(value.__class__) value._from_python(stream) + + @classmethod + async def from_python_not_null_async(cls, stream, value, **kwargs): + if cls.__write_fast_path(stream, value): + await stream.register_binary_type(value.__class__) + await value._from_python_async(stream) + + @classmethod + def __write_fast_path(cls, stream, value): + if getattr(value, '_buffer', None): + stream.write(value._buffer) + return False + return True diff --git a/pyignite/datatypes/internal.py b/pyignite/datatypes/internal.py index a6da9fe..0de50e2 100644 --- a/pyignite/datatypes/internal.py +++ b/pyignite/datatypes/internal.py @@ -12,26 +12,25 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import asyncio from collections import OrderedDict import ctypes import decimal from datetime import date, datetime, timedelta from io import SEEK_CUR -from typing import Any, Tuple, Union, Callable, List +from typing import Any, Union, Callable, List import uuid import attr -from pyignite.constants import * +from pyignite.constants import PROTOCOL_BYTE_ORDER from pyignite.exceptions import ParseError from pyignite.utils import is_binary, is_hinted, is_iterable from .type_codes import * __all__ = [ - 'AnyDataArray', 'AnyDataObject', 'Struct', 'StructArray', 'tc_map', - 'infer_from_python', + 'AnyDataArray', 'AnyDataObject', 'Struct', 'StructArray', 'tc_map', 'infer_from_python', 'infer_from_python_async' ] from ..stream import READ_BACKWARD @@ -124,11 +123,25 @@ def __init__(self, fields: List, predicate1: Callable[[any], bool], self.var2 = var2 def parse(self, stream, context): - return self.var1.parse(stream) if self.predicate1(context) else self.var2.parse(stream) + if self.predicate1(context): + return self.var1.parse(stream) + return self.var2.parse(stream) + + async def parse_async(self, stream, context): + if self.predicate1(context): + return await self.var1.parse_async(stream) + return await self.var2.parse_async(stream) def to_python(self, ctype_object, context, *args, **kwargs): - return self.var1.to_python(ctype_object, *args, **kwargs) if self.predicate2(context)\ - else self.var2.to_python(ctype_object, *args, **kwargs) + if self.predicate2(context): + return self.var1.to_python(ctype_object, *args, **kwargs) + return self.var2.to_python(ctype_object, *args, **kwargs) + + async def to_python_async(self, ctype_object, context, *args, **kwargs): + if self.predicate2(context): + return await self.var1.to_python_async(ctype_object, *args, **kwargs) + return await self.var2.to_python_async(ctype_object, *args, **kwargs) + @attr.s class StructArray: @@ -139,7 +152,7 @@ class StructArray: def build_header_class(self): return type( - self.__class__.__name__+'Header', + self.__class__.__name__ + 'Header', (ctypes.LittleEndianStructure,), { '_pack_': 1, @@ -150,19 +163,34 @@ def build_header_class(self): ) def parse(self, stream): + fields, length = [], self.__parse_length(stream) + + for i in range(length): + c_type = Struct(self.following).parse(stream) + fields.append(('element_{}'.format(i), c_type)) + + return self.__build_final_class(fields) + + async def parse_async(self, stream): + fields, length = [], 
self.__parse_length(stream) + + for i in range(length): + c_type = await Struct(self.following).parse_async(stream) + fields.append(('element_{}'.format(i), c_type)) + + return self.__build_final_class(fields) + + def __parse_length(self, stream): counter_type_len = ctypes.sizeof(self.counter_type) length = int.from_bytes( stream.mem_view(offset=counter_type_len), byteorder=PROTOCOL_BYTE_ORDER ) stream.seek(counter_type_len, SEEK_CUR) + return length - fields = [] - for i in range(length): - c_type = Struct(self.following).parse(stream) - fields.append(('element_{}'.format(i), c_type)) - - data_class = type( + def __build_final_class(self, fields): + return type( 'StructArray', (self.build_header_class(),), { @@ -171,36 +199,47 @@ def parse(self, stream): }, ) - return data_class - def to_python(self, ctype_object, *args, **kwargs): - result = [] length = getattr(ctype_object, 'length', 0) - for i in range(length): - result.append( - Struct( - self.following, dict_type=dict - ).to_python( - getattr(ctype_object, 'element_{}'.format(i)), - *args, **kwargs - ) - ) - return result + return [ + Struct(self.following, dict_type=dict).to_python(getattr(ctype_object, 'element_{}'.format(i)), + *args, **kwargs) + for i in range(length) + ] - def from_python(self, stream, value): - length = len(value) - header_class = self.build_header_class() - header = header_class() - header.length = length + async def to_python_async(self, ctype_object, *args, **kwargs): + length = getattr(ctype_object, 'length', 0) + result_coro = [ + Struct(self.following, dict_type=dict).to_python_async(getattr(ctype_object, 'element_{}'.format(i)), + *args, **kwargs) + for i in range(length) + ] + return await asyncio.gather(*result_coro) + def from_python(self, stream, value): + self.__write_header(stream, len(value)) - stream.write(header) - for i, v in enumerate(value): + for v in value: for default_key, default_value in self.defaults.items(): v.setdefault(default_key, default_value) for name, el_class in self.following: el_class.from_python(stream, v[name]) + async def from_python_async(self, stream, value): + self.__write_header(stream, len(value)) + + for v in value: + for default_key, default_value in self.defaults.items(): + v.setdefault(default_key, default_value) + for name, el_class in self.following: + await el_class.from_python_async(stream, v[name]) + + def __write_header(self, stream, length): + header_class = self.build_header_class() + header = header_class() + header.length = length + stream.write(header) + @attr.s class Struct: @@ -210,12 +249,7 @@ class Struct: defaults = attr.ib(type=dict, default={}) def parse(self, stream): - fields, ctx = [], {} - - for _, c_type in self.fields: - if isinstance(c_type, Conditional): - for name in c_type.fields: - ctx[name] = None + fields, ctx = [], self.__prepare_conditional_ctx() for name, c_type in self.fields: is_cond = isinstance(c_type, Conditional) @@ -224,7 +258,31 @@ def parse(self, stream): if name in ctx: ctx[name] = stream.read_ctype(c_type, direction=READ_BACKWARD) - data_class = type( + return self.__build_final_class(fields) + + async def parse_async(self, stream): + fields, ctx = [], self.__prepare_conditional_ctx() + + for name, c_type in self.fields: + is_cond = isinstance(c_type, Conditional) + c_type = await c_type.parse_async(stream, ctx) if is_cond else await c_type.parse_async(stream) + fields.append((name, c_type)) + if name in ctx: + ctx[name] = stream.read_ctype(c_type, direction=READ_BACKWARD) + + return self.__build_final_class(fields) + + 
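For orientation, the internal Struct helper refactored above takes a list of (field name, Ignite data type) pairs; parse() builds a ctypes class from the stream and to_python() turns a parsed instance back into a dict. A minimal sketch with field names mirroring the SQL response body further down; the commented lines assume a BinaryStream positioned at that body:

```python
from pyignite.datatypes import Int, Long
from pyignite.datatypes.internal import Struct

body_struct = Struct([
    ('cursor', Long),
    ('row_count', Int),
])

# with a stream positioned at the response body (compare SQLResponse below):
# body_class = body_struct.parse(stream)                        # ctypes class
# body = stream.read_ctype(body_class, direction=READ_BACKWARD)
# print(body_struct.to_python(body))                            # OrderedDict
```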
def __prepare_conditional_ctx(self): + ctx = {} + for _, c_type in self.fields: + if isinstance(c_type, Conditional): + for name in c_type.fields: + ctx[name] = None + return ctx + + @staticmethod + def __build_final_class(fields): + return type( 'Struct', (ctypes.LittleEndianStructure,), { @@ -233,11 +291,7 @@ def parse(self, stream): }, ) - return data_class - - def to_python( - self, ctype_object, *args, **kwargs - ) -> Union[dict, OrderedDict]: + def to_python(self, ctype_object, *args, **kwargs) -> Union[dict, OrderedDict]: result = self.dict_type() for name, c_type in self.fields: is_cond = isinstance(c_type, Conditional) @@ -251,13 +305,41 @@ def to_python( ) return result + async def to_python_async(self, ctype_object, *args, **kwargs) -> Union[dict, OrderedDict]: + result = self.dict_type() + for name, c_type in self.fields: + is_cond = isinstance(c_type, Conditional) + + if is_cond: + value = await c_type.to_python_async( + getattr(ctype_object, name), + result, + *args, **kwargs + ) + else: + value = await c_type.to_python_async( + getattr(ctype_object, name), + *args, **kwargs + ) + result[name] = value + return result + def from_python(self, stream, value): - for default_key, default_value in self.defaults.items(): - value.setdefault(default_key, default_value) + self.__set_defaults(value) for name, el_class in self.fields: el_class.from_python(stream, value[name]) + async def from_python_async(self, stream, value): + self.__set_defaults(value) + + for name, el_class in self.fields: + await el_class.from_python_async(stream, value[name]) + + def __set_defaults(self, value): + for default_key, default_value in self.defaults.items(): + value.setdefault(default_key, default_value) + class AnyDataObject: """ @@ -294,29 +376,44 @@ def get_subtype(iterable, allow_none=False): # if an iterable contains items of more than one non-nullable type, # return None - if all([ - isinstance(x, type_first) - or ((x is None) and allow_none) for x in iterator - ]): + if all(isinstance(x, type_first) or ((x is None) and allow_none) for x in iterator): return type_first @classmethod def parse(cls, stream): + data_class = cls.__data_class_parse(stream) + return data_class.parse(stream) + + @classmethod + async def parse_async(cls, stream): + data_class = cls.__data_class_parse(stream) + return await data_class.parse_async(stream) + + @classmethod + def __data_class_parse(cls, stream): type_code = bytes(stream.mem_view(offset=ctypes.sizeof(ctypes.c_byte))) try: - data_class = tc_map(type_code) + return tc_map(type_code) except KeyError: raise ParseError('Unknown type code: `{}`'.format(type_code)) - return data_class.parse(stream) @classmethod def to_python(cls, ctype_object, *args, **kwargs): + data_class = cls.__data_class_from_ctype(ctype_object) + return data_class.to_python(ctype_object) + + @classmethod + async def to_python_async(cls, ctype_object, *args, **kwargs): + data_class = cls.__data_class_from_ctype(ctype_object) + return await data_class.to_python_async(ctype_object) + + @classmethod + def __data_class_from_ctype(cls, ctype_object): type_code = ctype_object.type_code.to_bytes( ctypes.sizeof(ctypes.c_byte), byteorder=PROTOCOL_BYTE_ORDER ) - data_class = tc_map(type_code) - return data_class.to_python(ctype_object) + return tc_map(type_code) @classmethod def _init_python_map(cls): @@ -423,6 +520,11 @@ def from_python(cls, stream, value): p_type = cls.map_python_type(value) p_type.from_python(stream, value) + @classmethod + async def from_python_async(cls, stream, value): + p_type = 
cls.map_python_type(value) + await p_type.from_python_async(stream, value) + def infer_from_python(stream, value: Any): """ @@ -431,14 +533,26 @@ def infer_from_python(stream, value: Any): :param value: pythonic value or (value, type_hint) tuple, :return: bytes. """ - if is_hinted(value): - value, data_type = value - else: - data_type = AnyDataObject + value, data_type = __unpack_hinted(value) data_type.from_python(stream, value) +async def infer_from_python_async(stream, value: Any): + """ + Async version of infer_from_python + """ + value, data_type = __unpack_hinted(value) + + await data_type.from_python_async(stream, value) + + +def __unpack_hinted(value): + if is_hinted(value): + return value + return value, AnyDataObject + + @attr.s class AnyDataArray(AnyDataObject): """ @@ -448,7 +562,7 @@ class AnyDataArray(AnyDataObject): def build_header(self): return type( - self.__class__.__name__+'Header', + self.__class__.__name__ + 'Header', (ctypes.LittleEndianStructure,), { '_pack_': 1, @@ -459,16 +573,33 @@ def build_header(self): ) def parse(self, stream): - header_class = self.build_header() - header = stream.read_ctype(header_class) - stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + header, header_class = self.__parse_header(stream) fields = [] for i in range(header.length): c_type = super().parse(stream) fields.append(('element_{}'.format(i), c_type)) - final_class = type( + return self.__build_final_class(header_class, fields) + + async def parse_async(self, stream): + header, header_class = self.__parse_header(stream) + + fields = [] + for i in range(header.length): + c_type = await super().parse_async(stream) + fields.append(('element_{}'.format(i), c_type)) + + return self.__build_final_class(header_class, fields) + + def __parse_header(self, stream): + header_class = self.build_header() + header = stream.read_ctype(header_class) + stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + return header, header_class + + def __build_final_class(self, header_class, fields): + return type( self.__class__.__name__, (header_class,), { @@ -476,34 +607,58 @@ def parse(self, stream): '_fields_': fields, } ) - return final_class @classmethod def to_python(cls, ctype_object, *args, **kwargs): - result = [] - length = getattr(ctype_object, "length", None) - if length is None: - return None - for i in range(length): - result.append( + length = cls.__get_length(ctype_object) + + return [ + super().to_python(getattr(ctype_object, 'element_{}'.format(i)), *args, **kwargs) + for i in range(length) + ] + + @classmethod + async def to_python_async(cls, ctype_object, *args, **kwargs): + length = cls.__get_length(ctype_object) + + values = asyncio.gather( + *[ super().to_python( getattr(ctype_object, 'element_{}'.format(i)), *args, **kwargs - ) - ) - return result + ) for i in range(length) + ] + ) + return await values - def from_python(self, stream, value): - header_class = self.build_header() - header = header_class() + @staticmethod + def __get_length(ctype_object): + return getattr(ctype_object, "length", None) + def from_python(self, stream, value): try: length = len(value) except TypeError: value = [value] length = 1 - header.length = length + self.__write_header(stream, length) - stream.write(header) for x in value: infer_from_python(stream, x) + + async def from_python_async(self, stream, value): + try: + length = len(value) + except TypeError: + value = [value] + length = 1 + self.__write_header(stream, length) + + for x in value: + await infer_from_python_async(stream, x) + + def 
__write_header(self, stream, length): + header_class = self.build_header() + header = header_class() + header.length = length + stream.write(header) diff --git a/pyignite/datatypes/null_object.py b/pyignite/datatypes/null_object.py index 912ded8..f16034f 100644 --- a/pyignite/datatypes/null_object.py +++ b/pyignite/datatypes/null_object.py @@ -21,13 +21,12 @@ import ctypes from io import SEEK_CUR -from typing import Any from .base import IgniteDataType from .type_codes import TC_NULL -__all__ = ['Null'] +__all__ = ['Null', 'Nullable'] from ..constants import PROTOCOL_BYTE_ORDER @@ -37,11 +36,6 @@ class Null(IgniteDataType): pythonic = type(None) _object_c_type = None - @staticmethod - def hashcode(value: Any) -> int: - # Null object can not be a cache key. - return 0 - @classmethod def build_c_type(cls): if cls._object_c_type is None: @@ -59,55 +53,99 @@ def build_c_type(cls): @classmethod def parse(cls, stream): - init_pos, offset = stream.tell(), ctypes.sizeof(ctypes.c_byte) - stream.seek(offset, SEEK_CUR) + stream.seek(ctypes.sizeof(ctypes.c_byte), SEEK_CUR) return cls.build_c_type() - @staticmethod - def to_python(*args, **kwargs): + @classmethod + def to_python(cls, *args, **kwargs): return None - @staticmethod - def from_python(stream, *args): + @classmethod + def from_python(cls, stream, *args): stream.write(TC_NULL) -class Nullable: +class Nullable(IgniteDataType): @classmethod def parse_not_null(cls, stream): raise NotImplementedError + @classmethod + async def parse_not_null_async(cls, stream): + return cls.parse_not_null(stream) + @classmethod def parse(cls, stream): - type_len = ctypes.sizeof(ctypes.c_byte) + is_null, null_type = cls.__check_null_input(stream) - if stream.mem_view(offset=type_len) == TC_NULL: - stream.seek(type_len, SEEK_CUR) - return Null.build_c_type() + if is_null: + return null_type return cls.parse_not_null(stream) + @classmethod + async def parse_async(cls, stream): + is_null, null_type = cls.__check_null_input(stream) + + if is_null: + return null_type + + return await cls.parse_not_null_async(stream) + + @classmethod + def from_python_not_null(cls, stream, value, **kwargs): + raise NotImplementedError + + @classmethod + async def from_python_not_null_async(cls, stream, value, **kwargs): + return cls.from_python_not_null(stream, value, **kwargs) + + @classmethod + def from_python(cls, stream, value, **kwargs): + if value is None: + Null.from_python(stream) + else: + cls.from_python_not_null(stream, value) + + @classmethod + async def from_python_async(cls, stream, value, **kwargs): + if value is None: + Null.from_python(stream) + else: + await cls.from_python_not_null_async(stream, value, **kwargs) + @classmethod def to_python_not_null(cls, ctypes_object, *args, **kwargs): raise NotImplementedError + @classmethod + async def to_python_not_null_async(cls, ctypes_object, *args, **kwargs): + return cls.to_python_not_null(ctypes_object, *args, **kwargs) + @classmethod def to_python(cls, ctypes_object, *args, **kwargs): - if ctypes_object.type_code == int.from_bytes( - TC_NULL, - byteorder=PROTOCOL_BYTE_ORDER - ): + if cls.__is_null(ctypes_object): return None return cls.to_python_not_null(ctypes_object, *args, **kwargs) @classmethod - def from_python_not_null(cls, stream, value): - raise NotImplementedError + async def to_python_async(cls, ctypes_object, *args, **kwargs): + if cls.__is_null(ctypes_object): + return None + + return await cls.to_python_not_null_async(ctypes_object, *args, **kwargs) @classmethod - def from_python(cls, stream, value): - if value 
is None: - Null.from_python(stream) - else: - cls.from_python_not_null(stream, value) + def __check_null_input(cls, stream): + type_len = ctypes.sizeof(ctypes.c_byte) + + if stream.mem_view(offset=type_len) == TC_NULL: + stream.seek(type_len, SEEK_CUR) + return True, Null.build_c_type() + + return False, None + + @classmethod + def __is_null(cls, ctypes_object): + return ctypes_object.type_code == int.from_bytes(TC_NULL, byteorder=PROTOCOL_BYTE_ORDER) diff --git a/pyignite/datatypes/primitive.py b/pyignite/datatypes/primitive.py index ffa2e32..3bbb196 100644 --- a/pyignite/datatypes/primitive.py +++ b/pyignite/datatypes/primitive.py @@ -48,8 +48,7 @@ class Primitive(IgniteDataType): @classmethod def parse(cls, stream): - init_pos, offset = stream.tell(), ctypes.sizeof(cls.c_type) - stream.seek(offset, SEEK_CUR) + stream.seek(ctypes.sizeof(cls.c_type), SEEK_CUR) return cls.c_type @classmethod diff --git a/pyignite/datatypes/primitive_arrays.py b/pyignite/datatypes/primitive_arrays.py index 7cb5b20..a21de77 100644 --- a/pyignite/datatypes/primitive_arrays.py +++ b/pyignite/datatypes/primitive_arrays.py @@ -15,11 +15,8 @@ import ctypes from io import SEEK_CUR -from typing import Any from pyignite.constants import * -from . import Null -from .base import IgniteDataType from .null_object import Nullable from .primitive import * from .type_codes import * @@ -35,7 +32,7 @@ ] -class PrimitiveArray(IgniteDataType, Nullable): +class PrimitiveArray(Nullable): """ Base class for array of primitives. Payload-only. """ @@ -44,15 +41,10 @@ class PrimitiveArray(IgniteDataType, Nullable): primitive_type = None type_code = None - @staticmethod - def hashcode(value: Any) -> int: - # Arrays are not supported as keys at the moment. - return 0 - @classmethod def build_header_class(cls): return type( - cls.__name__+'Header', + cls.__name__ + 'Header', (ctypes.LittleEndianStructure,), { '_pack_': 1, @@ -88,7 +80,11 @@ def to_python(cls, ctype_object, *args, **kwargs): return [ctype_object.data[i] for i in range(ctype_object.length)] @classmethod - def from_python_not_null(cls, stream, value): + async def to_python_async(cls, ctypes_object, *args, **kwargs): + return cls.to_python(ctypes_object, *args, **kwargs) + + @classmethod + def from_python_not_null(cls, stream, value, **kwargs): header_class = cls.build_header_class() header = header_class() if hasattr(header, 'type_code'): @@ -188,7 +184,7 @@ class PrimitiveArrayObject(PrimitiveArray): @classmethod def build_header_class(cls): return type( - cls.__name__+'Header', + cls.__name__ + 'Header', (ctypes.LittleEndianStructure,), { '_pack_': 1, @@ -312,7 +308,5 @@ def to_python(cls, ctype_object, *args, **kwargs): length = getattr(ctype_object, "length", None) if length is None: return None - result = [False] * length - for i in range(length): - result[i] = ctype_object.data[i] != 0 - return result + + return [ctype_object.data[i] != 0 for i in range(length)] diff --git a/pyignite/datatypes/primitive_objects.py b/pyignite/datatypes/primitive_objects.py index e942dd7..5849935 100644 --- a/pyignite/datatypes/primitive_objects.py +++ b/pyignite/datatypes/primitive_objects.py @@ -18,11 +18,10 @@ from pyignite.constants import * from pyignite.utils import unsigned -from .base import IgniteDataType from .type_codes import * from .type_ids import * from .type_names import * -from .null_object import Null, Nullable +from .null_object import Nullable __all__ = [ 'DataObject', 'ByteObject', 'ShortObject', 'IntObject', 'LongObject', @@ -30,7 +29,7 @@ ] -class 
DataObject(IgniteDataType, Nullable): +class DataObject(Nullable): """ Base class for primitive data objects. @@ -65,12 +64,16 @@ def parse_not_null(cls, stream): stream.seek(ctypes.sizeof(data_type), SEEK_CUR) return data_type - @staticmethod - def to_python(ctype_object, *args, **kwargs): + @classmethod + def to_python(cls, ctype_object, *args, **kwargs): return getattr(ctype_object, "value", None) @classmethod - def from_python_not_null(cls, stream, value): + async def to_python_async(cls, ctype_object, *args, **kwargs): + return cls.to_python(ctype_object, *args, **kwargs) + + @classmethod + def from_python_not_null(cls, stream, value, **kwargs): data_type = cls.build_c_type() data_object = data_type() data_object.type_code = int.from_bytes( @@ -89,8 +92,8 @@ class ByteObject(DataObject): pythonic = int default = 0 - @staticmethod - def hashcode(value: int, *args, **kwargs) -> int: + @classmethod + def hashcode(cls, value: int, *args, **kwargs) -> int: return value @@ -102,8 +105,8 @@ class ShortObject(DataObject): pythonic = int default = 0 - @staticmethod - def hashcode(value: int, *args, **kwargs) -> int: + @classmethod + def hashcode(cls, value: int, *args, **kwargs) -> int: return value @@ -115,8 +118,8 @@ class IntObject(DataObject): pythonic = int default = 0 - @staticmethod - def hashcode(value: int, *args, **kwargs) -> int: + @classmethod + def hashcode(cls, value: int, *args, **kwargs) -> int: return value @@ -128,8 +131,8 @@ class LongObject(DataObject): pythonic = int default = 0 - @staticmethod - def hashcode(value: int, *args, **kwargs) -> int: + @classmethod + def hashcode(cls, value: int, *args, **kwargs) -> int: return value ^ (unsigned(value, ctypes.c_ulonglong) >> 32) @@ -141,8 +144,8 @@ class FloatObject(DataObject): pythonic = float default = 0.0 - @staticmethod - def hashcode(value: float, *args, **kwargs) -> int: + @classmethod + def hashcode(cls, value: float, *args, **kwargs) -> int: return ctypes.cast( ctypes.pointer(ctypes.c_float(value)), ctypes.POINTER(ctypes.c_int) @@ -157,8 +160,8 @@ class DoubleObject(DataObject): pythonic = float default = 0.0 - @staticmethod - def hashcode(value: float, *args, **kwargs) -> int: + @classmethod + def hashcode(cls, value: float, *args, **kwargs) -> int: bits = ctypes.cast( ctypes.pointer(ctypes.c_double(value)), ctypes.POINTER(ctypes.c_longlong) @@ -180,8 +183,8 @@ class CharObject(DataObject): pythonic = str default = ' ' - @staticmethod - def hashcode(value: str, *args, **kwargs) -> int: + @classmethod + def hashcode(cls, value: str, *args, **kwargs) -> int: return ord(value) @classmethod @@ -195,7 +198,7 @@ def to_python(cls, ctype_object, *args, **kwargs): ).decode(PROTOCOL_CHAR_ENCODING) @classmethod - def from_python_not_null(cls, stream, value): + def from_python_not_null(cls, stream, value, **kwargs): if type(value) is str: value = value.encode(PROTOCOL_CHAR_ENCODING) # assuming either a bytes or an integer @@ -216,8 +219,8 @@ class BoolObject(DataObject): pythonic = bool default = False - @staticmethod - def hashcode(value: bool, *args, **kwargs) -> int: + @classmethod + def hashcode(cls, value: bool, *args, **kwargs) -> int: return 1231 if value else 1237 @classmethod @@ -226,4 +229,3 @@ def to_python(cls, ctype_object, *args, **kwargs): if value is None: return None return value != 0 - diff --git a/pyignite/datatypes/standard.py b/pyignite/datatypes/standard.py index af50a8e..2b61235 100644 --- a/pyignite/datatypes/standard.py +++ b/pyignite/datatypes/standard.py @@ -18,16 +18,15 @@ import decimal from io 
import SEEK_CUR from math import ceil -from typing import Any, Tuple +from typing import Tuple import uuid from pyignite.constants import * from pyignite.utils import datetime_hashcode, decimal_hashcode, hashcode -from .base import IgniteDataType from .type_codes import * from .type_ids import * from .type_names import * -from .null_object import Null, Nullable +from .null_object import Nullable __all__ = [ 'String', 'DecimalObject', 'UUIDObject', 'TimestampObject', 'DateObject', @@ -44,7 +43,7 @@ ] -class StandardObject(IgniteDataType, Nullable): +class StandardObject(Nullable): _type_name = None _type_id = None type_code = None @@ -60,7 +59,7 @@ def parse_not_null(cls, stream): return data_type -class String(IgniteDataType, Nullable): +class String(Nullable): """ Pascal-style string: `c_int` counter, followed by count*bytes. UTF-8-encoded, so that one character may take 1 to 4 bytes. @@ -70,8 +69,8 @@ class String(IgniteDataType, Nullable): type_code = TC_STRING pythonic = str - @staticmethod - def hashcode(value: str, *args, **kwargs) -> int: + @classmethod + def hashcode(cls, value: str, *args, **kwargs) -> int: return hashcode(value) @classmethod @@ -124,15 +123,15 @@ def from_python_not_null(cls, stream, value): stream.write(data_object) -class DecimalObject(IgniteDataType, Nullable): +class DecimalObject(Nullable): _type_name = NAME_DECIMAL _type_id = TYPE_DECIMAL type_code = TC_DECIMAL pythonic = decimal.Decimal default = decimal.Decimal('0.00') - @staticmethod - def hashcode(value: decimal.Decimal, *args, **kwargs) -> int: + @classmethod + def hashcode(cls, value: decimal.Decimal, *args, **kwargs) -> int: return decimal_hashcode(value) @classmethod @@ -180,11 +179,7 @@ def to_python_not_null(cls, ctype_object, *args, **kwargs): range(len(data)) ]) # apply scale - result = ( - result - / decimal.Decimal('10') - ** decimal.Decimal(ctype_object.scale) - ) + result = result / decimal.Decimal('10') ** decimal.Decimal(ctype_object.scale) if sign: # apply sign result = -result @@ -195,7 +190,7 @@ def from_python_not_null(cls, stream, value: decimal.Decimal): sign, digits, scale = value.normalize().as_tuple() integer = int(''.join([str(d) for d in digits])) # calculate number of bytes (at least one, and not forget the sign bit) - length = ceil((integer.bit_length() + 1)/8) + length = ceil((integer.bit_length() + 1) / 8) # write byte string data = [] for i in range(length): @@ -247,8 +242,8 @@ class UUIDObject(StandardObject): UUID_BYTE_ORDER = (7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8) - @staticmethod - def hashcode(value: 'UUID', *args, **kwargs) -> int: + @classmethod + def hashcode(cls, value: 'UUID', *args, **kwargs) -> int: msb = value.int >> 64 lsb = value.int & 0xffffffffffffffff hilo = msb ^ lsb @@ -309,8 +304,8 @@ class TimestampObject(StandardObject): pythonic = tuple default = (datetime(1970, 1, 1), 0) - @staticmethod - def hashcode(value: Tuple[datetime, int], *args, **kwargs) -> int: + @classmethod + def hashcode(cls, value: Tuple[datetime, int], *args, **kwargs) -> int: return datetime_hashcode(int(value[0].timestamp() * 1000)) @classmethod @@ -331,7 +326,7 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def from_python_not_null(cls, stream, value: tuple): + def from_python_not_null(cls, stream, value: tuple, **kwargs): data_type = cls.build_c_type() data_object = data_type() data_object.type_code = int.from_bytes( @@ -346,7 +341,7 @@ def from_python_not_null(cls, stream, value: tuple): @classmethod def to_python_not_null(cls, ctypes_object, 
*args, **kwargs): return ( - datetime.fromtimestamp(ctypes_object.epoch/1000), + datetime.fromtimestamp(ctypes_object.epoch / 1000), ctypes_object.fraction ) @@ -365,8 +360,8 @@ class DateObject(StandardObject): pythonic = datetime default = datetime(1970, 1, 1) - @staticmethod - def hashcode(value: datetime, *args, **kwargs) -> int: + @classmethod + def hashcode(cls, value: datetime, *args, **kwargs) -> int: return datetime_hashcode(int(value.timestamp() * 1000)) @classmethod @@ -401,7 +396,7 @@ def from_python_not_null(cls, stream, value: [date, datetime]): @classmethod def to_python_not_null(cls, ctypes_object, *args, **kwargs): - return datetime.fromtimestamp(ctypes_object.epoch/1000) + return datetime.fromtimestamp(ctypes_object.epoch / 1000) class TimeObject(StandardObject): @@ -417,8 +412,8 @@ class TimeObject(StandardObject): pythonic = timedelta default = timedelta() - @staticmethod - def hashcode(value: timedelta, *args, **kwargs) -> int: + @classmethod + def hashcode(cls, value: timedelta, *args, **kwargs) -> int: return datetime_hashcode(int(value.total_seconds() * 1000)) @classmethod @@ -510,7 +505,7 @@ class BinaryEnumObject(EnumObject): type_code = TC_BINARY_ENUM -class StandardArray(IgniteDataType, Nullable): +class StandardArray(Nullable): """ Base class for array of primitives. Payload-only. """ @@ -519,15 +514,10 @@ class StandardArray(IgniteDataType, Nullable): standard_type = None type_code = None - @staticmethod - def hashcode(value: Any) -> int: - # Arrays are not supported as keys at the moment. - return 0 - @classmethod def build_header_class(cls): return type( - cls.__name__+'Header', + cls.__name__ + 'Header', (ctypes.LittleEndianStructure,), { '_pack_': 1, @@ -575,7 +565,11 @@ def to_python(cls, ctype_object, *args, **kwargs): return result @classmethod - def from_python_not_null(cls, stream, value): + async def to_python_async(cls, ctypes_object, *args, **kwargs): + return cls.to_python(ctypes_object, *args, **kwargs) + + @classmethod + def from_python_not_null(cls, stream, value, **kwargs): header_class = cls.build_header_class() header = header_class() if hasattr(header, 'type_code'): @@ -648,7 +642,7 @@ class StandardArrayObject(StandardArray): @classmethod def build_header_class(cls): return type( - cls.__name__+'Header', + cls.__name__ + 'Header', (ctypes.LittleEndianStructure,), { '_pack_': 1, @@ -723,7 +717,7 @@ class EnumArrayObject(StandardArrayObject): @classmethod def build_header_class(cls): return type( - cls.__name__+'Header', + cls.__name__ + 'Header', (ctypes.LittleEndianStructure,), { '_pack_': 1, @@ -736,7 +730,7 @@ def build_header_class(cls): ) @classmethod - def from_python_not_null(cls, stream, value): + def from_python_not_null(cls, stream, value, **kwargs): type_id, value = value header_class = cls.build_header_class() header = header_class() @@ -754,7 +748,7 @@ def from_python_not_null(cls, stream, value): cls.standard_type.from_python(stream, x) @classmethod - def to_python(cls, ctype_object, *args, **kwargs): + def to_python_not_null(cls, ctype_object, *args, **kwargs): type_id = getattr(ctype_object, "type_id", None) if type_id is None: return None diff --git a/pyignite/exceptions.py b/pyignite/exceptions.py index 5933228..579aa29 100644 --- a/pyignite/exceptions.py +++ b/pyignite/exceptions.py @@ -93,4 +93,4 @@ class SQLError(CacheError): pass -connection_errors = (IOError, OSError) +connection_errors = (IOError, OSError, EOFError) diff --git a/pyignite/queries/__init__.py b/pyignite/queries/__init__.py index d558125..56c6347 100644 
--- a/pyignite/queries/__init__.py +++ b/pyignite/queries/__init__.py @@ -21,4 +21,4 @@ :mod:`pyignite.datatypes` binary parser/generator classes. """ -from .query import Query, ConfigQuery +from .query import Query, ConfigQuery, query_perform diff --git a/pyignite/queries/query.py b/pyignite/queries/query.py index b5be753..beea5d9 100644 --- a/pyignite/queries/query.py +++ b/pyignite/queries/query.py @@ -13,15 +13,35 @@ # See the License for the specific language governing permissions and # limitations under the License. -import attr import ctypes +from io import SEEK_CUR from random import randint +import attr + from pyignite.api.result import APIResult -from pyignite.connection import Connection +from pyignite.connection import Connection, AioConnection from pyignite.constants import MIN_LONG, MAX_LONG, RHF_TOPOLOGY_CHANGED -from pyignite.queries.response import Response, SQLResponse -from pyignite.stream import BinaryStream, READ_BACKWARD +from pyignite.queries.response import Response +from pyignite.stream import AioBinaryStream, BinaryStream, READ_BACKWARD + + +def query_perform(query_struct, conn, post_process_fun=None, **kwargs): + async def _async_internal(): + result = await query_struct.perform_async(conn, **kwargs) + if post_process_fun: + return post_process_fun(result) + return result + + def _internal(): + result = query_struct.perform(conn, **kwargs) + if post_process_fun: + return post_process_fun(result) + return result + + if isinstance(conn, AioConnection): + return _async_internal() + return _internal() @attr.s @@ -29,6 +49,7 @@ class Query: op_code = attr.ib(type=int) following = attr.ib(type=list, factory=list) query_id = attr.ib(type=int, default=None) + response_type = attr.ib(type=type(Response), default=Response) _query_c_type = None @classmethod @@ -48,32 +69,45 @@ def build_c_type(cls): ) return cls._query_c_type - def _build_header(self, stream, values: dict): + def from_python(self, stream, values: dict = None): + init_pos, header = stream.tell(), self._build_header(stream) + values = values if values else None + + for name, c_type in self.following: + c_type.from_python(stream, values[name]) + + self.__write_header(stream, header, init_pos) + + async def from_python_async(self, stream, values: dict = None): + init_pos, header = stream.tell(), self._build_header(stream) + values = values if values else None + + for name, c_type in self.following: + await c_type.from_python_async(stream, values[name]) + + self.__write_header(stream, header, init_pos) + + def _build_header(self, stream): header_class = self.build_c_type() header_len = ctypes.sizeof(header_class) - init_pos = stream.tell() - stream.seek(init_pos + header_len) + stream.seek(header_len, SEEK_CUR) header = header_class() header.op_code = self.op_code if self.query_id is None: header.query_id = randint(MIN_LONG, MAX_LONG) - for name, c_type in self.following: - c_type.from_python(stream, values[name]) + return header + @staticmethod + def __write_header(stream, header, init_pos): header.length = stream.tell() - init_pos - ctypes.sizeof(ctypes.c_int) stream.seek(init_pos) - - return header - - def from_python(self, stream, values: dict = None): - header = self._build_header(stream, values if values else {}) stream.write(header) def perform( self, conn: Connection, query_params: dict = None, - response_config: list = None, sql: bool = False, **kwargs, + response_config: list = None, **kwargs, ) -> APIResult: """ Perform query and process result. 
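The query_perform helper introduced above lets an API function share one call site for both transports: handed a Connection it runs Query.perform and returns an APIResult, handed an AioConnection it returns an awaitable that runs Query.perform_async. A hedged sketch of the intended pattern; the op code and response layout are illustrative, loosely modelled on the cache-name listing request:

```python
from pyignite.datatypes import StringArray
from pyignite.queries import Query, query_perform
from pyignite.queries.op_codes import OP_CACHE_GET_NAMES


def cache_get_names(conn):
    # the same function body serves sync and async callers
    query_struct = Query(OP_CACHE_GET_NAMES)
    return query_perform(
        query_struct, conn,
        response_config=[('cache_names', StringArray)],
    )
```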
@@ -83,26 +117,60 @@ def perform( Defaults to no parameters, :param response_config: (optional) response configuration − list of (name, type_hint) tuples. Defaults to empty return value, - :param sql: (optional) use normal (default) or SQL response class, :return: instance of :class:`~pyignite.api.result.APIResult` with raw value (may undergo further processing in API functions). """ - with BinaryStream(conn) as stream: + with BinaryStream(conn.client) as stream: self.from_python(stream, query_params) - conn.send(stream.getbuffer()) + response_data = conn.request(stream.getbuffer()) - if sql: - response_struct = SQLResponse(protocol_version=conn.get_protocol_version(), - following=response_config, **kwargs) - else: - response_struct = Response(protocol_version=conn.get_protocol_version(), - following=response_config) + response_struct = self.response_type(protocol_version=conn.protocol_version, + following=response_config, **kwargs) - with BinaryStream(conn, conn.recv()) as stream: + with BinaryStream(conn.client, response_data) as stream: response_ctype = response_struct.parse(stream) response = stream.read_ctype(response_ctype, direction=READ_BACKWARD) - # this test depends on protocol version + result = self.__post_process_response(conn, response_struct, response) + + if result.status == 0: + result.value = response_struct.to_python(response) + return result + + async def perform_async( + self, conn: AioConnection, query_params: dict = None, + response_config: list = None, **kwargs, + ) -> APIResult: + """ + Perform query and process result. + + :param conn: connection to Ignite server, + :param query_params: (optional) dict of named query parameters. + Defaults to no parameters, + :param response_config: (optional) response configuration − list of + (name, type_hint) tuples. Defaults to empty return value, + :return: instance of :class:`~pyignite.api.result.APIResult` with raw + value (may undergo further processing in API functions). 
+ """ + with AioBinaryStream(conn.client) as stream: + await self.from_python_async(stream, query_params) + data = await conn.request(stream.getbuffer()) + + response_struct = self.response_type(protocol_version=conn.protocol_version, + following=response_config, **kwargs) + + with AioBinaryStream(conn.client, data) as stream: + response_ctype = await response_struct.parse_async(stream) + response = stream.read_ctype(response_ctype, direction=READ_BACKWARD) + + result = self.__post_process_response(conn, response_struct, response) + + if result.status == 0: + result.value = await response_struct.to_python_async(response) + return result + + @staticmethod + def __post_process_response(conn, response_struct, response): if getattr(response, 'flags', False) & RHF_TOPOLOGY_CHANGED: # update latest affinity version new_affinity = (response.affinity_version, response.affinity_minor) @@ -112,10 +180,7 @@ def perform( conn.client.affinity_version = new_affinity # build result - result = APIResult(response) - if result.status == 0: - result.value = response_struct.to_python(response) - return result + return APIResult(response) class ConfigQuery(Query): @@ -142,7 +207,7 @@ def build_c_type(cls): ) return cls._query_c_type - def _build_header(self, stream, values: dict): - header = super()._build_header(stream, values) + def _build_header(self, stream): + header = super()._build_header(stream) header.config_length = header.length - ctypes.sizeof(type(header)) return header diff --git a/pyignite/queries/response.py b/pyignite/queries/response.py index ca2ae14..83a6e6a 100644 --- a/pyignite/queries/response.py +++ b/pyignite/queries/response.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import asyncio from io import SEEK_CUR import attr @@ -20,6 +21,7 @@ from pyignite.constants import RHF_TOPOLOGY_CHANGED, RHF_ERROR from pyignite.datatypes import AnyDataObject, Bool, Int, Long, String, StringArray, Struct +from pyignite.datatypes.binary import body_struct, enum_struct, schema_struct from pyignite.queries.op_codes import OP_SUCCESS from pyignite.stream import READ_BACKWARD @@ -35,7 +37,7 @@ def __attrs_post_init__(self): # replace None with empty list self.following = self.following or [] - def build_header(self): + def __build_header(self): if self._response_header is None: fields = [ ('length', ctypes.c_int), @@ -57,9 +59,9 @@ def build_header(self): ) return self._response_header - def parse(self, stream): + def __parse_header(self, stream): init_pos = stream.tell() - header_class = self.build_header() + header_class = self.__build_header() header_len = ctypes.sizeof(header_class) header = stream.read_ctype(header_class) stream.seek(header_len, SEEK_CUR) @@ -85,9 +87,10 @@ def parse(self, stream): if has_error: msg_type = String.parse(stream) fields.append(('error_message', msg_type)) - else: - self._parse_success(stream, fields) + return not has_error, init_pos, header_class, fields + + def __build_response_class(self, stream, init_pos, header_class, fields): response_class = type( self._response_class_name, (header_class,), @@ -100,21 +103,52 @@ def parse(self, stream): stream.seek(init_pos + ctypes.sizeof(response_class)) return response_class + def parse(self, stream): + success, init_pos, header_class, fields = self.__parse_header(stream) + if success: + self._parse_success(stream, fields) + + return self.__build_response_class(stream, init_pos, header_class, fields) + + async def parse_async(self, stream): + success, init_pos, header_class, fields = self.__parse_header(stream) + if success: + await self._parse_success_async(stream, fields) + + return self.__build_response_class(stream, init_pos, header_class, fields) + def _parse_success(self, stream, fields: list): for name, ignite_type in self.following: c_type = ignite_type.parse(stream) fields.append((name, c_type)) + async def _parse_success_async(self, stream, fields: list): + for name, ignite_type in self.following: + c_type = await ignite_type.parse_async(stream) + fields.append((name, c_type)) + def to_python(self, ctype_object, *args, **kwargs): - result = OrderedDict() + if not self.following: + return None + result = OrderedDict() for name, c_type in self.following: result[name] = c_type.to_python( getattr(ctype_object, name), *args, **kwargs ) - return result if result else None + return result + + async def to_python_async(self, ctype_object, *args, **kwargs): + if not self.following: + return None + + values = await asyncio.gather( + *[c_type.to_python_async(getattr(ctype_object, name), *args, **kwargs) for name, c_type in self.following] + ) + + return OrderedDict([(name, values[i]) for i, (name, _) in enumerate(self.following)]) @attr.s @@ -135,38 +169,62 @@ def fields_or_field_count(self): return 'field_count', Int def _parse_success(self, stream, fields: list): - following = [ - self.fields_or_field_count(), - ('row_count', Int), - ] - if self.has_cursor: - following.insert(0, ('cursor', Long)) - body_struct = Struct(following) + body_struct = self.__create_body_struct() body_class = body_struct.parse(stream) body = stream.read_ctype(body_class, direction=READ_BACKWARD) - if self.include_field_names: - field_count = body.fields.length - else: - field_count = body.field_count - - data_fields = 
[] + data_fields, field_count = [], self.__get_fields_count(body) for i in range(body.row_count): row_fields = [] for j in range(field_count): field_class = AnyDataObject.parse(stream) row_fields.append(('column_{}'.format(j), field_class)) - row_class = type( - 'SQLResponseRow', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': row_fields, - } - ) - data_fields.append(('row_{}'.format(i), row_class)) + self.__row_post_process(i, row_fields, data_fields) + + self.__body_class_post_process(body_class, fields, data_fields) + + async def _parse_success_async(self, stream, fields: list): + body_struct = self.__create_body_struct() + body_class = await body_struct.parse_async(stream) + body = stream.read_ctype(body_class, direction=READ_BACKWARD) + + data_fields, field_count = [], self.__get_fields_count(body) + for i in range(body.row_count): + row_fields = [] + for j in range(field_count): + field_class = await AnyDataObject.parse_async(stream) + row_fields.append(('column_{}'.format(j), field_class)) + + self.__row_post_process(i, row_fields, data_fields) + + self.__body_class_post_process(body_class, fields, data_fields) + + def __create_body_struct(self): + following = [self.fields_or_field_count(), ('row_count', Int)] + if self.has_cursor: + following.insert(0, ('cursor', Long)) + return Struct(following) + + def __get_fields_count(self, body): + if self.include_field_names: + return body.fields.length + return body.field_count + + @staticmethod + def __row_post_process(idx, row_fields, data_fields): + row_class = type( + 'SQLResponseRow', + (ctypes.LittleEndianStructure,), + { + '_pack_': 1, + '_fields_': row_fields, + } + ) + data_fields.append((f'row_{idx}', row_class)) + @staticmethod + def __body_class_post_process(body_class, fields, data_fields): data_class = type( 'SQLResponseData', (ctypes.LittleEndianStructure,), @@ -182,24 +240,8 @@ def _parse_success(self, stream, fields: list): def to_python(self, ctype_object, *args, **kwargs): if getattr(ctype_object, 'status_code', 0) == 0: - result = { - 'more': Bool.to_python( - ctype_object.more, *args, **kwargs - ), - 'data': [], - } - if hasattr(ctype_object, 'fields'): - result['fields'] = StringArray.to_python( - ctype_object.fields, *args, **kwargs - ) - else: - result['field_count'] = Int.to_python( - ctype_object.field_count, *args, **kwargs - ) - if hasattr(ctype_object, 'cursor'): - result['cursor'] = Long.to_python( - ctype_object.cursor, *args, **kwargs - ) + result = self.__to_python_result_header(ctype_object, *args, **kwargs) + for row_item in ctype_object.data._fields_: row_name = row_item[0] row_object = getattr(ctype_object.data, row_name) @@ -207,8 +249,104 @@ def to_python(self, ctype_object, *args, **kwargs): for col_item in row_object._fields_: col_name = col_item[0] col_object = getattr(row_object, col_name) - row.append( - AnyDataObject.to_python(col_object, *args, **kwargs) - ) + row.append(AnyDataObject.to_python(col_object, *args, **kwargs)) result['data'].append(row) return result + + async def to_python_async(self, ctype_object, *args, **kwargs): + if getattr(ctype_object, 'status_code', 0) == 0: + result = self.__to_python_result_header(ctype_object, *args, **kwargs) + + data_coro = [] + for row_item in ctype_object.data._fields_: + row_name = row_item[0] + row_object = getattr(ctype_object.data, row_name) + row_coro = [] + for col_item in row_object._fields_: + col_name = col_item[0] + col_object = getattr(row_object, col_name) + row_coro.append(AnyDataObject.to_python_async(col_object, 
*args, **kwargs)) + + data_coro.append(asyncio.gather(*row_coro)) + + result['data'] = await asyncio.gather(*data_coro) + return result + + @staticmethod + def __to_python_result_header(ctype_object, *args, **kwargs): + result = { + 'more': Bool.to_python(ctype_object.more, *args, **kwargs), + 'data': [], + } + if hasattr(ctype_object, 'fields'): + result['fields'] = StringArray.to_python(ctype_object.fields, *args, **kwargs) + else: + result['field_count'] = Int.to_python(ctype_object.field_count, *args, **kwargs) + + if hasattr(ctype_object, 'cursor'): + result['cursor'] = Long.to_python(ctype_object.cursor, *args, **kwargs) + return result + + +class BinaryTypeResponse(Response): + _response_class_name = 'GetBinaryTypeResponse' + + def _parse_success(self, stream, fields: list): + type_exists = self.__process_type_exists(stream, fields) + + if type_exists.value: + resp_body_type = body_struct.parse(stream) + fields.append(('body', resp_body_type)) + resp_body = stream.read_ctype(resp_body_type, direction=READ_BACKWARD) + if resp_body.is_enum: + resp_enum = enum_struct.parse(stream) + fields.append(('enums', resp_enum)) + + resp_schema_type = schema_struct.parse(stream) + fields.append(('schema', resp_schema_type)) + + async def _parse_success_async(self, stream, fields: list): + type_exists = self.__process_type_exists(stream, fields) + + if type_exists.value: + resp_body_type = await body_struct.parse_async(stream) + fields.append(('body', resp_body_type)) + resp_body = stream.read_ctype(resp_body_type, direction=READ_BACKWARD) + if resp_body.is_enum: + resp_enum = await enum_struct.parse_async(stream) + fields.append(('enums', resp_enum)) + + resp_schema_type = await schema_struct.parse_async(stream) + fields.append(('schema', resp_schema_type)) + + @staticmethod + def __process_type_exists(stream, fields): + fields.append(('type_exists', ctypes.c_byte)) + type_exists = stream.read_ctype(ctypes.c_byte) + stream.seek(ctypes.sizeof(ctypes.c_byte), SEEK_CUR) + + return type_exists + + def to_python(self, ctype_object, *args, **kwargs): + if getattr(ctype_object, 'status_code', 0) == 0: + result = { + 'type_exists': Bool.to_python(ctype_object.type_exists) + } + + if hasattr(ctype_object, 'body'): + result.update(body_struct.to_python(ctype_object.body)) + + if hasattr(ctype_object, 'enums'): + result['enums'] = enum_struct.to_python(ctype_object.enums) + + if hasattr(ctype_object, 'schema'): + result['schema'] = { + x['schema_id']: [ + z['schema_field_id'] for z in x['schema_fields'] + ] + for x in schema_struct.to_python(ctype_object.schema) + } + return result + + async def to_python_async(self, ctype_object, *args, **kwargs): + return self.to_python(ctype_object, *args, **kwargs) diff --git a/pyignite/stream/__init__.py b/pyignite/stream/__init__.py index 94153b4..76d171d 100644 --- a/pyignite/stream/__init__.py +++ b/pyignite/stream/__init__.py @@ -13,4 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .binary_stream import BinaryStream, READ_FORWARD, READ_BACKWARD \ No newline at end of file +from .binary_stream import BinaryStream, AioBinaryStream, READ_FORWARD, READ_BACKWARD + +__all__ = ['BinaryStream', 'AioBinaryStream', 'READ_BACKWARD', 'READ_FORWARD'] diff --git a/pyignite/stream/binary_stream.py b/pyignite/stream/binary_stream.py index 46ac683..57b4b83 100644 --- a/pyignite/stream/binary_stream.py +++ b/pyignite/stream/binary_stream.py @@ -14,39 +14,23 @@ # limitations under the License. 
import ctypes from io import BytesIO +from typing import Union, Optional +import pyignite import pyignite.utils as ignite_utils READ_FORWARD = 0 READ_BACKWARD = 1 -class BinaryStream: - def __init__(self, conn, buf=None): - """ - Initialize binary stream around buffers. - - :param buf: Buffer, optional parameter. If not passed, creates empty BytesIO. - :param conn: Connection instance, required. - """ - from pyignite.connection import Connection - - if not isinstance(conn, Connection): - raise TypeError(f"invalid parameter: expected instance of {Connection}") - - if buf and not isinstance(buf, (bytearray, bytes, memoryview)): - raise TypeError(f"invalid parameter: expected bytes-like object") - - self.conn = conn - self.stream = BytesIO(buf) if buf else BytesIO() - +class BinaryStreamBaseMixin: @property def compact_footer(self) -> bool: - return self.conn.client.compact_footer + return self.client.compact_footer @compact_footer.setter def compact_footer(self, value: bool): - self.conn.client.compact_footer = value + self.client.compact_footer = value def read(self, size): buf = bytearray(size) @@ -86,10 +70,10 @@ def getbuffer(self): def mem_view(self, start=-1, offset=0): start = start if start >= 0 else self.tell() - return self.stream.getbuffer()[start:start+offset] + return self.stream.getbuffer()[start:start + offset] def hashcode(self, start, bytes_len): - return ignite_utils.hashcode(self.stream.getbuffer()[start:start+bytes_len]) + return ignite_utils.hashcode(self.stream.getbuffer()[start:start + bytes_len]) def __enter__(self): return self @@ -100,15 +84,48 @@ def __exit__(self, exc_type, exc_value, traceback): except BufferError: pass + +class BinaryStream(BinaryStreamBaseMixin): + """ + Synchronous binary stream. + """ + def __init__(self, client: 'pyignite.Client', buf: Optional[Union[bytes, bytearray, memoryview]] = None): + """ + :param client: Client instance, required. + :param buf: Buffer, optional parameter. If not passed, creates empty BytesIO. + """ + self.client = client + self.stream = BytesIO(buf) if buf else BytesIO() + def get_dataclass(self, header): - # get field names from outer space - result = self.conn.client.query_binary_type( - header.type_id, - header.schema_id - ) + result = self.client.query_binary_type(header.type_id, header.schema_id) if not result: raise RuntimeError('Binary type is not registered') return result def register_binary_type(self, *args, **kwargs): - return self.conn.client.register_binary_type(*args, **kwargs) + self.client.register_binary_type(*args, **kwargs) + + +class AioBinaryStream(BinaryStreamBaseMixin): + """ + Asyncio binary stream. + """ + def __init__(self, client: 'pyignite.AioClient', buf: Optional[Union[bytes, bytearray, memoryview]] = None): + """ + Initialize binary stream around buffers. + + :param client: AioClient instance, required. + :param buf: Buffer, optional parameter. If not passed, creates empty BytesIO. 
+ """ + self.client = client + self.stream = BytesIO(buf) if buf else BytesIO() + + async def get_dataclass(self, header): + result = await self.client.query_binary_type(header.type_id, header.schema_id) + if not result: + raise RuntimeError('Binary type is not registered') + return result + + async def register_binary_type(self, *args, **kwargs): + await self.client.register_binary_type(*args, **kwargs) diff --git a/pyignite/utils.py b/pyignite/utils.py index f1a7f90..975f414 100644 --- a/pyignite/utils.py +++ b/pyignite/utils.py @@ -15,6 +15,7 @@ import ctypes import decimal +import inspect import warnings from functools import wraps @@ -65,23 +66,14 @@ def is_hinted(value): """ Check if a value is a tuple of data item and its type hint. """ - return ( - isinstance(value, tuple) - and len(value) == 2 - and issubclass(value[1], IgniteDataType) - ) + return isinstance(value, tuple) and len(value) == 2 and issubclass(value[1], IgniteDataType) def is_wrapped(value: Any) -> bool: """ Check if a value is of WrappedDataObject type. """ - return ( - type(value) is tuple - and len(value) == 2 - and type(value[0]) is bytes - and type(value[1]) is int - ) + return type(value) is tuple and len(value) == 2 and type(value[0]) is bytes and type(value[1]) is int def int_overflow(value: int) -> int: @@ -107,7 +99,7 @@ def hashcode(data: Union[str, bytes, bytearray, memoryview]) -> int: def __hashcode_fallback(data: Union[str, bytes, bytearray, memoryview]) -> int: if data is None: return 0 - + if isinstance(data, str): """ For strings we iterate over code point which are of the int type @@ -206,8 +198,7 @@ def decimal_hashcode(value: decimal.Decimal) -> int: # this is the case when Java BigDecimal digits are stored # compactly, in the internal 64-bit integer field int_hash = ( - (unsigned(value, ctypes.c_ulonglong) >> 32) * 31 - + (value & LONG_MASK) + (unsigned(value, ctypes.c_ulonglong) >> 32) * 31 + (value & LONG_MASK) ) & LONG_MASK else: # digits are not fit in the 64-bit long, so they get split internally @@ -243,25 +234,31 @@ def datetime_hashcode(value: int) -> int: def status_to_exception(exc: Type[Exception]): """ Converts erroneous status code with error message to an exception - of the given class. + of the given class. Supports coroutines. :param exc: the class of exception to raise, - :return: decorator. + :return: decorated function. """ + def process_result(result): + if result.status != 0: + raise exc(result.message) + return result.value + def ste_decorator(fn): - @wraps(fn) - def ste_wrapper(*args, **kwargs): - result = fn(*args, **kwargs) - if result.status != 0: - raise exc(result.message) - return result.value - return ste_wrapper + if inspect.iscoroutinefunction(fn): + @wraps(fn) + async def ste_wrapper_async(*args, **kwargs): + return process_result(await fn(*args, **kwargs)) + return ste_wrapper_async + else: + @wraps(fn) + def ste_wrapper(*args, **kwargs): + return process_result(fn(*args, **kwargs)) + return ste_wrapper return ste_decorator -def get_field_by_id( - obj: 'GenericObjectMeta', field_id: int -) -> Tuple[Any, IgniteDataType]: +def get_field_by_id(obj: 'GenericObjectMeta', field_id: int) -> Tuple[Any, IgniteDataType]: """ Returns a complex object's field value, given the field's entity ID. 
diff --git a/requirements/tests.txt b/requirements/tests.txt index 5d5ae84..38a8e9e 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -1,7 +1,10 @@ # these packages are used for testing +async_generator==1.10; python_version < '3.7' pytest==6.2.2 pytest-cov==2.11.1 +pytest-asyncio==0.14.0 teamcity-messages==1.28 psutil==5.8.0 jinja2==2.11.3 +flake8==3.8.4 diff --git a/setup.py b/setup.py index 4d90e4e..5db3aed 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import re from collections import defaultdict from distutils.command.build_ext import build_ext from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError @@ -86,6 +86,14 @@ def is_a_requirement(line): with open('README.md', 'r', encoding='utf-8') as readme_file: long_description = readme_file.read() +version = '' +with open('pyignite/__init__.py', 'r') as fd: + version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', + fd.read(), re.MULTILINE).group(1) + +if not version: + raise RuntimeError('Cannot find version information') + def run_setup(with_binary=True): if with_binary: @@ -98,7 +106,7 @@ def run_setup(with_binary=True): setuptools.setup( name='pyignite', - version='0.4.0', + version=version, python_requires='>=3.6', author='The Apache Software Foundation', author_email='dev@ignite.apache.org', diff --git a/tests/affinity/conftest.py b/tests/affinity/conftest.py index 7595f25..2ec2b1b 100644 --- a/tests/affinity/conftest.py +++ b/tests/affinity/conftest.py @@ -15,8 +15,7 @@ import pytest -from pyignite import Client -from pyignite.api import cache_create, cache_destroy +from pyignite import Client, AioClient from tests.util import start_ignite_gen # Sometimes on slow testing servers and unstable topology @@ -42,29 +41,21 @@ def server3(): @pytest.fixture def client(): client = Client(partition_aware=True, timeout=CLIENT_SOCKET_TIMEOUT) - - client.connect([('127.0.0.1', 10800 + i) for i in range(1, 4)]) - - yield client - - client.close() - - -@pytest.fixture -def client_not_connected(): - client = Client(partition_aware=True, timeout=CLIENT_SOCKET_TIMEOUT) - yield client - client.close() + try: + client.connect([('127.0.0.1', 10800 + i) for i in range(1, 4)]) + yield client + finally: + client.close() @pytest.fixture -def cache(connected_client): - cache_name = 'my_bucket' - conn = connected_client.random_node - - cache_create(conn, cache_name) - yield cache_name - cache_destroy(conn, cache_name) +async def async_client(): + client = AioClient(partition_aware=True) + try: + await client.connect([('127.0.0.1', 10800 + i) for i in range(1, 4)]) + yield client + finally: + await client.close() @pytest.fixture(scope='module', autouse=True) diff --git a/tests/affinity/test_affinity.py b/tests/affinity/test_affinity.py index ee8f6c0..b1bcec7 100644 --- a/tests/affinity/test_affinity.py +++ b/tests/affinity/test_affinity.py @@ -13,178 +13,265 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from datetime import datetime, timedelta +import asyncio import decimal +from datetime import datetime, timedelta from uuid import UUID, uuid4 import pytest -from pyignite import GenericObjectMeta -from pyignite.api import * -from pyignite.constants import * -from pyignite.datatypes import * +from pyignite import GenericObjectMeta, AioClient +from pyignite.api import ( + cache_get_node_partitions, cache_get_node_partitions_async, cache_local_peek, cache_local_peek_async +) +from pyignite.constants import MAX_INT +from pyignite.datatypes import ( + BinaryObject, ByteArray, ByteObject, IntObject, ShortObject, LongObject, FloatObject, DoubleObject, BoolObject, + CharObject, String, UUIDObject, DecimalObject, TimestampObject, TimeObject +) from pyignite.datatypes.cache_config import CacheMode -from pyignite.datatypes.prop_codes import * +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_CACHE_MODE, PROP_CACHE_KEY_CONFIGURATION +from tests.util import wait_for_condition, wait_for_condition_async -def test_get_node_partitions(client): - conn = client.random_node +def test_get_node_partitions(client, caches): + cache_ids = [cache.cache_id for cache in caches] + __wait_for_ready_affinity(client, cache_ids) + mappings = __get_mappings(client, cache_ids) + __check_mappings(mappings, cache_ids) - cache_1 = client.get_or_create_cache('test_cache_1') - cache_2 = client.get_or_create_cache({ - PROP_NAME: 'test_cache_2', - PROP_CACHE_KEY_CONFIGURATION: [ - { - 'type_name': ByteArray.type_name, - 'affinity_key_field_name': 'byte_affinity', - } - ], - }) - client.get_or_create_cache('test_cache_3') - client.get_or_create_cache('test_cache_4') - client.get_or_create_cache('test_cache_5') - - result = cache_get_node_partitions( - conn, - [cache_1.cache_id, cache_2.cache_id] - ) - assert result.status == 0, result.message - - -@pytest.mark.parametrize( - 'key, key_hint', [ - # integers - (42, None), - (43, ByteObject), - (-44, ByteObject), - (45, IntObject), - (-46, IntObject), - (47, ShortObject), - (-48, ShortObject), - (49, LongObject), - (MAX_INT-50, LongObject), - (MAX_INT+51, LongObject), - - # floating point - (5.2, None), - (5.354, FloatObject), - (-5.556, FloatObject), - (-57.58, DoubleObject), - - # boolean - (True, None), - (True, BoolObject), - (False, BoolObject), - - # char - ('A', CharObject), - ('Z', CharObject), - ('⅓', CharObject), - ('á', CharObject), - ('ы', CharObject), - ('カ', CharObject), - ('Ø', CharObject), - ('ß', CharObject), - - # string - ('This is a test string', None), - ('Кириллица', None), - ('Little Mary had a lamb', String), - - # UUID - (UUID('12345678123456789876543298765432'), None), - (UUID('74274274274274274274274274274274'), UUIDObject), - (uuid4(), None), - - # decimal (long internal representation in Java) - (decimal.Decimal('-234.567'), None), - (decimal.Decimal('200.0'), None), - (decimal.Decimal('123.456'), DecimalObject), - (decimal.Decimal('1.0'), None), - (decimal.Decimal('0.02'), None), - - # decimal (BigInteger internal representation in Java) - (decimal.Decimal('12345671234567123.45671234567'), None), - (decimal.Decimal('-845678456.7845678456784567845'), None), - - # date and time - (datetime(1980, 1, 1), None), - ((datetime(1980, 1, 1), 999), TimestampObject), - (timedelta(days=99), TimeObject), - - ], -) -def test_affinity(client, key, key_hint): - cache_1 = client.get_or_create_cache({ - PROP_NAME: 'test_cache_1', - PROP_CACHE_MODE: CacheMode.PARTITIONED, - }) - value = 42 - cache_1.put(key, value, key_hint=key_hint) - best_node = 
cache_1.get_best_node(key, key_hint=key_hint) +@pytest.mark.asyncio +async def test_get_node_partitions_async(async_client, async_caches): + cache_ids = [cache.cache_id for cache in async_caches] + await __wait_for_ready_affinity(async_client, cache_ids) + mappings = await __get_mappings(async_client, cache_ids) + __check_mappings(mappings, cache_ids) - for node in filter(lambda n: n.alive, client._nodes): - result = cache_local_peek( - node, cache_1.cache_id, key, key_hint=key_hint, - ) - if node is best_node: - assert result.value == value, ( - 'Affinity calculation error for {}'.format(key) - ) - else: - assert result.value is None, ( - 'Affinity calculation error for {}'.format(key) - ) - cache_1.destroy() +def __wait_for_ready_affinity(client, cache_ids): + def inner(): + def condition(): + result = __get_mappings(client, cache_ids) + return len(result.value['partition_mapping']) == len(cache_ids) + wait_for_condition(condition) -def test_affinity_for_generic_object(client): - cache_1 = client.get_or_create_cache({ - PROP_NAME: 'test_cache_1', - PROP_CACHE_MODE: CacheMode.PARTITIONED, - }) + async def inner_async(): + async def condition(): + result = await __get_mappings(client, cache_ids) + return len(result.value['partition_mapping']) == len(cache_ids) - class KeyClass( - metaclass=GenericObjectMeta, - schema={ - 'NO': IntObject, - 'NAME': String, - }, - ): - pass + await wait_for_condition_async(condition) - key = KeyClass() - key.NO = 1 - key.NAME = 'test_string' + return inner_async() if isinstance(client, AioClient) else inner() - cache_1.put(key, 42, key_hint=BinaryObject) - best_node = cache_1.get_best_node(key, key_hint=BinaryObject) +def __get_mappings(client, cache_ids): + def inner(): + conn = client.random_node + result = cache_get_node_partitions(conn, cache_ids) + assert result.status == 0, result.message + return result + + async def inner_async(): + conn = await client.random_node() + result = await cache_get_node_partitions_async(conn, cache_ids) + assert result.status == 0, result.message + return result + + return inner_async() if isinstance(client, AioClient) else inner() - for node in filter(lambda n: n.alive, client._nodes): - result = cache_local_peek( - node, cache_1.cache_id, key, key_hint=BinaryObject, - ) - if node is best_node: - assert result.value == 42, ( - 'Affinity calculation error for {}'.format(key) - ) - else: - assert result.value is None, ( - 'Affinity calculation error for {}'.format(key) - ) - cache_1.destroy() +def __check_mappings(result, cache_ids): + partition_mapping = result.value['partition_mapping'] + for i, cache_id in enumerate(cache_ids): + cache_mapping = partition_mapping[cache_id] + assert 'is_applicable' in cache_mapping -def test_affinity_for_generic_object_without_type_hints(client): - cache_1 = client.get_or_create_cache({ + # Check replicated cache + if i == 3: + assert not cache_mapping['is_applicable'] + assert 'node_mapping' not in cache_mapping + assert cache_mapping['number_of_partitions'] == 0 + else: + # Check cache config + if i == 2: + assert cache_mapping['cache_config'] + + assert cache_mapping['is_applicable'] + assert cache_mapping['node_mapping'] + assert cache_mapping['number_of_partitions'] == 1024 + + +@pytest.fixture +def caches(client): + yield from __create_caches_fixture(client) + + +@pytest.fixture +async def async_caches(async_client): + async for caches in __create_caches_fixture(async_client): + yield caches + + +def __create_caches_fixture(client): + caches_to_create = [] + for i in range(0, 5): + 
cache_name = f'test_cache_{i}' + if i == 2: + caches_to_create.append(( + cache_name, + { + PROP_NAME: cache_name, + PROP_CACHE_KEY_CONFIGURATION: [ + { + 'type_name': ByteArray.type_name, + 'affinity_key_field_name': 'byte_affinity', + } + ] + })) + elif i == 3: + caches_to_create.append(( + cache_name, + { + PROP_NAME: cache_name, + PROP_CACHE_MODE: CacheMode.REPLICATED + } + )) + else: + caches_to_create.append((cache_name, None)) + + def generate_caches(): + caches = [] + for name, config in caches_to_create: + if config: + cache = client.get_or_create_cache(config) + else: + cache = client.get_or_create_cache(name) + caches.append(cache) + return asyncio.gather(*caches) if isinstance(client, AioClient) else caches + + def inner(): + caches = [] + try: + caches = generate_caches() + yield caches + finally: + for cache in caches: + cache.destroy() + + async def inner_async(): + caches = [] + try: + caches = await generate_caches() + yield caches + finally: + await asyncio.gather(*[cache.destroy() for cache in caches]) + + return inner_async() if isinstance(client, AioClient) else inner() + + +@pytest.fixture +def cache(client): + cache = client.get_or_create_cache({ PROP_NAME: 'test_cache_1', PROP_CACHE_MODE: CacheMode.PARTITIONED, }) + try: + yield cache + finally: + cache.destroy() + +@pytest.fixture +async def async_cache(async_client): + cache = await async_client.get_or_create_cache({ + PROP_NAME: 'test_cache_1', + PROP_CACHE_MODE: CacheMode.PARTITIONED, + }) + try: + yield cache + finally: + await cache.destroy() + + +affinity_primitives_params = [ + # integers + (42, None), + (43, ByteObject), + (-44, ByteObject), + (45, IntObject), + (-46, IntObject), + (47, ShortObject), + (-48, ShortObject), + (49, LongObject), + (MAX_INT - 50, LongObject), + (MAX_INT + 51, LongObject), + + # floating point + (5.2, None), + (5.354, FloatObject), + (-5.556, FloatObject), + (-57.58, DoubleObject), + + # boolean + (True, None), + (True, BoolObject), + (False, BoolObject), + + # char + ('A', CharObject), + ('Z', CharObject), + ('⅓', CharObject), + ('á', CharObject), + ('ы', CharObject), + ('カ', CharObject), + ('Ø', CharObject), + ('ß', CharObject), + + # string + ('This is a test string', None), + ('Кириллица', None), + ('Little Mary had a lamb', String), + + # UUID + (UUID('12345678123456789876543298765432'), None), + (UUID('74274274274274274274274274274274'), UUIDObject), + (uuid4(), None), + + # decimal (long internal representation in Java) + (decimal.Decimal('-234.567'), None), + (decimal.Decimal('200.0'), None), + (decimal.Decimal('123.456'), DecimalObject), + (decimal.Decimal('1.0'), None), + (decimal.Decimal('0.02'), None), + + # decimal (BigInteger internal representation in Java) + (decimal.Decimal('12345671234567123.45671234567'), None), + (decimal.Decimal('-845678456.7845678456784567845'), None), + + # date and time + (datetime(1980, 1, 1), None), + ((datetime(1980, 1, 1), 999), TimestampObject), + (timedelta(days=99), TimeObject) +] + + +@pytest.mark.parametrize('key, key_hint', affinity_primitives_params) +def test_affinity(client, cache, key, key_hint): + __check_best_node_calculation(client, cache, key, 42, key_hint=key_hint) + + +@pytest.mark.parametrize('key, key_hint', affinity_primitives_params) +@pytest.mark.asyncio +async def test_affinity_async(async_client, async_cache, key, key_hint): + await __check_best_node_calculation(async_client, async_cache, key, 42, key_hint=key_hint) + + +@pytest.fixture +def key_generic_object(): class KeyClass( metaclass=GenericObjectMeta, 
schema={ @@ -195,24 +282,47 @@ class KeyClass( pass key = KeyClass() - key.NO = 2 - key.NAME = 'another_test_string' + key.NO = 1 + key.NAME = 'test_string' + yield key + - cache_1.put(key, 42) +@pytest.mark.parametrize('with_type_hint', [True, False]) +def test_affinity_for_generic_object(client, cache, key_generic_object, with_type_hint): + key_hint = BinaryObject if with_type_hint else None + __check_best_node_calculation(client, cache, key_generic_object, 42, key_hint=key_hint) - best_node = cache_1.get_best_node(key) - for node in filter(lambda n: n.alive, client._nodes): - result = cache_local_peek( - node, cache_1.cache_id, key - ) +@pytest.mark.parametrize('with_type_hint', [True, False]) +@pytest.mark.asyncio +async def test_affinity_for_generic_object_async(async_client, async_cache, key_generic_object, with_type_hint): + key_hint = BinaryObject if with_type_hint else None + await __check_best_node_calculation(async_client, async_cache, key_generic_object, 42, key_hint=key_hint) + + +def __check_best_node_calculation(client, cache, key, value, key_hint=None): + def check_peek_value(node, best_node, result): if node is best_node: - assert result.value == 42, ( - 'Affinity calculation error for {}'.format(key) - ) + assert result.value == value, f'Affinity calculation error for {key}' else: - assert result.value is None, ( - 'Affinity calculation error for {}'.format(key) - ) + assert result.value is None, f'Affinity calculation error for {key}' + + def inner(): + cache.put(key, value, key_hint=key_hint) + best_node = cache.get_best_node(key, key_hint=key_hint) + + for node in filter(lambda n: n.alive, client._nodes): + result = cache_local_peek(node, cache.cache_id, key, key_hint=key_hint) + + check_peek_value(node, best_node, result) + + async def inner_async(): + await cache.put(key, value, key_hint=key_hint) + best_node = await cache.get_best_node(key, key_hint=key_hint) + + for node in filter(lambda n: n.alive, client._nodes): + result = await cache_local_peek_async(node, cache.cache_id, key, key_hint=key_hint) + + check_peek_value(node, best_node, result) - cache_1.destroy() + return inner_async() if isinstance(client, AioClient) else inner() diff --git a/tests/affinity/test_affinity_bad_servers.py b/tests/affinity/test_affinity_bad_servers.py index 6fd08d5..b169168 100644 --- a/tests/affinity/test_affinity_bad_servers.py +++ b/tests/affinity/test_affinity_bad_servers.py @@ -15,9 +15,9 @@ import pytest -from pyignite.exceptions import ReconnectError +from pyignite.exceptions import ReconnectError, connection_errors from tests.affinity.conftest import CLIENT_SOCKET_TIMEOUT -from tests.util import start_ignite, kill_process_tree, get_client +from tests.util import start_ignite, kill_process_tree, get_client, get_client_async @pytest.fixture(params=['with-partition-awareness', 'without-partition-awareness']) @@ -26,10 +26,16 @@ def with_partition_awareness(request): def test_client_with_multiple_bad_servers(with_partition_awareness): - with pytest.raises(ReconnectError) as e_info: + with pytest.raises(ReconnectError, match="Can not connect."): with get_client(partition_aware=with_partition_awareness) as client: client.connect([("127.0.0.1", 10900), ("127.0.0.1", 10901)]) - assert str(e_info.value) == "Can not connect." 
+ + +@pytest.mark.asyncio +async def test_client_with_multiple_bad_servers_async(with_partition_awareness): + with pytest.raises(ReconnectError, match="Can not connect."): + async with get_client_async(partition_aware=with_partition_awareness) as client: + await client.connect([("127.0.0.1", 10900), ("127.0.0.1", 10901)]) def test_client_with_failed_server(request, with_partition_awareness): @@ -52,6 +58,27 @@ def test_client_with_failed_server(request, with_partition_awareness): kill_process_tree(srv.pid) +@pytest.mark.asyncio +async def test_client_with_failed_server_async(request, with_partition_awareness): + srv = start_ignite(idx=4) + try: + async with get_client_async(partition_aware=with_partition_awareness) as client: + await client.connect([("127.0.0.1", 10804)]) + cache = await client.get_or_create_cache(request.node.name) + await cache.put(1, 1) + kill_process_tree(srv.pid) + + if with_partition_awareness: + ex_class = (ReconnectError, ConnectionResetError) + else: + ex_class = ConnectionResetError + + with pytest.raises(ex_class): + await cache.get(1) + finally: + kill_process_tree(srv.pid) + + def test_client_with_recovered_server(request, with_partition_awareness): srv = start_ignite(idx=4) try: @@ -67,7 +94,7 @@ def test_client_with_recovered_server(request, with_partition_awareness): # First request may fail. try: cache.put(1, 2) - except: + except connection_errors: pass # Retry succeeds @@ -75,3 +102,29 @@ def test_client_with_recovered_server(request, with_partition_awareness): assert cache.get(1) == 2 finally: kill_process_tree(srv.pid) + + +@pytest.mark.asyncio +async def test_client_with_recovered_server_async(request, with_partition_awareness): + srv = start_ignite(idx=4) + try: + async with get_client_async(partition_aware=with_partition_awareness) as client: + await client.connect([("127.0.0.1", 10804)]) + cache = await client.get_or_create_cache(request.node.name) + await cache.put(1, 1) + + # Kill and restart server + kill_process_tree(srv.pid) + srv = start_ignite(idx=4) + + # First request may fail. + try: + await cache.put(1, 2) + except connection_errors: + pass + + # Retry succeeds + await cache.put(1, 2) + assert await cache.get(1) == 2 + finally: + kill_process_tree(srv.pid) diff --git a/tests/affinity/test_affinity_request_routing.py b/tests/affinity/test_affinity_request_routing.py index 101db39..64197ff 100644 --- a/tests/affinity/test_affinity_request_routing.py +++ b/tests/affinity/test_affinity_request_routing.py @@ -13,20 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import asyncio from collections import OrderedDict, deque +import random + import pytest -from pyignite import * -from pyignite.connection import Connection +from pyignite import GenericObjectMeta, AioClient, Client +from pyignite.aio_cache import AioCache +from pyignite.connection import Connection, AioConnection from pyignite.constants import PROTOCOL_BYTE_ORDER -from pyignite.datatypes import * +from pyignite.datatypes import String, LongObject from pyignite.datatypes.cache_config import CacheMode -from pyignite.datatypes.prop_codes import * -from tests.util import * - +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_BACKUPS_NUMBER, PROP_CACHE_KEY_CONFIGURATION, PROP_CACHE_MODE +from tests.util import wait_for_condition, wait_for_condition_async, start_ignite, kill_process_tree requests = deque() old_send = Connection.send +old_send_async = AioConnection._send def patched_send(self, *args, **kwargs): @@ -40,13 +44,26 @@ def patched_send(self, *args, **kwargs): return old_send(self, *args, **kwargs) +async def patched_send_async(self, *args, **kwargs): + """Patched send function that push to queue idx of server to which request is routed.""" + buf = args[0] + if buf and len(buf) >= 6: + op_code = int.from_bytes(buf[4:6], byteorder=PROTOCOL_BYTE_ORDER) + # Filter only caches operation. + if 1000 <= op_code < 1100: + requests.append(self.port % 100) + return await old_send_async(self, *args, **kwargs) + + def setup_function(): requests.clear() Connection.send = patched_send + AioConnection._send = patched_send_async def teardown_function(): Connection.send = old_send + AioConnection.send = old_send_async def wait_for_affinity_distribution(cache, key, node_idx, timeout=30): @@ -68,6 +85,25 @@ def check_grid_idx(): f"got {real_node_idx} instead") +async def wait_for_affinity_distribution_async(cache, key, node_idx, timeout=30): + real_node_idx = 0 + + async def check_grid_idx(): + nonlocal real_node_idx + try: + await cache.get(key) + real_node_idx = requests.pop() + except (OSError, IOError): + return False + return real_node_idx == node_idx + + res = await wait_for_condition_async(check_grid_idx, timeout=timeout) + + if not res: + raise TimeoutError(f"failed to wait for affinity distribution, expected node_idx {node_idx}," + f"got {real_node_idx} instead") + + @pytest.mark.parametrize("key,grid_idx", [(1, 1), (2, 2), (3, 3), (4, 1), (5, 1), (6, 2), (11, 1), (13, 1), (19, 1)]) @pytest.mark.parametrize("backups", [0, 1, 2, 3]) def test_cache_operation_on_primitive_key_routes_request_to_primary_node(request, key, grid_idx, backups, client): @@ -75,52 +111,56 @@ def test_cache_operation_on_primitive_key_routes_request_to_primary_node(request PROP_NAME: request.node.name + str(backups), PROP_BACKUPS_NUMBER: backups, }) + try: + __perform_operations_on_primitive_key(client, cache, key, grid_idx) + finally: + cache.destroy() - cache.put(key, key) - wait_for_affinity_distribution(cache, key, grid_idx) - - # Test - cache.get(key) - assert requests.pop() == grid_idx - - cache.put(key, key) - assert requests.pop() == grid_idx - - cache.replace(key, key + 1) - assert requests.pop() == grid_idx - - cache.clear_key(key) - assert requests.pop() == grid_idx - - cache.contains_key(key) - assert requests.pop() == grid_idx - cache.get_and_put(key, 3) - assert requests.pop() == grid_idx +@pytest.mark.parametrize("key,grid_idx", [(1, 1), (2, 2), (3, 3), (4, 1), (5, 1), (6, 2), (11, 1), (13, 1), (19, 1)]) +@pytest.mark.parametrize("backups", [0, 1, 2, 3]) +@pytest.mark.asyncio +async def 
test_cache_operation_on_primitive_key_routes_request_to_primary_node_async( + request, key, grid_idx, backups, async_client): + cache = await async_client.get_or_create_cache({ + PROP_NAME: request.node.name + str(backups), + PROP_BACKUPS_NUMBER: backups, + }) + try: + await __perform_operations_on_primitive_key(async_client, cache, key, grid_idx) + finally: + await cache.destroy() - cache.get_and_put_if_absent(key, 4) - assert requests.pop() == grid_idx - cache.put_if_absent(key, 5) - assert requests.pop() == grid_idx +def __perform_operations_on_primitive_key(client, cache, key, grid_idx): + operations = [ + ('get', 1), ('put', 2), ('replace', 2), ('clear_key', 1), ('contains_key', 1), ('get_and_put', 2), + ('get_and_put_if_absent', 2), ('put_if_absent', 2), ('get_and_remove', 1), ('get_and_replace', 2), + ('remove_key', 1), ('remove_if_equals', 2), ('replace', 2), ('replace_if_equals', 3) + ] - cache.get_and_remove(key) - assert requests.pop() == grid_idx + def inner(): + cache.put(key, key) + wait_for_affinity_distribution(cache, key, grid_idx) - cache.get_and_replace(key, 6) - assert requests.pop() == grid_idx + for op_name, param_nums in operations: + op = getattr(cache, op_name) + args = [random.randint(-100, 100) for _ in range(0, param_nums - 1)] + op(key, *args) + assert requests.pop() == grid_idx - cache.remove_key(key) - assert requests.pop() == grid_idx + async def inner_async(): + await cache.put(key, key) + await wait_for_affinity_distribution_async(cache, key, grid_idx) - cache.remove_if_equals(key, -1) - assert requests.pop() == grid_idx + for op_name, param_nums in operations: + op = getattr(cache, op_name) + args = [random.randint(-100, 100) for _ in range(0, param_nums - 1)] + await op(key, *args) - cache.replace(key, -1) - assert requests.pop() == grid_idx + assert requests.pop() == grid_idx - cache.replace_if_equals(key, 10, -10) - assert requests.pop() == grid_idx + return inner_async() if isinstance(client, AioClient) else inner() @pytest.mark.skip(reason="Custom key objects are not supported yet") @@ -164,50 +204,144 @@ class AffinityTestType1( assert requests.pop() == grid_idx -def test_cache_operation_routed_to_new_cluster_node(request, client_not_connected): - client_not_connected.connect( - [("127.0.0.1", 10801), ("127.0.0.1", 10802), ("127.0.0.1", 10803), ("127.0.0.1", 10804)] - ) - cache = client_not_connected.get_or_create_cache(request.node.name) - key = 12 - wait_for_affinity_distribution(cache, key, 3) - cache.put(key, key) - cache.put(key, key) - assert requests.pop() == 3 +client_routed_connection_string = [('127.0.0.1', 10800 + idx) for idx in range(1, 5)] + - srv = start_ignite(idx=4) +@pytest.fixture +def client_routed_cache(request): + client = Client(partition_aware=True) try: - # Wait for rebalance and partition map exchange - wait_for_affinity_distribution(cache, key, 4) + client.connect(client_routed_connection_string) + yield client.get_or_create_cache(request.node.name) + finally: + client.close() + - # Response is correct and comes from the new node - res = cache.get_and_remove(key) - assert res == key - assert requests.pop() == 4 +@pytest.fixture +async def async_client_routed_cache(request): + client = AioClient(partition_aware=True) + try: + await client.connect(client_routed_connection_string) + yield await client.get_or_create_cache(request.node.name) finally: - kill_process_tree(srv.pid) + await client.close() + + +def test_cache_operation_routed_to_new_cluster_node(client_routed_cache): + 
__perform_cache_operation_routed_to_new_node(client_routed_cache) + + +@pytest.mark.asyncio +async def test_cache_operation_routed_to_new_cluster_node_async(async_client_routed_cache): + await __perform_cache_operation_routed_to_new_node(async_client_routed_cache) + + +def __perform_cache_operation_routed_to_new_node(cache): + key = 12 + + def inner(): + wait_for_affinity_distribution(cache, key, 3) + cache.put(key, key) + cache.put(key, key) + assert requests.pop() == 3 + + srv = start_ignite(idx=4) + try: + # Wait for rebalance and partition map exchange + wait_for_affinity_distribution(cache, key, 4) + + # Response is correct and comes from the new node + res = cache.get_and_remove(key) + assert res == key + assert requests.pop() == 4 + finally: + kill_process_tree(srv.pid) + + async def inner_async(): + await wait_for_affinity_distribution_async(cache, key, 3) + await cache.put(key, key) + await cache.put(key, key) + assert requests.pop() == 3 + + srv = start_ignite(idx=4) + try: + # Wait for rebalance and partition map exchange + await wait_for_affinity_distribution_async(cache, key, 4) + + # Response is correct and comes from the new node + res = await cache.get_and_remove(key) + assert res == key + assert requests.pop() == 4 + finally: + kill_process_tree(srv.pid) + + return inner_async() if isinstance(cache, AioCache) else inner() -def test_replicated_cache_operation_routed_to_random_node(request, client): +@pytest.fixture +def replicated_cache(request, client): cache = client.get_or_create_cache({ PROP_NAME: request.node.name, PROP_CACHE_MODE: CacheMode.REPLICATED, }) + try: + yield cache + finally: + cache.destroy() + + +@pytest.fixture +async def async_replicated_cache(request, async_client): + cache = await async_client.get_or_create_cache({ + PROP_NAME: request.node.name, + PROP_CACHE_MODE: CacheMode.REPLICATED, + }) + try: + yield cache + finally: + await cache.destroy() - verify_random_node(cache) + +def test_replicated_cache_operation_routed_to_random_node(replicated_cache): + verify_random_node(replicated_cache) + + +@pytest.mark.asyncio +async def test_replicated_cache_operation_routed_to_random_node_async(async_replicated_cache): + await verify_random_node(async_replicated_cache) def verify_random_node(cache): key = 1 - cache.put(key, key) - idx1 = requests.pop() - idx2 = idx1 - - # Try 10 times - random node may end up being the same - for _ in range(1, 10): + def inner(): cache.put(key, key) - idx2 = requests.pop() - if idx2 != idx1: - break - assert idx1 != idx2 + + idx1 = requests.pop() + idx2 = idx1 + + # Try 10 times - random node may end up being the same + for _ in range(1, 10): + cache.put(key, key) + idx2 = requests.pop() + if idx2 != idx1: + break + assert idx1 != idx2 + + async def inner_async(): + await cache.put(key, key) + + idx1 = requests.pop() + + idx2 = idx1 + + # Try 10 times - random node may end up being the same + for _ in range(1, 10): + await cache.put(key, key) + idx2 = requests.pop() + + if idx2 != idx1: + break + assert idx1 != idx2 + + return inner_async() if isinstance(cache, AioCache) else inner() diff --git a/tests/affinity/test_affinity_single_connection.py b/tests/affinity/test_affinity_single_connection.py index 0768011..c3d2473 100644 --- a/tests/affinity/test_affinity_single_connection.py +++ b/tests/affinity/test_affinity_single_connection.py @@ -15,15 +15,27 @@ import pytest -from pyignite import Client +from pyignite import Client, AioClient -@pytest.fixture(scope='module') +@pytest.fixture def client(): client = 
Client(partition_aware=True) - client.connect('127.0.0.1', 10801) - yield client - client.close() + try: + client.connect('127.0.0.1', 10801) + yield client + finally: + client.close() + + +@pytest.fixture +async def async_client(): + client = AioClient(partition_aware=True) + try: + await client.connect('127.0.0.1', 10801) + yield client + finally: + await client.close() def test_all_cache_operations_with_partition_aware_client_on_single_server(request, client): @@ -108,3 +120,88 @@ def test_all_cache_operations_with_partition_aware_client_on_single_server(reque assert not res assert res2 assert cache.get(key) == key2 + + +@pytest.mark.asyncio +async def test_all_cache_operations_with_partition_aware_client_on_single_server_async(request, async_client): + cache = await async_client.get_or_create_cache(request.node.name) + key = 1 + key2 = 2 + + # Put/Get + await cache.put(key, key) + assert await cache.get(key) == key + + # Replace + res = await cache.replace(key, key2) + assert res + assert await cache.get(key) == key2 + + # Clear + await cache.put(key2, key2) + await cache.clear_key(key2) + assert await cache.get(key2) is None + + # ContainsKey + assert await cache.contains_key(key) + assert not await cache.contains_key(key2) + + # GetAndPut + await cache.put(key, key) + res = await cache.get_and_put(key, key2) + assert res == key + assert await cache.get(key) == key2 + + # GetAndPutIfAbsent + await cache.clear_key(key) + res = await cache.get_and_put_if_absent(key, key) + res2 = await cache.get_and_put_if_absent(key, key2) + assert res is None + assert res2 == key + assert await cache.get(key) == key + + # PutIfAbsent + await cache.clear_key(key) + res = await cache.put_if_absent(key, key) + res2 = await cache.put_if_absent(key, key2) + assert res + assert not res2 + assert await cache.get(key) == key + + # GetAndRemove + await cache.put(key, key) + res = await cache.get_and_remove(key) + assert res == key + assert await cache.get(key) is None + + # GetAndReplace + await cache.put(key, key) + res = await cache.get_and_replace(key, key2) + assert res == key + assert await cache.get(key) == key2 + + # RemoveKey + await cache.put(key, key) + await cache.remove_key(key) + assert await cache.get(key) is None + + # RemoveIfEquals + await cache.put(key, key) + res = await cache.remove_if_equals(key, key2) + res2 = await cache.remove_if_equals(key, key) + assert not res + assert res2 + assert await cache.get(key) is None + + # Replace + await cache.put(key, key) + await cache.replace(key, key2) + assert await cache.get(key) == key2 + + # ReplaceIfEquals + await cache.put(key, key) + res = await cache.replace_if_equals(key, key2, key2) + res2 = await cache.replace_if_equals(key, key, key2) + assert not res + assert res2 + assert await cache.get(key) == key2 diff --git a/tests/common/conftest.py b/tests/common/conftest.py index 402aede..243d822 100644 --- a/tests/common/conftest.py +++ b/tests/common/conftest.py @@ -15,8 +15,7 @@ import pytest -from pyignite import Client -from pyignite.api import cache_create, cache_destroy +from pyignite import Client, AioClient from tests.util import start_ignite_gen @@ -38,19 +37,36 @@ def server3(): @pytest.fixture(scope='module') def client(): client = Client() + try: + client.connect('127.0.0.1', 10801) + yield client + finally: + client.close() - client.connect('127.0.0.1', 10801) - yield client +@pytest.fixture(scope='module') +async def async_client(event_loop): + client = AioClient() + try: + await client.connect('127.0.0.1', 10801) + yield client + 
finally: + await client.close() + - client.close() +@pytest.fixture +async def async_cache(async_client: 'AioClient'): + cache = await async_client.create_cache('my_bucket') + try: + yield cache + finally: + await cache.destroy() @pytest.fixture def cache(client): - cache_name = 'my_bucket' - conn = client.random_node - - cache_create(conn, cache_name) - yield cache_name - cache_destroy(conn, cache_name) + cache = client.create_cache('my_bucket') + try: + yield cache + finally: + cache.destroy() diff --git a/tests/common/test_binary.py b/tests/common/test_binary.py index 5fa2ec4..1d7192f 100644 --- a/tests/common/test_binary.py +++ b/tests/common/test_binary.py @@ -16,15 +16,17 @@ from collections import OrderedDict from decimal import Decimal +import pytest + from pyignite import GenericObjectMeta +from pyignite.aio_cache import AioCache from pyignite.datatypes import ( BinaryObject, BoolObject, IntObject, DecimalObject, LongObject, String, ByteObject, ShortObject, FloatObject, DoubleObject, CharObject, UUIDObject, DateObject, TimestampObject, TimeObject, EnumObject, BinaryEnumObject, ByteArrayObject, ShortArrayObject, IntArrayObject, LongArrayObject, FloatArrayObject, DoubleArrayObject, CharArrayObject, BoolArrayObject, UUIDArrayObject, DateArrayObject, TimestampArrayObject, TimeArrayObject, EnumArrayObject, StringArrayObject, DecimalArrayObject, ObjectArrayObject, CollectionObject, MapObject) -from pyignite.datatypes.prop_codes import * - +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_SQL_SCHEMA, PROP_QUERY_ENTITIES insert_data = [ [1, True, 'asdf', 42, Decimal('2.4')], @@ -54,7 +56,7 @@ insert_query = ''' INSERT INTO {} ( - test_pk, test_bool, test_str, test_int, test_decimal, + test_pk, test_bool, test_str, test_int, test_decimal, ) VALUES (?, ?, ?, ?, ?)'''.format(table_sql_name) select_query = '''SELECT * FROM {}'''.format(table_sql_name) @@ -62,51 +64,69 @@ drop_query = 'DROP TABLE {} IF EXISTS'.format(table_sql_name) -def test_sql_read_as_binary(client): +@pytest.fixture +def table_cache_read(client): client.sql(drop_query) - - # create table client.sql(create_query) - # insert some rows for line in insert_data: client.sql(insert_query, query_args=line) - table_cache = client.get_cache(table_cache_name) - result = table_cache.scan() - - # convert Binary object fields' values to a tuple - # to compare it with the initial data - for key, value in result: - assert key in {x[0] for x in insert_data} - assert ( - value.TEST_BOOL, - value.TEST_STR, - value.TEST_INT, - value.TEST_DECIMAL - ) in {tuple(x[1:]) for x in insert_data} - - client.sql(drop_query) + cache = client.get_cache(table_cache_name) + yield cache + cache.destroy() -def test_sql_write_as_binary(client): - # configure cache as an SQL table - type_name = table_cache_name +@pytest.fixture +async def table_cache_read_async(async_client): + await async_client.sql(drop_query) + await async_client.sql(create_query) - # register binary type - class AllDataType( - metaclass=GenericObjectMeta, - type_name=type_name, - schema=OrderedDict([ - ('TEST_BOOL', BoolObject), - ('TEST_STR', String), - ('TEST_INT', IntObject), - ('TEST_DECIMAL', DecimalObject), - ]), - ): - pass - - table_cache = client.get_or_create_cache({ + for line in insert_data: + await async_client.sql(insert_query, query_args=line) + + cache = await async_client.get_cache(table_cache_name) + yield cache + await cache.destroy() + + +def test_sql_read_as_binary(table_cache_read): + with table_cache_read.scan() as cursor: + # convert Binary object fields' 
values to a tuple + # to compare it with the initial data + for key, value in cursor: + assert key in {x[0] for x in insert_data} + assert (value.TEST_BOOL, value.TEST_STR, value.TEST_INT, value.TEST_DECIMAL) \ + in {tuple(x[1:]) for x in insert_data} + + +@pytest.mark.asyncio +async def test_sql_read_as_binary_async(table_cache_read_async): + async with table_cache_read_async.scan() as cursor: + # convert Binary object fields' values to a tuple + # to compare it with the initial data + async for key, value in cursor: + assert key in {x[0] for x in insert_data} + assert (value.TEST_BOOL, value.TEST_STR, value.TEST_INT, value.TEST_DECIMAL) \ + in {tuple(x[1:]) for x in insert_data} + + +class AllDataType( + metaclass=GenericObjectMeta, + type_name=table_cache_name, + schema=OrderedDict([ + ('TEST_BOOL', BoolObject), + ('TEST_STR', String), + ('TEST_INT', IntObject), + ('TEST_DECIMAL', DecimalObject), + ]), +): + pass + + +@pytest.fixture +def table_cache_write_settings(): + return { PROP_NAME: table_cache_name, PROP_SQL_SCHEMA: scheme_name, PROP_QUERY_ENTITIES: [ @@ -142,15 +162,18 @@ class AllDataType( }, ], 'query_indexes': [], - 'value_type_name': type_name, + 'value_type_name': table_cache_name, 'value_field_name': None, }, ], - }) - table_settings = table_cache.settings - assert table_settings, 'SQL table cache settings are empty' + } + + +@pytest.fixture +def table_cache_write(client, table_cache_write_settings): + cache = client.get_or_create_cache(table_cache_write_settings) + assert cache.settings, 'SQL table cache settings are empty' - # insert rows as k-v for row in insert_data: value = AllDataType() ( @@ -159,13 +182,39 @@ class AllDataType( value.TEST_INT, value.TEST_DECIMAL, ) = row[1:] - table_cache.put(row[0], value, key_hint=IntObject) + cache.put(row[0], value, key_hint=IntObject) + + data = cache.scan() + assert len(list(data)) == len(insert_data), 'Not all data was read as key-value' + + yield cache + cache.destroy() - data = table_cache.scan() - assert len(list(data)) == len(insert_data), ( - 'Not all data was read as key-value' - ) +@pytest.fixture +async def async_table_cache_write(async_client, table_cache_write_settings): + cache = await async_client.get_or_create_cache(table_cache_write_settings) + assert await cache.settings(), 'SQL table cache settings are empty' + + for row in insert_data: + value = AllDataType() + ( + value.TEST_BOOL, + value.TEST_STR, + value.TEST_INT, + value.TEST_DECIMAL, + ) = row[1:] + await cache.put(row[0], value, key_hint=IntObject) + + async with cache.scan() as cursor: + data = [a async for a in cursor] + assert len(data) == len(insert_data), 'Not all data was read as key-value' + + yield cache + await cache.destroy() + + +def test_sql_write_as_binary(client, table_cache_write): # read rows as SQL data = client.sql(select_query, include_field_names=True) @@ -176,14 +225,29 @@ class AllDataType( data = list(data) assert len(data) == len(insert_data), 'Not all data was read as SQL rows' - # cleanup - table_cache.destroy() + +@pytest.mark.asyncio +async def test_sql_write_as_binary_async(async_client, async_table_cache_write): + # read rows as SQL + async with async_client.sql(select_query, include_field_names=True) as cursor: + header_row = await cursor.__anext__() + for field_name in AllDataType.schema.keys(): + assert field_name in header_row, 'Not all field names in header row' + + data = [v async for v in cursor] + assert len(data) == len(insert_data), 'Not all data was read as SQL rows' -def test_nested_binary_objects(client): +def 
test_nested_binary_objects(cache): + __check_nested_binary_objects(cache) - nested_cache = client.get_or_create_cache('nested_binary') +@pytest.mark.asyncio +async def test_nested_binary_objects_async(async_cache): + await __check_nested_binary_objects(async_cache) + + +def __check_nested_binary_objects(cache): class InnerType( metaclass=GenericObjectMeta, schema=OrderedDict([ @@ -203,29 +267,42 @@ class OuterType( ): pass - inner = InnerType(inner_int=42, inner_str='This is a test string') + def prepare_obj(): + inner = InnerType(inner_int=42, inner_str='This is a test string') + + return OuterType( + outer_int=43, + nested_binary=inner, + outer_str='This is another test string' + ) - outer = OuterType( - outer_int=43, - nested_binary=inner, - outer_str='This is another test string' - ) + def check_obj(result): + assert result.outer_int == 43 + assert result.outer_str == 'This is another test string' + assert result.nested_binary.inner_int == 42 + assert result.nested_binary.inner_str == 'This is a test string' - nested_cache.put(1, outer) + async def inner_async(): + await cache.put(1, prepare_obj()) + check_obj(await cache.get(1)) - result = nested_cache.get(1) - assert result.outer_int == 43 - assert result.outer_str == 'This is another test string' - assert result.nested_binary.inner_int == 42 - assert result.nested_binary.inner_str == 'This is a test string' + def inner(): + cache.put(1, prepare_obj()) + check_obj(cache.get(1)) - nested_cache.destroy() + return inner_async() if isinstance(cache, AioCache) else inner() -def test_add_schema_to_binary_object(client): +def test_add_schema_to_binary_object(cache): + __check_add_schema_to_binary_object(cache) - migrate_cache = client.get_or_create_cache('migrate_binary') +@pytest.mark.asyncio +async def test_add_schema_to_binary_object_async(async_cache): + await __check_add_schema_to_binary_object(async_cache) + + +def __check_add_schema_to_binary_object(cache): class MyBinaryType( metaclass=GenericObjectMeta, schema=OrderedDict([ @@ -236,54 +313,66 @@ class MyBinaryType( ): pass - binary_object = MyBinaryType( - test_str='Test string', - test_int=42, - test_bool=True, - ) - migrate_cache.put(1, binary_object) + def prepare_bo_v1(): + return MyBinaryType(test_str='Test string', test_int=42, test_bool=True) - result = migrate_cache.get(1) - assert result.test_str == 'Test string' - assert result.test_int == 42 - assert result.test_bool is True + def check_bo_v1(result): + assert result.test_str == 'Test string' + assert result.test_int == 42 + assert result.test_bool is True - modified_schema = MyBinaryType.schema.copy() - modified_schema['test_decimal'] = DecimalObject - del modified_schema['test_bool'] + def prepare_bo_v2(): + modified_schema = MyBinaryType.schema.copy() + modified_schema['test_decimal'] = DecimalObject + del modified_schema['test_bool'] - class MyBinaryTypeV2( - metaclass=GenericObjectMeta, - type_name='MyBinaryType', - schema=modified_schema, - ): - pass + class MyBinaryTypeV2( + metaclass=GenericObjectMeta, + type_name='MyBinaryType', + schema=modified_schema, + ): + pass + + assert MyBinaryType.type_id == MyBinaryTypeV2.type_id + assert MyBinaryType.schema_id != MyBinaryTypeV2.schema_id - assert MyBinaryType.type_id == MyBinaryTypeV2.type_id - assert MyBinaryType.schema_id != MyBinaryTypeV2.schema_id + return MyBinaryTypeV2(test_str='Another test', test_int=43, test_decimal=Decimal('2.34')) - binary_object_v2 = MyBinaryTypeV2( - test_str='Another test', - test_int=43, - test_decimal=Decimal('2.34') - ) + def 
check_bo_v2(result): + assert result.test_str == 'Another test' + assert result.test_int == 43 + assert result.test_decimal == Decimal('2.34') + assert not hasattr(result, 'test_bool') - migrate_cache.put(2, binary_object_v2) + async def inner_async(): + await cache.put(1, prepare_bo_v1()) + check_bo_v1(await cache.get(1)) + await cache.put(2, prepare_bo_v2()) + check_bo_v2(await cache.get(2)) - result = migrate_cache.get(2) - assert result.test_str == 'Another test' - assert result.test_int == 43 - assert result.test_decimal == Decimal('2.34') - assert not hasattr(result, 'test_bool') + def inner(): + cache.put(1, prepare_bo_v1()) + check_bo_v1(cache.get(1)) + cache.put(2, prepare_bo_v2()) + check_bo_v2(cache.get(2)) - migrate_cache.destroy() + return inner_async() if isinstance(cache, AioCache) else inner() -def test_complex_object_names(client): +def test_complex_object_names(cache): """ Test the ability to work with Complex types, which names contains symbols not suitable for use in Python identifiers. """ + __check_complex_object_names(cache) + + +@pytest.mark.asyncio +async def test_complex_object_names_async(async_cache): + await __check_complex_object_names(async_cache) + + +def __check_complex_object_names(cache): type_name = 'Non.Pythonic#type-name$' key = 'key' data = 'test' @@ -297,41 +386,47 @@ class NonPythonicallyNamedType( ): pass - cache = client.get_or_create_cache('test_name_cache') - cache.put(key, NonPythonicallyNamedType(field=data)) + def check(obj): + assert obj.type_name == type_name, 'Complex type name mismatch' + assert obj.field == data, 'Complex object data failure' - obj = cache.get(key) - assert obj.type_name == type_name, 'Complex type name mismatch' - assert obj.field == data, 'Complex object data failure' + async def inner_async(): + await cache.put(key, NonPythonicallyNamedType(field=data)) + check(await cache.get(key)) + def inner(): + cache.put(key, NonPythonicallyNamedType(field=data)) + check(cache.get(key)) -def test_complex_object_hash(client): - """ - Test that Python client correctly calculates hash of the binary object that - contains negative bytes. 
- """ - class Internal( - metaclass=GenericObjectMeta, - type_name='Internal', - schema=OrderedDict([ - ('id', IntObject), - ('str', String), - ]) - ): - pass + return inner_async() if isinstance(cache, AioCache) else inner() - class TestObject( - metaclass=GenericObjectMeta, - type_name='TestObject', - schema=OrderedDict([ - ('id', IntObject), - ('str', String), - ('internal', BinaryObject), - ]) - ): - pass - obj_ascii = TestObject() +class Internal( + metaclass=GenericObjectMeta, type_name='Internal', + schema=OrderedDict([ + ('id', IntObject), + ('str', String) + ]) +): + pass + + +class NestedObject( + metaclass=GenericObjectMeta, type_name='NestedObject', + schema=OrderedDict([ + ('id', IntObject), + ('str', String), + ('internal', BinaryObject) + ]) +): + pass + + +@pytest.fixture +def complex_objects(): + fixtures = [] + + obj_ascii = NestedObject() obj_ascii.id = 1 obj_ascii.str = 'test_string' @@ -339,11 +434,9 @@ class TestObject( obj_ascii.internal.id = 2 obj_ascii.internal.str = 'lorem ipsum' - hash_ascii = BinaryObject.hashcode(obj_ascii, client=client) - - assert hash_ascii == -1314567146, 'Invalid hashcode value for object with ASCII strings' + fixtures.append((obj_ascii, -1314567146)) - obj_utf8 = TestObject() + obj_utf8 = NestedObject() obj_utf8.id = 1 obj_utf8.str = 'юникод' @@ -351,39 +444,63 @@ class TestObject( obj_utf8.internal.id = 2 obj_utf8.internal.str = 'ユニコード' - hash_utf8 = BinaryObject.hashcode(obj_utf8, client=client) + fixtures.append((obj_utf8, -1945378474)) - assert hash_utf8 == -1945378474, 'Invalid hashcode value for object with UTF-8 strings' + yield fixtures -def test_complex_object_null_fields(client): - """ - Test that Python client can correctly write and read binary object that - contains null fields. - """ - def camel_to_snake(name): - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower() - - fields = {camel_to_snake(type_.__name__): type_ for type_ in [ - ByteObject, ShortObject, IntObject, LongObject, FloatObject, DoubleObject, CharObject, BoolObject, UUIDObject, - DateObject, TimestampObject, TimeObject, EnumObject, BinaryEnumObject, ByteArrayObject, ShortArrayObject, - IntArrayObject, LongArrayObject, FloatArrayObject, DoubleArrayObject, CharArrayObject, BoolArrayObject, - UUIDArrayObject, DateArrayObject, TimestampArrayObject, TimeArrayObject, EnumArrayObject, String, - StringArrayObject, DecimalObject, DecimalArrayObject, ObjectArrayObject, CollectionObject, MapObject, - BinaryObject]} - - class AllTypesObject(metaclass=GenericObjectMeta, type_name='AllTypesObject', schema=fields): - pass +def test_complex_object_hash(client, complex_objects): + for obj, hash in complex_objects: + assert hash == BinaryObject.hashcode(obj, client) + + +@pytest.mark.asyncio +async def test_complex_object_hash_async(async_client, complex_objects): + for obj, hash in complex_objects: + assert hash == await BinaryObject.hashcode_async(obj, async_client) + + +def camel_to_snake(name): + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower() + + +fields = {camel_to_snake(type_.__name__): type_ for type_ in [ + ByteObject, ShortObject, IntObject, LongObject, FloatObject, DoubleObject, CharObject, BoolObject, UUIDObject, + DateObject, TimestampObject, TimeObject, EnumObject, BinaryEnumObject, ByteArrayObject, ShortArrayObject, + IntArrayObject, LongArrayObject, FloatArrayObject, DoubleArrayObject, CharArrayObject, BoolArrayObject, + UUIDArrayObject, DateArrayObject, TimestampArrayObject, TimeArrayObject, EnumArrayObject, String, + StringArrayObject, 
DecimalObject, DecimalArrayObject, ObjectArrayObject, CollectionObject, MapObject, + BinaryObject]} + + +class AllTypesObject(metaclass=GenericObjectMeta, type_name='AllTypesObject', schema=fields): + pass - key = 42 - null_fields_value = AllTypesObject() + +@pytest.fixture +def null_fields_object(): + res = AllTypesObject() for field in fields.keys(): - setattr(null_fields_value, field, None) + setattr(res, field, None) + + yield res - cache = client.get_or_create_cache('all_types_test_cache') - cache.put(key, null_fields_value) - got_obj = cache.get(key) +def test_complex_object_null_fields(cache, null_fields_object): + """ + Test that Python client can correctly write and read binary object that + contains null fields. + """ + cache.put(1, null_fields_object) + assert cache.get(1) == null_fields_object, 'Objects mismatch' - assert got_obj == null_fields_value, 'Objects mismatch' + +@pytest.mark.asyncio +async def test_complex_object_null_fields_async(async_cache, null_fields_object): + """ + Test that Python client can correctly write and read binary object that + contains null fields. + """ + await async_cache.put(1, null_fields_object) + assert await async_cache.get(1) == null_fields_object, 'Objects mismatch' diff --git a/tests/common/test_cache_class.py b/tests/common/test_cache_class.py index 940160a..02dfa82 100644 --- a/tests/common/test_cache_class.py +++ b/tests/common/test_cache_class.py @@ -19,66 +19,56 @@ import pytest from pyignite import GenericObjectMeta -from pyignite.datatypes import ( - BoolObject, DecimalObject, FloatObject, IntObject, String, -) -from pyignite.datatypes.prop_codes import * +from pyignite.datatypes import BoolObject, DecimalObject, FloatObject, IntObject, String +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_CACHE_KEY_CONFIGURATION from pyignite.exceptions import CacheError, ParameterError def test_cache_create(client): cache = client.get_or_create_cache('my_oop_cache') - assert cache.name == cache.settings[PROP_NAME] == 'my_oop_cache' - cache.destroy() - - -def test_cache_remove(client): - cache = client.get_or_create_cache('my_cache') - cache.clear() - assert cache.get_size() == 0 - - cache.put_all({ - 'key_1': 1, - 'key_2': 2, - 'key_3': 3, - 'key_4': 4, - 'key_5': 5, - }) - assert cache.get_size() == 5 - - result = cache.remove_if_equals('key_1', 42) - assert result is False - assert cache.get_size() == 5 - - result = cache.remove_if_equals('key_1', 1) - assert result is True - assert cache.get_size() == 4 + try: + assert cache.name == cache.settings[PROP_NAME] == 'my_oop_cache' + finally: + cache.destroy() - cache.remove_keys(['key_1', 'key_3', 'key_5', 'key_7']) - assert cache.get_size() == 2 - cache.remove_all() - assert cache.get_size() == 0 +@pytest.mark.asyncio +async def test_cache_create_async(async_client): + cache = await async_client.get_or_create_cache('my_oop_cache') + try: + assert (await cache.name()) == (await cache.settings())[PROP_NAME] == 'my_oop_cache' + finally: + await cache.destroy() -def test_cache_get(client): +def test_get_cache(client): my_cache = client.get_or_create_cache('my_cache') - assert my_cache.settings[PROP_NAME] == 'my_cache' - my_cache.destroy() - - error = None + try: + assert my_cache.settings[PROP_NAME] == 'my_cache' + finally: + my_cache.destroy() my_cache = client.get_cache('my_cache') - try: + with pytest.raises(CacheError): _ = my_cache.settings[PROP_NAME] - except CacheError as e: - error = e - assert type(error) is CacheError + +@pytest.mark.asyncio +async def 
test_get_cache_async(async_client): + my_cache = await async_client.get_or_create_cache('my_cache') + try: + assert (await my_cache.settings())[PROP_NAME] == 'my_cache' + finally: + await my_cache.destroy() + + my_cache = await async_client.get_cache('my_cache') + with pytest.raises(CacheError): + _ = (await my_cache.settings())[PROP_NAME] -def test_cache_config(client): - cache_config = { +@pytest.fixture +def cache_config(): + yield { PROP_NAME: 'my_oop_cache', PROP_CACHE_KEY_CONFIGURATION: [ { @@ -87,28 +77,31 @@ def test_cache_config(client): }, ], } - client.create_cache(cache_config) - - cache = client.get_or_create_cache('my_oop_cache') - assert cache.name == cache_config[PROP_NAME] - assert ( - cache.settings[PROP_CACHE_KEY_CONFIGURATION] - == cache_config[PROP_CACHE_KEY_CONFIGURATION] - ) - cache.destroy() - -def test_cache_get_put(client): +def test_cache_config(client, cache_config): + client.create_cache(cache_config) cache = client.get_or_create_cache('my_oop_cache') - cache.put('my_key', 42) - result = cache.get('my_key') - assert result, 42 - cache.destroy() + try: + assert cache.name == cache_config[PROP_NAME] + assert cache.settings[PROP_CACHE_KEY_CONFIGURATION] == cache_config[PROP_CACHE_KEY_CONFIGURATION] + finally: + cache.destroy() -def test_cache_binary_get_put(client): +@pytest.mark.asyncio +async def test_cache_config_async(async_client, cache_config): + await async_client.create_cache(cache_config) + cache = await async_client.get_or_create_cache('my_oop_cache') + try: + assert await cache.name() == cache_config[PROP_NAME] + assert (await cache.settings())[PROP_CACHE_KEY_CONFIGURATION] == cache_config[PROP_CACHE_KEY_CONFIGURATION] + finally: + await cache.destroy() + +@pytest.fixture +def binary_type_fixture(): class TestBinaryType( metaclass=GenericObjectMeta, schema=OrderedDict([ @@ -120,52 +113,63 @@ class TestBinaryType( ): pass - cache = client.create_cache('my_oop_cache') - - my_value = TestBinaryType( + return TestBinaryType( test_bool=True, test_str='This is a test', test_int=42, test_decimal=Decimal('34.56'), ) - cache.put('my_key', my_value) + +def test_cache_binary_get_put(cache, binary_type_fixture): + cache.put('my_key', binary_type_fixture) value = cache.get('my_key') - assert value.test_bool is True - assert value.test_str == 'This is a test' - assert value.test_int == 42 - assert value.test_decimal == Decimal('34.56') + assert value.test_bool == binary_type_fixture.test_bool + assert value.test_str == binary_type_fixture.test_str + assert value.test_int == binary_type_fixture.test_int + assert value.test_decimal == binary_type_fixture.test_decimal - cache.destroy() +@pytest.mark.asyncio +async def test_cache_binary_get_put_async(async_cache, binary_type_fixture): + await async_cache.put('my_key', binary_type_fixture) -def test_get_binary_type(client): - client.put_binary_type( - 'TestBinaryType', - schema=OrderedDict([ + value = await async_cache.get('my_key') + assert value.test_bool == binary_type_fixture.test_bool + assert value.test_str == binary_type_fixture.test_str + assert value.test_int == binary_type_fixture.test_int + assert value.test_decimal == binary_type_fixture.test_decimal + + +@pytest.fixture +def binary_type_schemas_fixture(): + schemas = [ + OrderedDict([ ('TEST_BOOL', BoolObject), ('TEST_STR', String), ('TEST_INT', IntObject), - ]) - ) - client.put_binary_type( - 'TestBinaryType', - schema=OrderedDict([ + ]), + OrderedDict([ ('TEST_BOOL', BoolObject), ('TEST_STR', String), ('TEST_INT', IntObject), ('TEST_FLOAT', FloatObject), - 
]) - ) - client.put_binary_type( - 'TestBinaryType', - schema=OrderedDict([ + ]), + OrderedDict([ ('TEST_BOOL', BoolObject), ('TEST_STR', String), ('TEST_INT', IntObject), ('TEST_DECIMAL', DecimalObject), ]) - ) + ] + yield 'TestBinaryType', schemas + + +def test_get_binary_type(client, binary_type_schemas_fixture): + type_name, schemas = binary_type_schemas_fixture + + for schema in schemas: + client.put_binary_type(type_name, schema=schema) binary_type_info = client.get_binary_type('TestBinaryType') assert len(binary_type_info['schemas']) == 3 @@ -175,60 +179,37 @@ def test_get_binary_type(client): assert len(binary_type_info) == 1 -@pytest.mark.parametrize('page_size', range(1, 17, 5)) -def test_cache_scan(request, client, page_size): - test_data = { - 1: 'This is a test', - 2: 'One more test', - 3: 'Foo', - 4: 'Buzz', - 5: 'Bar', - 6: 'Lorem ipsum', - 7: 'dolor sit amet', - 8: 'consectetur adipiscing elit', - 9: 'Nullam aliquet', - 10: 'nisl at ante', - 11: 'suscipit', - 12: 'ut cursus', - 13: 'metus interdum', - 14: 'Nulla tincidunt', - 15: 'sollicitudin iaculis', - } - - cache = client.get_or_create_cache(request.node.name) - cache.put_all(test_data) - - gen = cache.scan(page_size=page_size) - received_data = [] - for k, v in gen: - assert k in test_data.keys() - assert v in test_data.values() - received_data.append((k, v)) - assert len(received_data) == len(test_data) +@pytest.mark.asyncio +async def test_get_binary_type_async(async_client, binary_type_schemas_fixture): + type_name, schemas = binary_type_schemas_fixture - cache.destroy() + for schema in schemas: + await async_client.put_binary_type(type_name, schema=schema) + binary_type_info = await async_client.get_binary_type('TestBinaryType') + assert len(binary_type_info['schemas']) == 3 -def test_get_and_put_if_absent(client): - cache = client.get_or_create_cache('my_oop_cache') - - value = cache.get_and_put_if_absent('my_key', 42) - assert value is None - cache.put('my_key', 43) - value = cache.get_and_put_if_absent('my_key', 42) - assert value is 43 + binary_type_info = await async_client.get_binary_type('NonExistentType') + assert binary_type_info['type_exists'] is False + assert len(binary_type_info) == 1 -def test_cache_get_when_cache_does_not_exist(client): +def test_get_cache_errors(client): cache = client.get_cache('missing-cache') - with pytest.raises(CacheError) as e_info: - cache.put(1, 1) - assert str(e_info.value) == "Cache does not exist [cacheId= 1665146971]" + with pytest.raises(CacheError, match=r'Cache does not exist \[cacheId='): + cache.put(1, 1) -def test_cache_create_with_none_name(client): - with pytest.raises(ParameterError) as e_info: + with pytest.raises(ParameterError, match="You should supply at least cache name"): client.create_cache(None) - assert str(e_info.value) == "You should supply at least cache name" +@pytest.mark.asyncio +async def test_get_cache_errors_async(async_client): + cache = await async_client.get_cache('missing-cache') + + with pytest.raises(CacheError, match=r'Cache does not exist \[cacheId='): + await cache.put(1, 1) + + with pytest.raises(ParameterError, match="You should supply at least cache name"): + await async_client.create_cache(None) diff --git a/tests/common/test_cache_class_sql.py b/tests/common/test_cache_class_sql.py deleted file mode 100644 index 5f72b39..0000000 --- a/tests/common/test_cache_class_sql.py +++ /dev/null @@ -1,103 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest - - -initial_data = [ - ('John', 'Doe', 5), - ('Jane', 'Roe', 4), - ('Joe', 'Bloggs', 4), - ('Richard', 'Public', 3), - ('Negidius', 'Numerius', 3), - ] - -create_query = '''CREATE TABLE Student ( - id INT(11) PRIMARY KEY, - first_name CHAR(24), - last_name CHAR(32), - grade INT(11))''' - -insert_query = '''INSERT INTO Student(id, first_name, last_name, grade) -VALUES (?, ?, ?, ?)''' - -select_query = 'SELECT id, first_name, last_name, grade FROM Student' - -drop_query = 'DROP TABLE Student IF EXISTS' - - -@pytest.mark.parametrize('page_size', range(1, 6, 2)) -def test_sql_fields(client, page_size): - - client.sql(drop_query, page_size) - - result = client.sql(create_query, page_size) - assert next(result)[0] == 0 - - for i, data_line in enumerate(initial_data, start=1): - fname, lname, grade = data_line - result = client.sql( - insert_query, - page_size, - query_args=[i, fname, lname, grade] - ) - assert next(result)[0] == 1 - - result = client.sql( - select_query, - page_size, - include_field_names=True, - ) - field_names = next(result) - assert set(field_names) == {'ID', 'FIRST_NAME', 'LAST_NAME', 'GRADE'} - - data = list(result) - assert len(data) == 5 - for row in data: - assert len(row) == 4 - - client.sql(drop_query, page_size) - - -@pytest.mark.parametrize('page_size', range(1, 6, 2)) -def test_sql(client, page_size): - - client.sql(drop_query, page_size) - - result = client.sql(create_query, page_size) - assert next(result)[0] == 0 - - for i, data_line in enumerate(initial_data, start=1): - fname, lname, grade = data_line - result = client.sql( - insert_query, - page_size, - query_args=[i, fname, lname, grade] - ) - assert next(result)[0] == 1 - - student = client.get_or_create_cache('SQL_PUBLIC_STUDENT') - result = student.select_row('TRUE', page_size) - for k, v in result: - assert k in range(1, 6) - assert v.FIRST_NAME in [ - 'John', - 'Jane', - 'Joe', - 'Richard', - 'Negidius', - ] - - client.sql(drop_query, page_size) diff --git a/tests/common/test_cache_composite_key_class_sql.py b/tests/common/test_cache_composite_key_class_sql.py deleted file mode 100644 index 989a229..0000000 --- a/tests/common/test_cache_composite_key_class_sql.py +++ /dev/null @@ -1,122 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from collections import OrderedDict - -from pyignite import GenericObjectMeta -from pyignite.datatypes import ( - IntObject, String -) - - -class StudentKey( - metaclass=GenericObjectMeta, - type_name='test.model.StudentKey', - schema=OrderedDict([ - ('ID', IntObject), - ('DEPT', String) - ]) - ): - pass - - -class Student( - metaclass=GenericObjectMeta, - type_name='test.model.Student', - schema=OrderedDict([ - ('NAME', String), - ]) - ): - pass - - -create_query = '''CREATE TABLE StudentTable ( - id INT(11), - dept VARCHAR, - name CHAR(24), - PRIMARY KEY (id, dept)) - WITH "CACHE_NAME=StudentCache, KEY_TYPE=test.model.StudentKey, VALUE_TYPE=test.model.Student"''' - -insert_query = '''INSERT INTO StudentTable (id, dept, name) VALUES (?, ?, ?)''' - -select_query = 'SELECT _KEY, id, dept, name FROM StudentTable' - -drop_query = 'DROP TABLE StudentTable IF EXISTS' - - -def test_cache_get_with_composite_key_finds_sql_value(client): - """ - Should query a record with composite key and calculate - internal hashcode correctly. - """ - - client.sql(drop_query) - - # Create table. - result = client.sql(create_query) - assert next(result)[0] == 0 - - student_key = StudentKey(1, 'Acct') - student_val = Student('John') - - # Put new Strudent with StudentKey. - result = client.sql(insert_query, query_args=[student_key.ID, student_key.DEPT, student_val.NAME]) - assert next(result)[0] == 1 - - # Cache get finds the same value. - studentCache = client.get_cache('StudentCache') - val = studentCache.get(student_key) - assert val is not None - assert val.NAME == student_val.NAME - - query_result = list(client.sql(select_query, include_field_names=True)) - - validate_query_result(student_key, student_val, query_result) - - -def test_python_sql_finds_inserted_value_with_composite_key(client): - """ - Insert a record with a composite key and query it with SELECT SQL. - """ - - client.sql(drop_query) - - # Create table. - result = client.sql(create_query) - assert next(result)[0] == 0 - - student_key = StudentKey(2, 'Business') - student_val = Student('Abe') - - # Put new value using cache. - studentCache = client.get_cache('StudentCache') - studentCache.put(student_key, student_val) - - # Find the value using SQL. - query_result = list(client.sql(select_query, include_field_names=True)) - - validate_query_result(student_key, student_val, query_result) - - -def validate_query_result(student_key, student_val, query_result): - """ - Compare query result with expected key and value. - """ - assert len(query_result) == 2 - sql_row = dict(zip(query_result[0], query_result[1])) - - assert sql_row['ID'] == student_key.ID - assert sql_row['DEPT'] == student_key.DEPT - assert sql_row['NAME'] == student_val.NAME diff --git a/tests/common/test_cache_config.py b/tests/common/test_cache_config.py index b708b0c..f4c8067 100644 --- a/tests/common/test_cache_config.py +++ b/tests/common/test_cache_config.py @@ -12,29 +12,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import pytest -from pyignite.api import * -from pyignite.datatypes.prop_codes import * +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_CACHE_KEY_CONFIGURATION +from pyignite.exceptions import CacheError +cache_name = 'config_cache' -def test_get_configuration(client): - conn = client.random_node - - result = cache_get_or_create(conn, 'my_unique_cache') - assert result.status == 0 - - result = cache_get_configuration(conn, 'my_unique_cache') - assert result.status == 0 - assert result.value[PROP_NAME] == 'my_unique_cache' - - -def test_create_with_config(client): - - cache_name = 'my_very_unique_name' - conn = client.random_node - - result = cache_create_with_config(conn, { +@pytest.fixture +def cache_config(): + return { PROP_NAME: cache_name, PROP_CACHE_KEY_CONFIGURATION: [ { @@ -42,38 +30,86 @@ def test_create_with_config(client): 'affinity_key_field_name': 'abc1234', } ], - }) - assert result.status == 0 + } - result = cache_get_names(conn) - assert cache_name in result.value - result = cache_create_with_config(conn, { - PROP_NAME: cache_name, - }) - assert result.status != 0 +@pytest.fixture +def cache(client): + cache = client.get_or_create_cache(cache_name) + yield cache + cache.destroy() -def test_get_or_create_with_config(client): +@pytest.fixture +async def async_cache(async_client): + cache = await async_client.get_or_create_cache(cache_name) + yield cache + await cache.destroy() - cache_name = 'my_very_unique_name' - conn = client.random_node - result = cache_get_or_create_with_config(conn, { - PROP_NAME: cache_name, - PROP_CACHE_KEY_CONFIGURATION: [ - { - 'type_name': 'blah', - 'affinity_key_field_name': 'abc1234', - } - ], - }) - assert result.status == 0 +@pytest.fixture +def cache_with_config(client, cache_config): + cache = client.get_or_create_cache(cache_config) + yield cache + cache.destroy() - result = cache_get_names(conn) - assert cache_name in result.value - result = cache_get_or_create_with_config(conn, { - PROP_NAME: cache_name, - }) - assert result.status == 0 +@pytest.fixture +async def async_cache_with_config(async_client, cache_config): + cache = await async_client.get_or_create_cache(cache_config) + yield cache + await cache.destroy() + + +def test_cache_get_configuration(client, cache): + assert cache_name in client.get_cache_names() + assert cache.settings[PROP_NAME] == cache_name + + +@pytest.mark.asyncio +async def test_cache_get_configuration_async(async_client, async_cache): + assert cache_name in (await async_client.get_cache_names()) + assert (await async_cache.settings())[PROP_NAME] == cache_name + + +def test_get_or_create_with_config_existing(client, cache_with_config, cache_config): + assert cache_name in client.get_cache_names() + + with pytest.raises(CacheError): + client.create_cache(cache_config) + + cache = client.get_or_create_cache(cache_config) + assert cache.settings == cache_with_config.settings + + +@pytest.mark.asyncio +async def test_get_or_create_with_config_existing_async(async_client, async_cache_with_config, cache_config): + assert cache_name in (await async_client.get_cache_names()) + + with pytest.raises(CacheError): + await async_client.create_cache(cache_config) + + cache = await async_client.get_or_create_cache(cache_config) + assert (await cache.settings()) == (await async_cache_with_config.settings()) + + +def test_get_or_create_with_config_new(client, cache_config): + assert cache_name not in client.get_cache_names() + cache = client.get_or_create_cache(cache_config) + try: + assert cache_name in 
client.get_cache_names() + assert cache.settings[PROP_NAME] == cache_name + finally: + cache.destroy() + + +@pytest.mark.asyncio +async def test_get_or_create_with_config_new_async(async_client, cache_config): + assert cache_name not in (await async_client.get_cache_names()) + + cache = await async_client.get_or_create_cache(cache_config) + try: + assert cache_name in (await async_client.get_cache_names()) + assert (await cache.settings())[PROP_NAME] == cache_name + finally: + await cache.destroy() diff --git a/tests/common/test_datatypes.py b/tests/common/test_datatypes.py index 83e9a60..c1aa19f 100644 --- a/tests/common/test_datatypes.py +++ b/tests/common/test_datatypes.py @@ -20,199 +20,239 @@ import pytest import uuid -from pyignite.api.key_value import cache_get, cache_put -from pyignite.datatypes import * +from pyignite.datatypes import ( + ByteObject, IntObject, FloatObject, CharObject, ShortObject, BoolObject, ByteArrayObject, IntArrayObject, + ShortArrayObject, FloatArrayObject, BoolArrayObject, CharArrayObject, TimestampObject, String, BinaryEnumObject, + TimestampArrayObject, BinaryEnumArrayObject, ObjectArrayObject, CollectionObject, MapObject +) from pyignite.utils import unsigned +put_get_data_params = [ + # integers + (42, None), + (42, ByteObject), + (42, ShortObject), + (42, IntObject), + + # floats + (3.1415, None), # True for Double but not Float + (3.5, FloatObject), + + # char is never autodetected + ('ы', CharObject), + ('カ', CharObject), + + # bool + (True, None), + (False, None), + (True, BoolObject), + (False, BoolObject), + + # arrays of integers + ([1, 2, 3, 5], None), + (b'buzz', ByteArrayObject), + (bytearray([7, 8, 8, 11]), None), + (bytearray([7, 8, 8, 11]), ByteArrayObject), + ([1, 2, 3, 5], ShortArrayObject), + ([1, 2, 3, 5], IntArrayObject), + + # arrays of floats + ([2.2, 4.4, 6.6], None), + ([2.5, 6.5], FloatArrayObject), + + # array of char + (['ы', 'カ'], CharArrayObject), + + # array of bool + ([True, False, True], None), + ([True, False], BoolArrayObject), + ([False, True], BoolArrayObject), + ([True, False, True, False], BoolArrayObject), + + # string + ('Little Mary had a lamb', None), + ('This is a test', String), + + # decimals + (decimal.Decimal('2.5'), None), + (decimal.Decimal('-1.3'), None), + + # uuid + (uuid.uuid4(), None), + + # date + (datetime(year=1998, month=4, day=6, hour=18, minute=30), None), + + # no autodetection for timestamp either + ( + (datetime(year=1998, month=4, day=6, hour=18, minute=30), 1000), + TimestampObject + ), + + # time + (timedelta(days=4, hours=4, minutes=24), None), + + # enum is useless in Python, except for interoperability with Java. 
+ # Also no autodetection + ((5, 6), BinaryEnumObject), + + # arrays of standard types + (['String 1', 'String 2'], None), + (['Some of us are empty', None, 'But not the others'], None), + + ([decimal.Decimal('2.71828'), decimal.Decimal('100')], None), + ([decimal.Decimal('2.1'), None, decimal.Decimal('3.1415')], None), + + ([uuid.uuid4(), uuid.uuid4()], None), + ( + [ + datetime(year=2010, month=1, day=1), + datetime(year=2010, month=12, day=31), + ], + None, + ), + ([timedelta(minutes=30), timedelta(hours=2)], None), + ( + [ + (datetime(year=2010, month=1, day=1), 1000), + (datetime(year=2010, month=12, day=31), 200), + ], + TimestampArrayObject + ), + ((-1, [(6001, 1), (6002, 2), (6003, 3)]), BinaryEnumArrayObject), + + # object array + ((ObjectArrayObject.OBJECT, [1, 2, decimal.Decimal('3')]), ObjectArrayObject), + + # collection + ((CollectionObject.LINKED_LIST, [1, 2, 3]), None), + + # map + ((MapObject.HASH_MAP, {'key': 4, 5: 6.0}), None), + ((MapObject.LINKED_HASH_MAP, OrderedDict([('key', 4), (5, 6.0)])), None), +] + @pytest.mark.parametrize( 'value, value_hint', - [ - # integers - (42, None), - (42, ByteObject), - (42, ShortObject), - (42, IntObject), - - # floats - (3.1415, None), # True for Double but not Float - (3.5, FloatObject), - - # char is never autodetected - ('ы', CharObject), - ('カ', CharObject), - - # bool - (True, None), - (False, None), - (True, BoolObject), - (False, BoolObject), - - # arrays of integers - ([1, 2, 3, 5], None), - (b'buzz', ByteArrayObject), - (bytearray([7, 8, 8, 11]), None), - (bytearray([7, 8, 8, 11]), ByteArrayObject), - ([1, 2, 3, 5], ShortArrayObject), - ([1, 2, 3, 5], IntArrayObject), - - # arrays of floats - ([2.2, 4.4, 6.6], None), - ([2.5, 6.5], FloatArrayObject), - - # array of char - (['ы', 'カ'], CharArrayObject), - - # array of bool - ([True, False, True], None), - ([True, False], BoolArrayObject), - ([False, True], BoolArrayObject), - ([True, False, True, False], BoolArrayObject), - - # string - ('Little Mary had a lamb', None), - ('This is a test', String), - - # decimals - (decimal.Decimal('2.5'), None), - (decimal.Decimal('-1.3'), None), - - # uuid - (uuid.uuid4(), None), - - # date - (datetime(year=1998, month=4, day=6, hour=18, minute=30), None), - - # no autodetection for timestamp either - ( - (datetime(year=1998, month=4, day=6, hour=18, minute=30), 1000), - TimestampObject - ), - - # time - (timedelta(days=4, hours=4, minutes=24), None), - - # enum is useless in Python, except for interoperability with Java. 
- # Also no autodetection - ((5, 6), BinaryEnumObject), - - # arrays of standard types - (['String 1', 'String 2'], None), - (['Some of us are empty', None, 'But not the others'], None), - - ([decimal.Decimal('2.71828'), decimal.Decimal('100')], None), - ([decimal.Decimal('2.1'), None, decimal.Decimal('3.1415')], None), - - ([uuid.uuid4(), uuid.uuid4()], None), - ( - [ - datetime(year=2010, month=1, day=1), - datetime(year=2010, month=12, day=31), - ], - None, - ), - ([timedelta(minutes=30), timedelta(hours=2)], None), - ( - [ - (datetime(year=2010, month=1, day=1), 1000), - (datetime(year=2010, month=12, day=31), 200), - ], - TimestampArrayObject - ), - ((-1, [(6001, 1), (6002, 2), (6003, 3)]), BinaryEnumArrayObject), - - # object array - ((ObjectArrayObject.OBJECT, [1, 2, decimal.Decimal('3')]), ObjectArrayObject), - - # collection - ((CollectionObject.LINKED_LIST, [1, 2, 3]), None), - - # map - ((MapObject.HASH_MAP, {'key': 4, 5: 6.0}), None), - ((MapObject.LINKED_HASH_MAP, OrderedDict([('key', 4), (5, 6.0)])), None), - ] + put_get_data_params ) -def test_put_get_data(client, cache, value, value_hint): +def test_put_get_data(cache, value, value_hint): + cache.put('my_key', value, value_hint=value_hint) + assert cache.get('my_key') == value - conn = client.random_node - result = cache_put(conn, cache, 'my_key', value, value_hint=value_hint) - assert result.status == 0 +@pytest.mark.parametrize( + 'value, value_hint', + put_get_data_params +) +@pytest.mark.asyncio +async def test_put_get_data_async(async_cache, value, value_hint): + await async_cache.put('my_key', value, value_hint=value_hint) + assert await async_cache.get('my_key') == value - result = cache_get(conn, cache, 'my_key') - assert result.status == 0 - assert result.value == value - if isinstance(result.value, list): - for res, val in zip(result.value, value): - assert type(res) == type(val) +bytearray_params = [ + [1, 2, 3, 5], + (7, 8, 13, 18), + (-128, -1, 0, 1, 127, 255), +] @pytest.mark.parametrize( 'value', - [ - [1, 2, 3, 5], - (7, 8, 13, 18), - (-128, -1, 0, 1, 127, 255), - ] + bytearray_params ) -def test_bytearray_from_list_or_tuple(client, cache, value): +def test_bytearray_from_list_or_tuple(cache, value): """ ByteArrayObject's pythonic type is `bytearray`, but it should also accept lists or tuples as a content. """ - conn = client.random_node + cache.put('my_key', value, value_hint=ByteArrayObject) + + assert cache.get('my_key') == bytearray([unsigned(ch, ctypes.c_ubyte) for ch in value]) + + +@pytest.mark.parametrize( + 'value', + bytearray_params +) +@pytest.mark.asyncio +async def test_bytearray_from_list_or_tuple_async(async_cache, value): + """ + ByteArrayObject's pythonic type is `bytearray`, but it should also accept + lists or tuples as a content. 
+ """ + + await async_cache.put('my_key', value, value_hint=ByteArrayObject) + + result = await async_cache.get('my_key') + assert result == bytearray([unsigned(ch, ctypes.c_ubyte) for ch in value]) - result = cache_put( - conn, cache, 'my_key', value, value_hint=ByteArrayObject - ) - assert result.status == 0 - result = cache_get(conn, cache, 'my_key') - assert result.status == 0 - assert result.value == bytearray([ - unsigned(ch, ctypes.c_ubyte) for ch in value - ]) +uuid_params = [ + 'd57babad-7bc1-4c82-9f9c-e72841b92a85', + '5946c0c0-2b76-479d-8694-a2e64a3968da', + 'a521723d-ad5d-46a6-94ad-300f850ef704', +] + +uuid_table_create_sql = "CREATE TABLE test_uuid_repr (id INTEGER PRIMARY KEY, uuid_field UUID)" +uuid_table_drop_sql = "DROP TABLE test_uuid_repr IF EXISTS" +uuid_table_insert_sql = "INSERT INTO test_uuid_repr(id, uuid_field) VALUES (?, ?)" +uuid_table_query_sql = "SELECT * FROM test_uuid_repr WHERE uuid_field=?" + + +@pytest.fixture() +async def uuid_table(client): + client.sql(uuid_table_drop_sql) + client.sql(uuid_table_create_sql) + yield None + client.sql(uuid_table_drop_sql) + + +@pytest.fixture() +async def uuid_table_async(async_client): + await async_client.sql(uuid_table_drop_sql) + await async_client.sql(uuid_table_create_sql) + yield None + await async_client.sql(uuid_table_drop_sql) @pytest.mark.parametrize( 'uuid_string', - [ - 'd57babad-7bc1-4c82-9f9c-e72841b92a85', - '5946c0c0-2b76-479d-8694-a2e64a3968da', - 'a521723d-ad5d-46a6-94ad-300f850ef704', - ] + uuid_params ) -def test_uuid_representation(client, uuid_string): +def test_uuid_representation(client, uuid_string, uuid_table): """ Test if textual UUID representation is correct. """ uuid_value = uuid.UUID(uuid_string) - # initial cleanup - client.sql("DROP TABLE test_uuid_repr IF EXISTS") - # create table with UUID field - client.sql( - "CREATE TABLE test_uuid_repr (id INTEGER PRIMARY KEY, uuid_field UUID)" - ) # use uuid.UUID class to insert data - client.sql( - "INSERT INTO test_uuid_repr(id, uuid_field) VALUES (?, ?)", - query_args=[1, uuid_value] - ) + client.sql(uuid_table_insert_sql, query_args=[1, uuid_value]) # use hex string to retrieve data - result = client.sql( - "SELECT * FROM test_uuid_repr WHERE uuid_field='{}'".format( - uuid_string - ) - ) - - # finalize query - result = list(result) - - # final cleanup - client.sql("DROP TABLE test_uuid_repr IF EXISTS") - - # if a line was retrieved, our test was successful - assert len(result) == 1 - # doublecheck - assert result[0][1] == uuid_value + with client.sql(uuid_table_query_sql, query_args=[str(uuid_value)]) as cursor: + result = list(cursor) + + # if a line was retrieved, our test was successful + assert len(result) == 1 + assert result[0][1] == uuid_value + + +@pytest.mark.parametrize( + 'uuid_string', + uuid_params +) +@pytest.mark.asyncio +async def test_uuid_representation_async(async_client, uuid_string, uuid_table_async): + """ Test if textual UUID representation is correct. 
""" + uuid_value = uuid.UUID(uuid_string) + + # use uuid.UUID class to insert data + await async_client.sql(uuid_table_insert_sql, query_args=[1, uuid_value]) + # use hex string to retrieve data + async with async_client.sql(uuid_table_query_sql, query_args=[str(uuid_value)]) as cursor: + result = [row async for row in cursor] + + # if a line was retrieved, our test was successful + assert len(result) == 1 + assert result[0][1] == uuid_value diff --git a/tests/common/test_generic_object.py b/tests/common/test_generic_object.py index 73dc870..d6c0ee1 100644 --- a/tests/common/test_generic_object.py +++ b/tests/common/test_generic_object.py @@ -14,11 +14,10 @@ # limitations under the License. from pyignite import GenericObjectMeta -from pyignite.datatypes import * +from pyignite.datatypes import IntObject, String def test_go(): - class GenericObject( metaclass=GenericObjectMeta, schema={ diff --git a/tests/common/test_get_names.py b/tests/common/test_get_names.py index 2d6c0bc..7fcb499 100644 --- a/tests/common/test_get_names.py +++ b/tests/common/test_get_names.py @@ -12,21 +12,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import asyncio -from pyignite.api import cache_create, cache_get_names +import pytest def test_get_names(client): + bucket_names = {'my_bucket', 'my_bucket_2', 'my_bucket_3'} + for name in bucket_names: + client.get_or_create_cache(name) - conn = client.random_node + assert set(client.get_cache_names()) == bucket_names - bucket_names = ['my_bucket', 'my_bucket_2', 'my_bucket_3'] - for name in bucket_names: - cache_create(conn, name) - result = cache_get_names(conn) - assert result.status == 0 - assert type(result.value) == list - assert len(result.value) >= len(bucket_names) - for i, name in enumerate(bucket_names): - assert name in result.value +@pytest.mark.asyncio +async def test_get_names_async(async_client): + bucket_names = {'my_bucket', 'my_bucket_2', 'my_bucket_3'} + await asyncio.gather(*[async_client.get_or_create_cache(name) for name in bucket_names]) + + assert set(await async_client.get_cache_names()) == bucket_names diff --git a/tests/common/test_key_value.py b/tests/common/test_key_value.py index a7edce1..0f492a2 100644 --- a/tests/common/test_key_value.py +++ b/tests/common/test_key_value.py @@ -15,426 +15,405 @@ from datetime import datetime -from pyignite.api import * -from pyignite.datatypes import ( - CollectionObject, IntObject, MapObject, TimestampObject, -) +import pytest +from pyignite.datatypes import CollectionObject, IntObject, MapObject, TimestampObject -def test_put_get(client, cache): - conn = client.random_node +def test_put_get(cache): + cache.put('my_key', 5) - result = cache_put(conn, cache, 'my_key', 5) - assert result.status == 0 + assert cache.get('my_key') == 5 - result = cache_get(conn, cache, 'my_key') - assert result.status == 0 - assert result.value == 5 +@pytest.mark.asyncio +async def test_put_get_async(async_cache): + await async_cache.put('my_key', 5) -def test_get_all(client, cache): + assert await async_cache.get('my_key') == 5 - conn = client.random_node - result = cache_get_all(conn, cache, ['key_1', 2, (3, IntObject)]) - assert result.status == 0 - assert result.value == {} +def test_get_all(cache): + assert cache.get_all(['key_1', 2, (3, IntObject)]) == {} - cache_put(conn, cache, 'key_1', 4) - cache_put(conn, cache, 3, 18, key_hint=IntObject) + cache.put('key_1', 4) + cache.put(3, 
18, key_hint=IntObject) - result = cache_get_all(conn, cache, ['key_1', 2, (3, IntObject)]) - assert result.status == 0 - assert result.value == {'key_1': 4, 3: 18} + assert cache.get_all(['key_1', 2, (3, IntObject)]) == {'key_1': 4, 3: 18} -def test_put_all(client, cache): +@pytest.mark.asyncio +async def test_get_all_async(async_cache): + assert await async_cache.get_all(['key_1', 2, (3, IntObject)]) == {} - conn = client.random_node + await async_cache.put('key_1', 4) + await async_cache.put(3, 18, key_hint=IntObject) + assert await async_cache.get_all(['key_1', 2, (3, IntObject)]) == {'key_1': 4, 3: 18} + + +def test_put_all(cache): test_dict = { 1: 2, 'key_1': 4, (3, IntObject): 18, } - test_keys = ['key_1', 1, 3] - - result = cache_put_all(conn, cache, test_dict) - assert result.status == 0 - - result = cache_get_all(conn, cache, test_keys) - assert result.status == 0 - assert len(test_dict) == 3 - - for key in result.value: - assert key in test_keys - - -def test_contains_key(client, cache): - - conn = client.random_node - - cache_put(conn, cache, 'test_key', 42) - - result = cache_contains_key(conn, cache, 'test_key') - assert result.value is True - - result = cache_contains_key(conn, cache, 'non-existant-key') - assert result.value is False - - -def test_contains_keys(client, cache): - - conn = client.random_node - - cache_put(conn, cache, 5, 6) - cache_put(conn, cache, 'test_key', 42) + cache.put_all(test_dict) - result = cache_contains_keys(conn, cache, [5, 'test_key']) - assert result.value is True + result = cache.get_all(list(test_dict.keys())) - result = cache_contains_keys(conn, cache, [5, 'non-existent-key']) - assert result.value is False + assert len(result) == len(test_dict) + for k, v in test_dict.items(): + k = k[0] if isinstance(k, tuple) else k + assert result[k] == v -def test_get_and_put(client, cache): - - conn = client.random_node - - result = cache_get_and_put(conn, cache, 'test_key', 42) - assert result.status == 0 - assert result.value is None - - result = cache_get(conn, cache, 'test_key') - assert result.status == 0 - assert result.value is 42 - - result = cache_get_and_put(conn, cache, 'test_key', 1234) - assert result.status == 0 - assert result.value == 42 - - -def test_get_and_replace(client, cache): - - conn = client.random_node - - result = cache_get_and_replace(conn, cache, 'test_key', 42) - assert result.status == 0 - assert result.value is None - - result = cache_get(conn, cache, 'test_key') - assert result.status == 0 - assert result.value is None - - cache_put(conn, cache, 'test_key', 42) - - result = cache_get_and_replace(conn, cache, 'test_key', 1234) - assert result.status == 0 - assert result.value == 42 - +@pytest.mark.asyncio +async def test_put_all_async(async_cache): + test_dict = { + 1: 2, + 'key_1': 4, + (3, IntObject): 18, + } + await async_cache.put_all(test_dict) -def test_get_and_remove(client, cache): + result = await async_cache.get_all(list(test_dict.keys())) - conn = client.random_node + assert len(result) == len(test_dict) + for k, v in test_dict.items(): + k = k[0] if isinstance(k, tuple) else k + assert result[k] == v - result = cache_get_and_remove(conn, cache, 'test_key') - assert result.status == 0 - assert result.value is None - cache_put(conn, cache, 'test_key', 42) +def test_contains_key(cache): + cache.put('test_key', 42) - result = cache_get_and_remove(conn, cache, 'test_key') - assert result.status == 0 - assert result.value == 42 + assert cache.contains_key('test_key') + assert not 
cache.contains_key('non-existent-key') -def test_put_if_absent(client, cache): +@pytest.mark.asyncio +async def test_contains_key_async(async_cache): + await async_cache.put('test_key', 42) - conn = client.random_node + assert await async_cache.contains_key('test_key') + assert not await async_cache.contains_key('non-existent-key') - result = cache_put_if_absent(conn, cache, 'test_key', 42) - assert result.status == 0 - assert result.value is True - result = cache_put_if_absent(conn, cache, 'test_key', 1234) - assert result.status == 0 - assert result.value is False +def test_contains_keys(cache): + cache.put(5, 6) + cache.put('test_key', 42) + assert cache.contains_keys([5, 'test_key']) + assert not cache.contains_keys([5, 'non-existent-key']) -def test_get_and_put_if_absent(client, cache): - conn = client.random_node +@pytest.mark.asyncio +async def test_contains_keys_async(async_cache): + await async_cache.put(5, 6) + await async_cache.put('test_key', 42) - result = cache_get_and_put_if_absent(conn, cache, 'test_key', 42) - assert result.status == 0 - assert result.value is None + assert await async_cache.contains_keys([5, 'test_key']) + assert not await async_cache.contains_keys([5, 'non-existent-key']) - result = cache_get_and_put_if_absent(conn, cache, 'test_key', 1234) - assert result.status == 0 - assert result.value == 42 - result = cache_get_and_put_if_absent(conn, cache, 'test_key', 5678) - assert result.status == 0 - assert result.value == 42 +def test_get_and_put(cache): + assert cache.get_and_put('test_key', 42) is None + assert cache.get('test_key') == 42 + assert cache.get_and_put('test_key', 1234) == 42 + assert cache.get('test_key') == 1234 -def test_replace(client, cache): +@pytest.mark.asyncio +async def test_get_and_put_async(async_cache): + assert await async_cache.get_and_put('test_key', 42) is None + assert await async_cache.get('test_key') == 42 + assert await async_cache.get_and_put('test_key', 1234) == 42 + assert await async_cache.get('test_key') == 1234 - conn = client.random_node - result = cache_replace(conn, cache, 'test_key', 42) - assert result.status == 0 - assert result.value is False +def test_get_and_replace(cache): + assert cache.get_and_replace('test_key', 42) is None + assert cache.get('test_key') is None + cache.put('test_key', 42) + assert cache.get_and_replace('test_key', 1234) == 42 - cache_put(conn, cache, 'test_key', 1234) - result = cache_replace(conn, cache, 'test_key', 42) - assert result.status == 0 - assert result.value is True +@pytest.mark.asyncio +async def test_get_and_replace_async(async_cache): + assert await async_cache.get_and_replace('test_key', 42) is None + assert await async_cache.get('test_key') is None + await async_cache.put('test_key', 42) + assert await async_cache.get_and_replace('test_key', 1234) == 42 - result = cache_get(conn, cache, 'test_key') - assert result.status == 0 - assert result.value == 42 +def test_get_and_remove(cache): + assert cache.get_and_remove('test_key') is None + cache.put('test_key', 42) + assert cache.get_and_remove('test_key') == 42 + assert cache.get_and_remove('test_key') is None -def test_replace_if_equals(client, cache): - conn = client.random_node +@pytest.mark.asyncio +async def test_get_and_remove_async(async_cache): + assert await async_cache.get_and_remove('test_key') is None + await async_cache.put('test_key', 42) + assert await async_cache.get_and_remove('test_key') == 42 + assert await async_cache.get_and_remove('test_key') is None - result = cache_replace_if_equals(conn, cache, 
'my_test', 42, 1234) - assert result.status == 0 - assert result.value is False - cache_put(conn, cache, 'my_test', 42) +def test_put_if_absent(cache): + assert cache.put_if_absent('test_key', 42) + assert not cache.put_if_absent('test_key', 1234) - result = cache_replace_if_equals(conn, cache, 'my_test', 42, 1234) - assert result.status == 0 - assert result.value is True - result = cache_get(conn, cache, 'my_test') - assert result.status == 0 - assert result.value == 1234 +@pytest.mark.asyncio +async def test_put_if_absent_async(async_cache): + assert await async_cache.put_if_absent('test_key', 42) + assert not await async_cache.put_if_absent('test_key', 1234) -def test_clear(client, cache): +def test_get_and_put_if_absent(cache): + assert cache.get_and_put_if_absent('test_key', 42) is None + assert cache.get_and_put_if_absent('test_key', 1234) == 42 + assert cache.get_and_put_if_absent('test_key', 5678) == 42 + assert cache.get('test_key') == 42 - conn = client.random_node - result = cache_put(conn, cache, 'my_test', 42) - assert result.status == 0 +@pytest.mark.asyncio +async def test_get_and_put_if_absent_async(async_cache): + assert await async_cache.get_and_put_if_absent('test_key', 42) is None + assert await async_cache.get_and_put_if_absent('test_key', 1234) == 42 + assert await async_cache.get_and_put_if_absent('test_key', 5678) == 42 + assert await async_cache.get('test_key') == 42 - result = cache_clear(conn, cache) - assert result.status == 0 - result = cache_get(conn, cache, 'my_test') - assert result.status == 0 - assert result.value is None +def test_replace(cache): + assert cache.replace('test_key', 42) is False + cache.put('test_key', 1234) + assert cache.replace('test_key', 42) is True + assert cache.get('test_key') == 42 -def test_clear_key(client, cache): +@pytest.mark.asyncio +async def test_replace_async(async_cache): + assert await async_cache.replace('test_key', 42) is False + await async_cache.put('test_key', 1234) + assert await async_cache.replace('test_key', 42) is True + assert await async_cache.get('test_key') == 42 - conn = client.random_node - result = cache_put(conn, cache, 'my_test', 42) - assert result.status == 0 +def test_replace_if_equals(cache): + assert cache.replace_if_equals('my_test', 42, 1234) is False + cache.put('my_test', 42) + assert cache.replace_if_equals('my_test', 42, 1234) is True + assert cache.get('my_test') == 1234 - result = cache_put(conn, cache, 'another_test', 24) - assert result.status == 0 - result = cache_clear_key(conn, cache, 'my_test') - assert result.status == 0 +@pytest.mark.asyncio +async def test_replace_if_equals_async(async_cache): + assert await async_cache.replace_if_equals('my_test', 42, 1234) is False + await async_cache.put('my_test', 42) + assert await async_cache.replace_if_equals('my_test', 42, 1234) is True + assert await async_cache.get('my_test') == 1234 - result = cache_get(conn, cache, 'my_test') - assert result.status == 0 - assert result.value is None - result = cache_get(conn, cache, 'another_test') - assert result.status == 0 - assert result.value == 24 +def test_clear(cache): + cache.put('my_test', 42) + cache.clear() + assert cache.get('my_test') is None -def test_clear_keys(client, cache): +@pytest.mark.asyncio +async def test_clear_async(async_cache): + await async_cache.put('my_test', 42) + await async_cache.clear() + assert await async_cache.get('my_test') is None - conn = client.random_node - result = cache_put(conn, cache, 'my_test_key', 42) - assert result.status == 0 +def 
test_clear_key(cache): + cache.put('my_test', 42) + cache.put('another_test', 24) - result = cache_put(conn, cache, 'another_test', 24) - assert result.status == 0 + cache.clear_key('my_test') - result = cache_clear_keys(conn, cache, [ - 'my_test_key', - 'nonexistent_key', - ]) - assert result.status == 0 + assert cache.get('my_test') is None + assert cache.get('another_test') == 24 - result = cache_get(conn, cache, 'my_test_key') - assert result.status == 0 - assert result.value is None - result = cache_get(conn, cache, 'another_test') - assert result.status == 0 - assert result.value == 24 +@pytest.mark.asyncio +async def test_clear_key_async(async_cache): + await async_cache.put('my_test', 42) + await async_cache.put('another_test', 24) + await async_cache.clear_key('my_test') -def test_remove_key(client, cache): + assert await async_cache.get('my_test') is None + assert await async_cache.get('another_test') == 24 - conn = client.random_node - result = cache_put(conn, cache, 'my_test_key', 42) - assert result.status == 0 +def test_clear_keys(cache): + cache.put('my_test_key', 42) + cache.put('another_test', 24) - result = cache_remove_key(conn, cache, 'my_test_key') - assert result.status == 0 - assert result.value is True + cache.clear_keys(['my_test_key', 'nonexistent_key']) - result = cache_remove_key(conn, cache, 'non_existent_key') - assert result.status == 0 - assert result.value is False + assert cache.get('my_test_key') is None + assert cache.get('another_test') == 24 -def test_remove_if_equals(client, cache): +@pytest.mark.asyncio +async def test_clear_keys_async(async_cache): + await async_cache.put('my_test_key', 42) + await async_cache.put('another_test', 24) - conn = client.random_node + await async_cache.clear_keys(['my_test_key', 'nonexistent_key']) - result = cache_put(conn, cache, 'my_test', 42) - assert result.status == 0 + assert await async_cache.get('my_test_key') is None + assert await async_cache.get('another_test') == 24 - result = cache_remove_if_equals(conn, cache, 'my_test', 1234) - assert result.status == 0 - assert result.value is False - result = cache_remove_if_equals(conn, cache, 'my_test', 42) - assert result.status == 0 - assert result.value is True +def test_remove_key(cache): + cache.put('my_test_key', 42) + assert cache.remove_key('my_test_key') is True + assert cache.remove_key('non_existent_key') is False - result = cache_get(conn, cache, 'my_test') - assert result.status == 0 - assert result.value is None +@pytest.mark.asyncio +async def test_remove_key_async(async_cache): + await async_cache.put('my_test_key', 42) + assert await async_cache.remove_key('my_test_key') is True + assert await async_cache.remove_key('non_existent_key') is False -def test_remove_keys(client, cache): - conn = client.random_node +def test_remove_if_equals(cache): + cache.put('my_test', 42) + assert cache.remove_if_equals('my_test', 1234) is False + assert cache.remove_if_equals('my_test', 42) is True + assert cache.get('my_test') is None - result = cache_put(conn, cache, 'my_test', 42) - assert result.status == 0 - result = cache_put(conn, cache, 'another_test', 24) - assert result.status == 0 +@pytest.mark.asyncio +async def test_remove_if_equals_async(async_cache): + await async_cache.put('my_test', 42) + assert await async_cache.remove_if_equals('my_test', 1234) is False + assert await async_cache.remove_if_equals('my_test', 42) is True + assert await async_cache.get('my_test') is None - result = cache_remove_keys(conn, cache, ['my_test', 'non_existent']) - assert 
result.status == 0 - result = cache_get(conn, cache, 'my_test') - assert result.status == 0 - assert result.value is None +def test_remove_keys(cache): + cache.put('my_test', 42) - result = cache_get(conn, cache, 'another_test') - assert result.status == 0 - assert result.value == 24 + cache.put('another_test', 24) + cache.remove_keys(['my_test', 'non_existent']) + assert cache.get('my_test') is None + assert cache.get('another_test') == 24 -def test_remove_all(client, cache): - conn = client.random_node +@pytest.mark.asyncio +async def test_remove_keys_async(async_cache): + await async_cache.put('my_test', 42) - result = cache_put(conn, cache, 'my_test', 42) - assert result.status == 0 + await async_cache.put('another_test', 24) + await async_cache.remove_keys(['my_test', 'non_existent']) - result = cache_put(conn, cache, 'another_test', 24) - assert result.status == 0 + assert await async_cache.get('my_test') is None + assert await async_cache.get('another_test') == 24 - result = cache_remove_all(conn, cache) - assert result.status == 0 - result = cache_get(conn, cache, 'my_test') - assert result.status == 0 - assert result.value is None +def test_remove_all(cache): + cache.put('my_test', 42) + cache.put('another_test', 24) + cache.remove_all() - result = cache_get(conn, cache, 'another_test') - assert result.status == 0 - assert result.value is None + assert cache.get('my_test') is None + assert cache.get('another_test') is None -def test_cache_get_size(client, cache): +@pytest.mark.asyncio +async def test_remove_all_async(async_cache): + await async_cache.put('my_test', 42) + await async_cache.put('another_test', 24) + await async_cache.remove_all() - conn = client.random_node + assert await async_cache.get('my_test') is None + assert await async_cache.get('another_test') is None - result = cache_put(conn, cache, 'my_test', 42) - assert result.status == 0 - result = cache_get_size(conn, cache) - assert result.status == 0 - assert result.value == 1 +def test_cache_get_size(cache): + cache.put('my_test', 42) + assert cache.get_size() == 1 -def test_put_get_collection(client): +@pytest.mark.asyncio +async def test_cache_get_size_async(async_cache): + await async_cache.put('my_test', 42) + assert await async_cache.get_size() == 1 - test_datetime = datetime(year=1996, month=3, day=1) - cache = client.get_or_create_cache('test_coll_cache') - cache.put( +collection_params = [ + [ 'simple', - ( - 1, - [ - (123, IntObject), - 678, - None, - 55.2, - ((test_datetime, 0), TimestampObject), - ] - ), - value_hint=CollectionObject - ) - value = cache.get('simple') - assert value == (1, [123, 678, None, 55.2, (test_datetime, 0)]) - - cache.put( + (1, [(123, IntObject), 678, None, 55.2, ((datetime(year=1996, month=3, day=1), 0), TimestampObject)]), + (1, [123, 678, None, 55.2, (datetime(year=1996, month=3, day=1), 0)]) + ], + [ 'nested', - ( - 1, - [ - 123, - ((1, [456, 'inner_test_string', 789]), CollectionObject), - 'outer_test_string', - ] - ), - value_hint=CollectionObject - ) - value = cache.get('nested') - assert value == ( - 1, - [ - 123, - (1, [456, 'inner_test_string', 789]), - 'outer_test_string' - ] - ) - - -def test_put_get_map(client): - - cache = client.get_or_create_cache('test_map_cache') - - cache.put( - 'test_map', + (1, [123, ((1, [456, 'inner_test_string', 789]), CollectionObject), 'outer_test_string']), + (1, [123, (1, [456, 'inner_test_string', 789]), 'outer_test_string']) + ], + [ + 'hash_map', ( MapObject.HASH_MAP, { (123, IntObject): 'test_data', 456: ((1, [456, 
'inner_test_string', 789]), CollectionObject), 'test_key': 32.4, + 'simple_strings': ['string_1', 'string_2'] + } + ), + ( + MapObject.HASH_MAP, + { + 123: 'test_data', + 456: (1, [456, 'inner_test_string', 789]), + 'test_key': 32.4, + 'simple_strings': ['string_1', 'string_2'] + } + ) + ], + [ + 'linked_hash_map', + ( + MapObject.LINKED_HASH_MAP, + { + 'test_data': 12345, + 456: ['string_1', 'string_2'], + 'test_key': 32.4 } ), - value_hint=MapObject - ) - value = cache.get('test_map') - assert value == (MapObject.HASH_MAP, { - 123: 'test_data', - 456: (1, [456, 'inner_test_string', 789]), - 'test_key': 32.4, - }) + ( + MapObject.LINKED_HASH_MAP, + { + 'test_data': 12345, + 456: ['string_1', 'string_2'], + 'test_key': 32.4 + } + ) + ], +] + + +@pytest.mark.parametrize(['key', 'hinted_value', 'value'], collection_params) +def test_put_get_collection(cache, key, hinted_value, value): + cache.put(key, hinted_value) + assert cache.get(key) == value + + +@pytest.mark.parametrize(['key', 'hinted_value', 'value'], collection_params) +@pytest.mark.asyncio +async def test_put_get_collection_async(async_cache, key, hinted_value, value): + await async_cache.put(key, hinted_value) + assert await async_cache.get(key) == value diff --git a/tests/common/test_scan.py b/tests/common/test_scan.py index 2f0e056..d55fd3e 100644 --- a/tests/common/test_scan.py +++ b/tests/common/test_scan.py @@ -12,57 +12,153 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from collections import OrderedDict -from pyignite.api import ( - scan, scan_cursor_get_page, resource_close, cache_put_all, -) +import pytest +from pyignite import GenericObjectMeta +from pyignite.api import resource_close, resource_close_async +from pyignite.connection import AioConnection +from pyignite.datatypes import IntObject, String +from pyignite.exceptions import CacheError -def test_scan(client, cache): - conn = client.random_node - page_size = 10 +class SimpleObject( + metaclass=GenericObjectMeta, + type_name='SimpleObject', + schema=OrderedDict([ + ('id', IntObject), + ('str', String), + ]) +): + pass - result = cache_put_all(conn, cache, { - 'key_{}'.format(v): v for v in range(page_size * 2) - }) - assert result.status == 0 - result = scan(conn, cache, page_size) - assert result.status == 0 - assert len(result.value['data']) == page_size - assert result.value['more'] is True +page_size = 10 - cursor = result.value['cursor'] - result = scan_cursor_get_page(conn, cursor) - assert result.status == 0 - assert len(result.value['data']) == page_size - assert result.value['more'] is False +@pytest.fixture +def test_objects_data(): + yield {i: SimpleObject(id=i, str=f'str_{i}') for i in range(page_size * 2)} - result = scan_cursor_get_page(conn, cursor) - assert result.status != 0 +@pytest.mark.asyncio +def test_scan_objects(cache, test_objects_data): + cache.put_all(test_objects_data) -def test_close_resource(client, cache): + for p_sz in [page_size, page_size * 2, page_size * 3, page_size + 5]: + with cache.scan(p_sz) as cursor: + result = {k: v for k, v in cursor} + assert result == test_objects_data - conn = client.random_node - page_size = 10 + __check_cursor_closed(cursor) - result = cache_put_all(conn, cache, { - 'key_{}'.format(v): v for v in range(page_size * 2) - }) - assert result.status == 0 + with pytest.raises(Exception): + with cache.scan(p_sz) as cursor: + for _ in cursor: + raise Exception - result = 
scan(conn, cache, page_size) - assert result.status == 0 - assert len(result.value['data']) == page_size - assert result.value['more'] is True + __check_cursor_closed(cursor) - cursor = result.value['cursor'] + cursor = cache.scan(page_size) + assert {k: v for k, v in cursor} == test_objects_data + __check_cursor_closed(cursor) - result = resource_close(conn, cursor) - assert result.status == 0 - result = scan_cursor_get_page(conn, cursor) - assert result.status != 0 +@pytest.mark.asyncio +async def test_scan_objects_async(async_cache, test_objects_data): + await async_cache.put_all(test_objects_data) + + for p_sz in [page_size, page_size * 2, page_size * 3, page_size + 5]: + async with async_cache.scan(p_sz) as cursor: + result = {k: v async for k, v in cursor} + assert result == test_objects_data + + await __check_cursor_closed(cursor) + + with pytest.raises(Exception): + async with async_cache.scan(p_sz) as cursor: + async for _ in cursor: + raise Exception + + await __check_cursor_closed(cursor) + + cursor = await async_cache.scan(page_size) + assert {k: v async for k, v in cursor} == test_objects_data + + await __check_cursor_closed(cursor) + + +@pytest.fixture +def cache_scan_data(): + yield { + 1: 'This is a test', + 2: 'One more test', + 3: 'Foo', + 4: 'Buzz', + 5: 'Bar', + 6: 'Lorem ipsum', + 7: 'dolor sit amet', + 8: 'consectetur adipiscing elit', + 9: 'Nullam aliquet', + 10: 'nisl at ante', + 11: 'suscipit', + 12: 'ut cursus', + 13: 'metus interdum', + 14: 'Nulla tincidunt', + 15: 'sollicitudin iaculis', + } + + +@pytest.mark.parametrize('page_size', range(1, 17, 5)) +def test_cache_scan(cache, cache_scan_data, page_size): + cache.put_all(cache_scan_data) + + with cache.scan(page_size=page_size) as cursor: + assert {k: v for k, v in cursor} == cache_scan_data + + +@pytest.mark.parametrize('page_size', range(1, 17, 5)) +@pytest.mark.asyncio +async def test_cache_scan_async(async_cache, cache_scan_data, page_size): + await async_cache.put_all(cache_scan_data) + + async with async_cache.scan(page_size=page_size) as cursor: + assert {k: v async for k, v in cursor} == cache_scan_data + + +def test_uninitialized_cursor(cache, test_objects_data): + cache.put_all(test_objects_data) + + cursor = cache.scan(page_size) + for _ in cursor: + break + + cursor.close() + __check_cursor_closed(cursor) + + +@pytest.mark.asyncio +async def test_uninitialized_cursor_async(async_cache, test_objects_data): + await async_cache.put_all(test_objects_data) + + # iterating of non-awaited cursor. 
+ with pytest.raises(CacheError): + cursor = async_cache.scan(page_size) + assert {k: v async for k, v in cursor} == test_objects_data + + cursor = await async_cache.scan(page_size) + assert {k: v async for k, v in cursor} == test_objects_data + await __check_cursor_closed(cursor) + + +def __check_cursor_closed(cursor): + async def check_async(): + result = await resource_close_async(cursor.connection, cursor.cursor_id) + assert result.status != 0 + + def check(): + result = resource_close(cursor.connection, cursor.cursor_id) + assert result.status != 0 + + return check_async() if isinstance(cursor.connection, AioConnection) else check() diff --git a/tests/common/test_sql.py b/tests/common/test_sql.py index cc68a02..0841b7f 100644 --- a/tests/common/test_sql.py +++ b/tests/common/test_sql.py @@ -15,160 +15,173 @@ import pytest -from pyignite.api import ( - sql_fields, sql_fields_cursor_get_page, - sql, sql_cursor_get_page, - cache_get_configuration, -) +from pyignite import AioClient +from pyignite.aio_cache import AioCache from pyignite.datatypes.cache_config import CacheMode -from pyignite.datatypes.prop_codes import * +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_SQL_SCHEMA, PROP_QUERY_ENTITIES, PROP_CACHE_MODE from pyignite.exceptions import SQLError from pyignite.utils import entity_id -from pyignite.binary import unwrap_binary - -initial_data = [ - ('John', 'Doe', 5), - ('Jane', 'Roe', 4), - ('Joe', 'Bloggs', 4), - ('Richard', 'Public', 3), - ('Negidius', 'Numerius', 3), - ] -create_query = '''CREATE TABLE Student ( - id INT(11) PRIMARY KEY, - first_name CHAR(24), - last_name CHAR(32), - grade INT(11))''' - -insert_query = '''INSERT INTO Student(id, first_name, last_name, grade) -VALUES (?, ?, ?, ?)''' - -select_query = 'SELECT id, first_name, last_name, grade FROM Student' - -drop_query = 'DROP TABLE Student IF EXISTS' - -page_size = 4 - - -def test_sql(client): - - conn = client.random_node - - # cleanup - client.sql(drop_query) - - result = sql_fields( - conn, - 0, - create_query, - page_size, - schema='PUBLIC', - include_field_names=True - ) - assert result.status == 0, result.message - - for i, data_line in enumerate(initial_data, start=1): - fname, lname, grade = data_line - result = sql_fields( - conn, - 0, - insert_query, - page_size, - schema='PUBLIC', - query_args=[i, fname, lname, grade], - include_field_names=True - ) - assert result.status == 0, result.message - - result = cache_get_configuration(conn, 'SQL_PUBLIC_STUDENT') - assert result.status == 0, result.message - - binary_type_name = result.value[PROP_QUERY_ENTITIES][0]['value_type_name'] - result = sql( - conn, - 'SQL_PUBLIC_STUDENT', - binary_type_name, - 'TRUE', - page_size - ) - assert result.status == 0, result.message - assert len(result.value['data']) == page_size - assert result.value['more'] is True - - for wrapped_object in result.value['data'].values(): - data = unwrap_binary(client, wrapped_object) - assert data.type_id == entity_id(binary_type_name) - - cursor = result.value['cursor'] - - while result.value['more']: - result = sql_cursor_get_page(conn, cursor) - assert result.status == 0, result.message - - for wrapped_object in result.value['data'].values(): - data = unwrap_binary(client, wrapped_object) - assert data.type_id == entity_id(binary_type_name) - - # repeat cleanup - result = sql_fields(conn, 0, drop_query, page_size, schema='PUBLIC') - assert result.status == 0 - - -def test_sql_fields(client): - - conn = client.random_node - - # cleanup - client.sql(drop_query) - - result = 
sql_fields( - conn, - 0, - create_query, - page_size, - schema='PUBLIC', - include_field_names=True - ) - assert result.status == 0, result.message - - for i, data_line in enumerate(initial_data, start=1): - fname, lname, grade = data_line - result = sql_fields( - conn, - 0, - insert_query, - page_size, - schema='PUBLIC', - query_args=[i, fname, lname, grade], - include_field_names=True - ) - assert result.status == 0, result.message - - result = sql_fields( - conn, - 0, - select_query, - page_size, - schema='PUBLIC', - include_field_names=True - ) - assert result.status == 0 - assert len(result.value['data']) == page_size - assert result.value['more'] is True - - cursor = result.value['cursor'] - - result = sql_fields_cursor_get_page(conn, cursor, field_count=4) - assert result.status == 0 - assert len(result.value['data']) == len(initial_data) - page_size - assert result.value['more'] is False - - # repeat cleanup - result = sql_fields(conn, 0, drop_query, page_size, schema='PUBLIC') - assert result.status == 0 - - -def test_long_multipage_query(client): +student_table_data = [ + ('John', 'Doe', 5), + ('Jane', 'Roe', 4), + ('Joe', 'Bloggs', 4), + ('Richard', 'Public', 3), + ('Negidius', 'Numerius', 3), +] + +student_table_select_query = 'SELECT id, first_name, last_name, grade FROM Student ORDER BY ID ASC' + + +@pytest.fixture +def student_table_fixture(client): + yield from __create_student_table_fixture(client) + + +@pytest.fixture +async def async_student_table_fixture(async_client): + async for _ in __create_student_table_fixture(async_client): + yield + + +def __create_student_table_fixture(client): + create_query = '''CREATE TABLE Student ( + id INT(11) PRIMARY KEY, + first_name CHAR(24), + last_name CHAR(32), + grade INT(11))''' + + insert_query = '''INSERT INTO Student(id, first_name, last_name, grade) + VALUES (?, ?, ?, ?)''' + + drop_query = 'DROP TABLE Student IF EXISTS' + + def inner(): + client.sql(drop_query) + client.sql(create_query) + + for i, data_line in enumerate(student_table_data): + fname, lname, grade = data_line + client.sql(insert_query, query_args=[i, fname, lname, grade]) + + yield None + client.sql(drop_query) + + async def inner_async(): + await client.sql(drop_query) + await client.sql(create_query) + + for i, data_line in enumerate(student_table_data): + fname, lname, grade = data_line + await client.sql(insert_query, query_args=[i, fname, lname, grade]) + + yield None + await client.sql(drop_query) + + return inner_async() if isinstance(client, AioClient) else inner() + + +@pytest.mark.parametrize('page_size', range(1, 6, 2)) +def test_sql(client, student_table_fixture, page_size): + cache = client.get_cache('SQL_PUBLIC_STUDENT') + cache_config = cache.settings + + binary_type_name = cache_config[PROP_QUERY_ENTITIES][0]['value_type_name'] + + with cache.select_row('ORDER BY ID ASC', page_size=4) as cursor: + for i, row in enumerate(cursor): + k, v = row + assert k == i + + assert (v.FIRST_NAME, v.LAST_NAME, v.GRADE) == student_table_data[i] + assert v.type_id == entity_id(binary_type_name) + + +@pytest.mark.parametrize('page_size', range(1, 6, 2)) +def test_sql_fields(client, student_table_fixture, page_size): + with client.sql(student_table_select_query, page_size=page_size, include_field_names=True) as cursor: + for i, row in enumerate(cursor): + if i > 0: + assert tuple(row) == (i - 1,) + student_table_data[i - 1] + else: + assert row == ['ID', 'FIRST_NAME', 'LAST_NAME', 'GRADE'] + + +@pytest.mark.asyncio +@pytest.mark.parametrize('page_size', range(1, 
6, 2)) +async def test_sql_fields_async(async_client, async_student_table_fixture, page_size): + async with async_client.sql(student_table_select_query, page_size=page_size, include_field_names=True) as cursor: + i = 0 + async for row in cursor: + if i > 0: + assert tuple(row) == (i - 1,) + student_table_data[i - 1] + else: + assert row == ['ID', 'FIRST_NAME', 'LAST_NAME', 'GRADE'] + i += 1 + + cursor = await async_client.sql(student_table_select_query, page_size=page_size, include_field_names=True) + try: + i = 0 + async for row in cursor: + if i > 0: + assert tuple(row) == (i - 1,) + student_table_data[i - 1] + else: + assert row == ['ID', 'FIRST_NAME', 'LAST_NAME', 'GRADE'] + i += 1 + finally: + await cursor.close() + + +multipage_fields = ["id", "abc", "ghi", "def", "jkl", "prs", "mno", "tuw", "zyz", "abc1", "def1", "jkl1", "prs1"] + + +@pytest.fixture +def long_multipage_table_fixture(client): + yield from __long_multipage_table_fixture(client) + + +@pytest.fixture +async def async_long_multipage_table_fixture(async_client): + async for _ in __long_multipage_table_fixture(async_client): + yield + + +def __long_multipage_table_fixture(client): + drop_query = 'DROP TABLE LongMultipageQuery IF EXISTS' + + create_query = "CREATE TABLE LongMultiPageQuery (%s, %s)" % ( + multipage_fields[0] + " INT(11) PRIMARY KEY", ",".join(map(lambda f: f + " INT(11)", multipage_fields[1:]))) + + insert_query = "INSERT INTO LongMultipageQuery (%s) VALUES (%s)" % ( + ",".join(multipage_fields), ",".join("?" * len(multipage_fields))) + + def query_args(_id): + return [_id] + list(i * _id for i in range(1, len(multipage_fields))) + + def inner(): + client.sql(drop_query) + client.sql(create_query) + + for i in range(1, 21): + client.sql(insert_query, query_args=query_args(i)) + yield None + + client.sql(drop_query) + + async def inner_async(): + await client.sql(drop_query) + await client.sql(create_query) + + for i in range(1, 21): + await client.sql(insert_query, query_args=query_args(i)) + yield None + + await client.sql(drop_query) + + return inner_async() if isinstance(client, AioClient) else inner() + + +def test_long_multipage_query(client, long_multipage_table_fixture): """ The test creates a table with 13 columns (id and 12 enumerated columns) and 20 records with id in range from 1 to 20. Values of enumerated columns @@ -177,25 +190,20 @@ def test_long_multipage_query(client): The goal is to ensure that all the values are selected in a right order. """ - fields = ["id", "abc", "ghi", "def", "jkl", "prs", "mno", "tuw", "zyz", "abc1", "def1", "jkl1", "prs1"] + with client.sql('SELECT * FROM LongMultipageQuery', page_size=1) as cursor: + for page in cursor: + assert len(page) == len(multipage_fields) + for field_number, value in enumerate(page[1:], start=1): + assert value == field_number * page[0] - client.sql('DROP TABLE LongMultipageQuery IF EXISTS') - client.sql("CREATE TABLE LongMultiPageQuery (%s, %s)" % - (fields[0] + " INT(11) PRIMARY KEY", ",".join(map(lambda f: f + " INT(11)", fields[1:])))) - - for id in range(1, 21): - client.sql( - "INSERT INTO LongMultipageQuery (%s) VALUES (%s)" % (",".join(fields), ",".join("?" 
* len(fields))), - query_args=[id] + list(i * id for i in range(1, len(fields)))) - - result = client.sql('SELECT * FROM LongMultipageQuery', page_size=1) - for page in result: - assert len(page) == len(fields) - for field_number, value in enumerate(page[1:], start=1): - assert value == field_number * page[0] - - client.sql(drop_query) +@pytest.mark.asyncio +async def test_long_multipage_query_async(async_client, async_long_multipage_table_fixture): + async with async_client.sql('SELECT * FROM LongMultipageQuery', page_size=1) as cursor: + async for page in cursor: + assert len(page) == len(multipage_fields) + for field_number, value in enumerate(page[1:], start=1): + assert value == field_number * page[0] def test_sql_not_create_cache_with_schema(client): @@ -203,20 +211,30 @@ def test_sql_not_create_cache_with_schema(client): client.sql(schema=None, cache='NOT_EXISTING', query_str='select * from NotExisting') +@pytest.mark.asyncio +async def test_sql_not_create_cache_with_schema_async(async_client): + with pytest.raises(SQLError, match=r".*Cache does not exist.*"): + await async_client.sql(schema=None, cache='NOT_EXISTING_ASYNC', query_str='select * from NotExistingAsync') + + def test_sql_not_create_cache_with_cache(client): with pytest.raises(SQLError, match=r".*Failed to set schema.*"): client.sql(schema='NOT_EXISTING', query_str='select * from NotExisting') -def test_query_with_cache(client): - test_key = 42 - test_value = 'Lorem ipsum' +@pytest.mark.asyncio +async def test_sql_not_create_cache_with_cache_async(async_client): + with pytest.raises(SQLError, match=r".*Failed to set schema.*"): + await async_client.sql(schema='NOT_EXISTING_ASYNC', query_str='select * from NotExistingAsync') - cache_name = test_query_with_cache.__name__.upper() + +@pytest.fixture +def indexed_cache_settings(): + cache_name = 'indexed_cache' schema_name = f'{cache_name}_schema'.upper() table_name = f'{cache_name}_table'.upper() - cache = client.create_cache({ + yield { PROP_NAME: cache_name, PROP_SQL_SCHEMA: schema_name, PROP_CACHE_MODE: CacheMode.PARTITIONED, @@ -243,18 +261,67 @@ def test_query_with_cache(client): ], }, ], - }) + } + + +@pytest.fixture +def indexed_cache_fixture(client, indexed_cache_settings): + cache_name = indexed_cache_settings[PROP_NAME] + schema_name = indexed_cache_settings[PROP_SQL_SCHEMA] + table_name = indexed_cache_settings[PROP_QUERY_ENTITIES][0]['table_name'] + + cache = client.create_cache(indexed_cache_settings) + + yield cache, cache_name, schema_name, table_name + cache.destroy() + + +@pytest.fixture +async def async_indexed_cache_fixture(async_client, indexed_cache_settings): + cache_name = indexed_cache_settings[PROP_NAME] + schema_name = indexed_cache_settings[PROP_SQL_SCHEMA] + table_name = indexed_cache_settings[PROP_QUERY_ENTITIES][0]['table_name'] + + cache = await async_client.create_cache(indexed_cache_settings) + + yield cache, cache_name, schema_name, table_name + await cache.destroy() + + +def test_query_with_cache(client, indexed_cache_fixture): + return __check_query_with_cache(client, indexed_cache_fixture) + + +@pytest.mark.asyncio +async def test_query_with_cache_async(async_client, async_indexed_cache_fixture): + return await __check_query_with_cache(async_client, async_indexed_cache_fixture) + - cache.put(test_key, test_value) +def __check_query_with_cache(client, cache_fixture): + test_key, test_value = 42, 'Lorem ipsum' + cache, cache_name, schema_name, table_name = cache_fixture + query = f'select value from {table_name}' args_to_check = [ 
('schema', schema_name), ('cache', cache), - ('cache', cache.name), + ('cache', cache_name), ('cache', cache.cache_id) ] - for param, value in args_to_check: - page = client.sql(f'select value from {table_name}', **{param: value}) - received = next(page)[0] - assert test_value == received + def inner(): + cache.put(test_key, test_value) + for param, value in args_to_check: + with client.sql(query, **{param: value}) as cursor: + received = next(cursor)[0] + assert test_value == received + + async def async_inner(): + await cache.put(test_key, test_value) + for param, value in args_to_check: + async with client.sql(query, **{param: value}) as cursor: + row = await cursor.__anext__() + received = row[0] + assert test_value == received + + return async_inner() if isinstance(cache, AioCache) else inner() diff --git a/tests/common/test_sql_composite_key.py b/tests/common/test_sql_composite_key.py new file mode 100644 index 0000000..76de77e --- /dev/null +++ b/tests/common/test_sql_composite_key.py @@ -0,0 +1,168 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from collections import OrderedDict +from enum import Enum + +import pytest + +from pyignite import GenericObjectMeta, AioClient +from pyignite.datatypes import IntObject, String + + +class StudentKey( + metaclass=GenericObjectMeta, + type_name='test.model.StudentKey', + schema=OrderedDict([ + ('ID', IntObject), + ('DEPT', String) + ]) +): + pass + + +class Student( + metaclass=GenericObjectMeta, + type_name='test.model.Student', + schema=OrderedDict([ + ('NAME', String), + ]) +): + pass + + +create_query = '''CREATE TABLE StudentTable ( + id INT(11), + dept VARCHAR, + name CHAR(24), + PRIMARY KEY (id, dept)) + WITH "CACHE_NAME=StudentCache, KEY_TYPE=test.model.StudentKey, VALUE_TYPE=test.model.Student"''' + +insert_query = '''INSERT INTO StudentTable (id, dept, name) VALUES (?, ?, ?)''' + +select_query = 'SELECT id, dept, name FROM StudentTable' + +select_kv_query = 'SELECT _key, _val FROM StudentTable' + +drop_query = 'DROP TABLE StudentTable IF EXISTS' + + +@pytest.fixture +def student_table_fixture(client): + yield from __create_student_table_fixture(client) + + +@pytest.fixture +async def async_student_table_fixture(async_client): + async for _ in __create_student_table_fixture(async_client): + yield + + +def __create_student_table_fixture(client): + def inner(): + client.sql(drop_query) + client.sql(create_query) + yield None + client.sql(drop_query) + + async def inner_async(): + await client.sql(drop_query) + await client.sql(create_query) + yield None + await client.sql(drop_query) + + return inner_async() if isinstance(client, AioClient) else inner() + + +class InsertMode(Enum): + SQL = 1 + CACHE = 2 + + +@pytest.mark.parametrize('insert_mode', [InsertMode.SQL, InsertMode.CACHE]) +def test_sql_composite_key(client, insert_mode, student_table_fixture): + __perform_test(client, insert_mode) + + +@pytest.mark.asyncio +@pytest.mark.parametrize('insert_mode', [InsertMode.SQL, InsertMode.CACHE]) +async def test_sql_composite_key_async(async_client, insert_mode, async_student_table_fixture): + await __perform_test(async_client, insert_mode) + + +def __perform_test(client, insert=InsertMode.SQL): + student_key = StudentKey(2, 'Business') + student_val = Student('Abe') + + def validate_query_result(key, val, query_result): + """ + Compare query result with expected key and value. + """ + assert len(query_result) == 2 + sql_row = dict(zip(query_result[0], query_result[1])) + + assert sql_row['ID'] == key.ID + assert sql_row['DEPT'] == key.DEPT + assert sql_row['NAME'] == val.NAME + + def validate_kv_query_result(key, val, query_result): + """ + Compare query result with expected key and value. 
+ """ + assert len(query_result) == 2 + sql_row = dict(zip(query_result[0], query_result[1])) + + sql_key, sql_val = sql_row['_KEY'], sql_row['_VAL'] + assert sql_key.ID == key.ID + assert sql_key.DEPT == key.DEPT + assert sql_val.NAME == val.NAME + + def inner(): + if insert == InsertMode.SQL: + result = client.sql(insert_query, query_args=[student_key.ID, student_key.DEPT, student_val.NAME]) + assert next(result)[0] == 1 + else: + studentCache = client.get_cache('StudentCache') + studentCache.put(student_key, student_val) + val = studentCache.get(student_key) + assert val is not None + assert val.NAME == student_val.NAME + + query_result = list(client.sql(select_query, include_field_names=True)) + validate_query_result(student_key, student_val, query_result) + + query_result = list(client.sql(select_kv_query, include_field_names=True)) + validate_kv_query_result(student_key, student_val, query_result) + + async def inner_async(): + if insert == InsertMode.SQL: + result = await client.sql(insert_query, query_args=[student_key.ID, student_key.DEPT, student_val.NAME]) + assert (await result.__anext__())[0] == 1 + else: + studentCache = await client.get_cache('StudentCache') + await studentCache.put(student_key, student_val) + val = await studentCache.get(student_key) + assert val is not None + assert val.NAME == student_val.NAME + + async with client.sql(select_query, include_field_names=True) as cursor: + query_result = [r async for r in cursor] + validate_query_result(student_key, student_val, query_result) + + async with client.sql(select_kv_query, include_field_names=True) as cursor: + query_result = [r async for r in cursor] + validate_kv_query_result(student_key, student_val, query_result) + + return inner_async() if isinstance(client, AioClient) else inner() diff --git a/tests/conftest.py b/tests/conftest.py index 59b7d3a..65134fd 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -12,6 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import asyncio + import pytest @@ -27,7 +29,7 @@ def run_examples(request): def skip_if_no_cext(request): skip = False try: - from pyignite import _cutils + from pyignite import _cutils # noqa: F401 except ImportError: if request.config.getoption('--force-cext'): pytest.fail("C extension failed to build, fail test because of --force-cext is set.") @@ -38,6 +40,14 @@ def skip_if_no_cext(request): pytest.skip('skipped c extensions test, c extension is not available.') +@pytest.fixture(scope='session') +def event_loop(): + """Create an instance of the default event loop for each test case.""" + loop = asyncio.get_event_loop_policy().new_event_loop() + yield loop + loop.close() + + def pytest_addoption(parser): parser.addoption( '--examples', diff --git a/tests/security/test_auth.py b/tests/security/test_auth.py index 2dd19a0..4a1c52d 100644 --- a/tests/security/test_auth.py +++ b/tests/security/test_auth.py @@ -15,7 +15,7 @@ import pytest from pyignite.exceptions import AuthenticationError -from tests.util import start_ignite_gen, clear_ignite_work_dir, get_client +from tests.util import start_ignite_gen, clear_ignite_work_dir, get_client, get_client_async DEFAULT_IGNITE_USERNAME = 'ignite' DEFAULT_IGNITE_PASSWORD = 'ignite' @@ -47,13 +47,27 @@ def test_auth_success(with_ssl, ssl_params): assert all(node.alive for node in client._nodes) +@pytest.mark.asyncio +async def test_auth_success_async(with_ssl, ssl_params): + ssl_params['use_ssl'] = with_ssl + + async with get_client_async(username=DEFAULT_IGNITE_USERNAME, password=DEFAULT_IGNITE_PASSWORD, + **ssl_params) as client: + await client.connect("127.0.0.1", 10801) + + assert all(node.alive for node in client._nodes) + + +auth_failed_params = [ + [DEFAULT_IGNITE_USERNAME, None], + ['invalid_user', 'invalid_password'], + [None, None] +] + + @pytest.mark.parametrize( 'username, password', - [ - [DEFAULT_IGNITE_USERNAME, None], - ['invalid_user', 'invalid_password'], - [None, None] - ] + auth_failed_params ) def test_auth_failed(username, password, with_ssl, ssl_params): ssl_params['use_ssl'] = with_ssl @@ -61,3 +75,16 @@ def test_auth_failed(username, password, with_ssl, ssl_params): with pytest.raises(AuthenticationError): with get_client(username=username, password=password, **ssl_params) as client: client.connect("127.0.0.1", 10801) + + +@pytest.mark.parametrize( + 'username, password', + auth_failed_params +) +@pytest.mark.asyncio +async def test_auth_failed_async(username, password, with_ssl, ssl_params): + ssl_params['use_ssl'] = with_ssl + + with pytest.raises(AuthenticationError): + async with get_client_async(username=username, password=password, **ssl_params) as client: + await client.connect("127.0.0.1", 10801) diff --git a/tests/security/test_ssl.py b/tests/security/test_ssl.py index 6463a03..32db98f 100644 --- a/tests/security/test_ssl.py +++ b/tests/security/test_ssl.py @@ -15,7 +15,7 @@ import pytest from pyignite.exceptions import ReconnectError -from tests.util import start_ignite_gen, get_client, get_or_create_cache +from tests.util import start_ignite_gen, get_client, get_or_create_cache, get_client_async, get_or_create_cache_async @pytest.fixture(scope='module', autouse=True) @@ -30,27 +30,58 @@ def test_connect_ssl_keystore_with_password(ssl_params_with_password): def test_connect_ssl(ssl_params): __test_connect_ssl(**ssl_params) -def __test_connect_ssl(**kwargs): + +@pytest.mark.asyncio +async def test_connect_ssl_keystore_with_password_async(ssl_params_with_password): + await __test_connect_ssl(is_async=True, 
**ssl_params_with_password) + + +@pytest.mark.asyncio +async def test_connect_ssl_async(ssl_params): + await __test_connect_ssl(is_async=True, **ssl_params) + + +def __test_connect_ssl(is_async=False, **kwargs): kwargs['use_ssl'] = True - with get_client(**kwargs) as client: - client.connect("127.0.0.1", 10801) + def inner(): + with get_client(**kwargs) as client: + client.connect("127.0.0.1", 10801) + + with get_or_create_cache(client, 'test-cache') as cache: + cache.put(1, 1) + + assert cache.get(1) == 1 - with get_or_create_cache(client, 'test-cache') as cache: - cache.put(1, 1) + async def inner_async(): + async with get_client_async(**kwargs) as client: + await client.connect("127.0.0.1", 10801) - assert cache.get(1) == 1 + async with get_or_create_cache_async(client, 'test-cache') as cache: + await cache.put(1, 1) + assert (await cache.get(1)) == 1 -@pytest.mark.parametrize( - 'invalid_ssl_params', - [ - {'use_ssl': False}, - {'use_ssl': True}, - {'use_ssl': True, 'ssl_keyfile': 'invalid.pem', 'ssl_certfile': 'invalid.pem'} - ] -) + return inner_async() if is_async else inner() + + +invalid_params = [ + {'use_ssl': False}, + {'use_ssl': True}, + {'use_ssl': True, 'ssl_keyfile': 'invalid.pem', 'ssl_certfile': 'invalid.pem'} +] + + +@pytest.mark.parametrize('invalid_ssl_params', invalid_params) def test_connection_error_with_incorrect_config(invalid_ssl_params): with pytest.raises(ReconnectError): with get_client(**invalid_ssl_params) as client: client.connect([("127.0.0.1", 10801)]) + + +@pytest.mark.parametrize('invalid_ssl_params', invalid_params) +@pytest.mark.asyncio +async def test_connection_error_with_incorrect_config_async(invalid_ssl_params): + with pytest.raises(ReconnectError): + async with get_client_async(**invalid_ssl_params) as client: + await client.connect([("127.0.0.1", 10801)]) diff --git a/tests/test_cutils.py b/tests/test_cutils.py index e7c095e..d66425f 100644 --- a/tests/test_cutils.py +++ b/tests/test_cutils.py @@ -27,8 +27,8 @@ _cutils_hashcode = _cutils.hashcode _cutils_schema_id = _cutils.schema_id except ImportError: - _cutils_hashcode = lambda x: None - _cutils_schema_id = lambda x: None + _cutils_hashcode = lambda x: None # noqa: E731 + _cutils_schema_id = lambda x: None # noqa: E731 pass diff --git a/tests/util.py b/tests/util.py index af4c324..f1243fc 100644 --- a/tests/util.py +++ b/tests/util.py @@ -12,8 +12,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import asyncio import contextlib import glob +import inspect import os import shutil @@ -24,7 +26,12 @@ import subprocess import time -from pyignite import Client +from pyignite import Client, AioClient + +try: + from contextlib import asynccontextmanager +except ImportError: + from async_generator import asynccontextmanager @contextlib.contextmanager @@ -36,6 +43,15 @@ def get_client(**kwargs): client.close() +@asynccontextmanager +async def get_client_async(**kwargs): + client = AioClient(**kwargs) + try: + yield client + finally: + await client.close() + + @contextlib.contextmanager def get_or_create_cache(client, cache_name): cache = client.get_or_create_cache(cache_name) @@ -45,6 +61,15 @@ def get_or_create_cache(client, cache_name): cache.destroy() +@asynccontextmanager +async def get_or_create_cache_async(client, cache_name): + cache = await client.get_or_create_cache(cache_name) + try: + yield cache + finally: + await cache.destroy() + + def wait_for_condition(condition, interval=0.1, timeout=10, error=None): start = time.time() res = condition() @@ -62,6 +87,23 @@ def wait_for_condition(condition, interval=0.1, timeout=10, error=None): return False +async def wait_for_condition_async(condition, interval=0.1, timeout=10, error=None): + start = time.time() + res = await condition() if inspect.iscoroutinefunction(condition) else condition() + + while not res and time.time() - start < timeout: + await asyncio.sleep(interval) + res = await condition() if inspect.iscoroutinefunction(condition) else condition() + + if res: + return True + + if error is not None: + raise Exception(error) + + return False + + def is_windows(): return os.name == "nt" diff --git a/tox.ini b/tox.ini index 3ab8dea..90153da 100644 --- a/tox.ini +++ b/tox.ini @@ -15,7 +15,15 @@ [tox] skipsdist = True -envlist = py{36,37,38,39} +envlist = codestyle,py{36,37,38,39} + +[flake8] +max-line-length=120 +ignore = F401,F403,F405,F821 + +[testenv:codestyle] +basepython = python3.8 +commands = flake8 [testenv] passenv = TEAMCITY_VERSION IGNITE_HOME From 0bcd77f170f497a4430cf92844108e8390ee68f2 Mon Sep 17 00:00:00 2001 From: Ivan Dashchinskiy Date: Sat, 27 Mar 2021 00:21:02 +0300 Subject: [PATCH 21/62] IGNITE-13862 Add test case for put_all large amount of complex maps This closes #22 --- tests/common/test_key_value.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tests/common/test_key_value.py b/tests/common/test_key_value.py index 0f492a2..6e6df61 100644 --- a/tests/common/test_key_value.py +++ b/tests/common/test_key_value.py @@ -417,3 +417,27 @@ def test_put_get_collection(cache, key, hinted_value, value): async def test_put_get_collection_async(async_cache, key, hinted_value, value): await async_cache.put(key, hinted_value) assert await async_cache.get(key) == value + + +@pytest.fixture +def complex_map(): + return {"test" + str(i): ((MapObject.HASH_MAP, + {"key_1": ((1, ["value_1", 1.0]), CollectionObject), + "key_2": ((1, [["value_2_1", "1.0"], ["value_2_2", "0.25"]]), CollectionObject), + "key_3": ((1, [["value_3_1", "1.0"], ["value_3_2", "0.25"]]), CollectionObject), + "key_4": ((1, [["value_4_1", "1.0"], ["value_4_2", "0.25"]]), CollectionObject), + 'key_5': False, + "key_6": "value_6"}), MapObject) for i in range(10000)} + + +def test_put_all_large_complex_map(cache, complex_map): + cache.put_all(complex_map) + values = cache.get_all(complex_map.keys()) + assert len(values) == len(complex_map) + + +@pytest.mark.asyncio +async def test_put_all_large_complex_map_async(async_cache, 
complex_map): + await async_cache.put_all(complex_map) + values = await async_cache.get_all(complex_map.keys()) + assert len(values) == len(complex_map) From f00d70f4c139980ea31ec8cf5ed07c354a3cf0a6 Mon Sep 17 00:00:00 2001 From: Ivan Dashchinskiy Date: Mon, 29 Mar 2021 13:57:03 +0300 Subject: [PATCH 22/62] IGNITE-14432 Implement connection context managers for clients This closes #23 --- pyignite/aio_client.py | 20 ++++- pyignite/client.py | 15 ++++ tests/affinity/conftest.py | 13 ++- tests/affinity/test_affinity_bad_servers.py | 29 ++++--- .../test_connection_context_manager.py | 83 +++++++++++++++++++ tests/security/test_auth.py | 26 +++--- tests/security/test_ssl.py | 23 ++--- tests/util.py | 18 ---- 8 files changed, 166 insertions(+), 61 deletions(-) create mode 100644 tests/affinity/test_connection_context_manager.py diff --git a/pyignite/aio_client.py b/pyignite/aio_client.py index d882969..d2cc3ff 100644 --- a/pyignite/aio_client.py +++ b/pyignite/aio_client.py @@ -33,6 +33,22 @@ __all__ = ['AioClient'] +class _ConnectionContextManager: + def __init__(self, client, nodes): + self.client = client + self.nodes = nodes + + def __await__(self): + return (yield from self.__aenter__().__await__()) + + async def __aenter__(self): + await self.client._connect(self.nodes) + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.client.close() + + class AioClient(BaseClient): """ Asynchronous Client implementation. @@ -57,14 +73,16 @@ def __init__(self, compact_footer: bool = None, partition_aware: bool = False, * super().__init__(compact_footer, partition_aware, **kwargs) self._registry_mux = asyncio.Lock() - async def connect(self, *args): + def connect(self, *args): """ Connect to Ignite cluster node(s). :param args: (optional) host(s) and port(s) to connect to. """ nodes = self._process_connect_args(*args) + return _ConnectionContextManager(self, nodes) + async def _connect(self, nodes): for i, node in enumerate(nodes): host, port = node conn = AioConnection(self, host, port, **self._connection_args) diff --git a/pyignite/client.py b/pyignite/client.py index e4eef6a..05df617 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -243,6 +243,19 @@ def _get_from_registry(self, type_id, schema): return self._registry[type_id] +class _ConnectionContextManager: + def __init__(self, client, nodes): + self.client = client + self.nodes = nodes + self.client._connect(self.nodes) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.client.close() + + class Client(BaseClient): """ This is a main `pyignite` class, that is build upon the @@ -280,7 +293,9 @@ def connect(self, *args): :param args: (optional) host(s) and port(s) to connect to. 
""" nodes = self._process_connect_args(*args) + return _ConnectionContextManager(self, nodes) + def _connect(self, nodes): # the following code is quite twisted, because the protocol version # is initially unknown diff --git a/tests/affinity/conftest.py b/tests/affinity/conftest.py index 2ec2b1b..e23e0e6 100644 --- a/tests/affinity/conftest.py +++ b/tests/affinity/conftest.py @@ -39,20 +39,25 @@ def server3(): @pytest.fixture -def client(): +def connection_param(): + return [('127.0.0.1', 10800 + i) for i in range(1, 4)] + + +@pytest.fixture +def client(connection_param): client = Client(partition_aware=True, timeout=CLIENT_SOCKET_TIMEOUT) try: - client.connect([('127.0.0.1', 10800 + i) for i in range(1, 4)]) + client.connect(connection_param) yield client finally: client.close() @pytest.fixture -async def async_client(): +async def async_client(connection_param): client = AioClient(partition_aware=True) try: - await client.connect([('127.0.0.1', 10800 + i) for i in range(1, 4)]) + await client.connect(connection_param) yield client finally: await client.close() diff --git a/tests/affinity/test_affinity_bad_servers.py b/tests/affinity/test_affinity_bad_servers.py index b169168..f5eec21 100644 --- a/tests/affinity/test_affinity_bad_servers.py +++ b/tests/affinity/test_affinity_bad_servers.py @@ -15,9 +15,10 @@ import pytest +from pyignite import Client, AioClient from pyignite.exceptions import ReconnectError, connection_errors from tests.affinity.conftest import CLIENT_SOCKET_TIMEOUT -from tests.util import start_ignite, kill_process_tree, get_client, get_client_async +from tests.util import start_ignite, kill_process_tree @pytest.fixture(params=['with-partition-awareness', 'without-partition-awareness']) @@ -27,22 +28,24 @@ def with_partition_awareness(request): def test_client_with_multiple_bad_servers(with_partition_awareness): with pytest.raises(ReconnectError, match="Can not connect."): - with get_client(partition_aware=with_partition_awareness) as client: - client.connect([("127.0.0.1", 10900), ("127.0.0.1", 10901)]) + client = Client(partition_aware=with_partition_awareness) + with client.connect([("127.0.0.1", 10900), ("127.0.0.1", 10901)]): + pass @pytest.mark.asyncio async def test_client_with_multiple_bad_servers_async(with_partition_awareness): with pytest.raises(ReconnectError, match="Can not connect."): - async with get_client_async(partition_aware=with_partition_awareness) as client: - await client.connect([("127.0.0.1", 10900), ("127.0.0.1", 10901)]) + client = AioClient(partition_aware=with_partition_awareness) + async with client.connect([("127.0.0.1", 10900), ("127.0.0.1", 10901)]): + pass def test_client_with_failed_server(request, with_partition_awareness): srv = start_ignite(idx=4) try: - with get_client(partition_aware=with_partition_awareness) as client: - client.connect([("127.0.0.1", 10804)]) + client = Client(partition_aware=with_partition_awareness) + with client.connect([("127.0.0.1", 10804)]): cache = client.get_or_create_cache(request.node.name) cache.put(1, 1) kill_process_tree(srv.pid) @@ -62,8 +65,8 @@ def test_client_with_failed_server(request, with_partition_awareness): async def test_client_with_failed_server_async(request, with_partition_awareness): srv = start_ignite(idx=4) try: - async with get_client_async(partition_aware=with_partition_awareness) as client: - await client.connect([("127.0.0.1", 10804)]) + client = AioClient(partition_aware=with_partition_awareness) + async with client.connect([("127.0.0.1", 10804)]): cache = await 
client.get_or_create_cache(request.node.name) await cache.put(1, 1) kill_process_tree(srv.pid) @@ -82,8 +85,8 @@ async def test_client_with_failed_server_async(request, with_partition_awareness def test_client_with_recovered_server(request, with_partition_awareness): srv = start_ignite(idx=4) try: - with get_client(partition_aware=with_partition_awareness, timeout=CLIENT_SOCKET_TIMEOUT) as client: - client.connect([("127.0.0.1", 10804)]) + client = Client(partition_aware=with_partition_awareness, timeout=CLIENT_SOCKET_TIMEOUT) + with client.connect([("127.0.0.1", 10804)]): cache = client.get_or_create_cache(request.node.name) cache.put(1, 1) @@ -108,8 +111,8 @@ def test_client_with_recovered_server(request, with_partition_awareness): async def test_client_with_recovered_server_async(request, with_partition_awareness): srv = start_ignite(idx=4) try: - async with get_client_async(partition_aware=with_partition_awareness) as client: - await client.connect([("127.0.0.1", 10804)]) + client = AioClient(partition_aware=with_partition_awareness) + async with client.connect([("127.0.0.1", 10804)]): cache = await client.get_or_create_cache(request.node.name) await cache.put(1, 1) diff --git a/tests/affinity/test_connection_context_manager.py b/tests/affinity/test_connection_context_manager.py new file mode 100644 index 0000000..8056c7d --- /dev/null +++ b/tests/affinity/test_connection_context_manager.py @@ -0,0 +1,83 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from pyignite import Client, AioClient + + +@pytest.fixture +def connection_param(): + return [('127.0.0.1', 10800 + i) for i in range(1, 4)] + + +@pytest.mark.parametrize('partition_aware', ['with_partition_aware', 'wo_partition_aware']) +def test_connection_context(connection_param, partition_aware): + is_partition_aware = partition_aware == 'with_partition_aware' + client = Client(partition_aware=is_partition_aware) + + # Check context manager + with client.connect(connection_param): + __check_open(client, is_partition_aware) + __check_closed(client) + + # Check standard way + try: + client.connect(connection_param) + __check_open(client, is_partition_aware) + finally: + client.close() + __check_closed(client) + + +@pytest.mark.asyncio +@pytest.mark.parametrize('partition_aware', ['with_partition_aware', 'wo_partition_aware']) +async def test_connection_context_async(connection_param, partition_aware): + is_partition_aware = partition_aware == 'with_partition_aware' + client = AioClient(partition_aware=is_partition_aware) + + # Check async context manager. + async with client.connect(connection_param): + await __check_open(client, is_partition_aware) + __check_closed(client) + + # Check standard way. 
+ try: + await client.connect(connection_param) + await __check_open(client, is_partition_aware) + finally: + await client.close() + __check_closed(client) + + +def __check_open(client, is_partition_aware): + def inner_sync(): + if is_partition_aware: + assert client.random_node.alive + else: + all(n.alive for n in client._nodes) + + async def inner_async(): + if is_partition_aware: + random_node = await client.random_node() + assert random_node.alive + else: + all(n.alive for n in client._nodes) + + return inner_sync() if isinstance(client, Client) else inner_async() + + +def __check_closed(client): + assert all(not n.alive for n in client._nodes) diff --git a/tests/security/test_auth.py b/tests/security/test_auth.py index 4a1c52d..b02f224 100644 --- a/tests/security/test_auth.py +++ b/tests/security/test_auth.py @@ -14,8 +14,9 @@ # limitations under the License. import pytest +from pyignite import Client, AioClient from pyignite.exceptions import AuthenticationError -from tests.util import start_ignite_gen, clear_ignite_work_dir, get_client, get_client_async +from tests.util import start_ignite_gen, clear_ignite_work_dir DEFAULT_IGNITE_USERNAME = 'ignite' DEFAULT_IGNITE_PASSWORD = 'ignite' @@ -40,21 +41,16 @@ def cleanup(): def test_auth_success(with_ssl, ssl_params): ssl_params['use_ssl'] = with_ssl - - with get_client(username=DEFAULT_IGNITE_USERNAME, password=DEFAULT_IGNITE_PASSWORD, **ssl_params) as client: - client.connect("127.0.0.1", 10801) - + client = Client(username=DEFAULT_IGNITE_USERNAME, password=DEFAULT_IGNITE_PASSWORD, **ssl_params) + with client.connect("127.0.0.1", 10801): assert all(node.alive for node in client._nodes) @pytest.mark.asyncio async def test_auth_success_async(with_ssl, ssl_params): ssl_params['use_ssl'] = with_ssl - - async with get_client_async(username=DEFAULT_IGNITE_USERNAME, password=DEFAULT_IGNITE_PASSWORD, - **ssl_params) as client: - await client.connect("127.0.0.1", 10801) - + client = AioClient(username=DEFAULT_IGNITE_USERNAME, password=DEFAULT_IGNITE_PASSWORD, **ssl_params) + async with client.connect("127.0.0.1", 10801): assert all(node.alive for node in client._nodes) @@ -73,8 +69,9 @@ def test_auth_failed(username, password, with_ssl, ssl_params): ssl_params['use_ssl'] = with_ssl with pytest.raises(AuthenticationError): - with get_client(username=username, password=password, **ssl_params) as client: - client.connect("127.0.0.1", 10801) + client = Client(username=username, password=password, **ssl_params) + with client.connect("127.0.0.1", 10801): + pass @pytest.mark.parametrize( @@ -86,5 +83,6 @@ async def test_auth_failed_async(username, password, with_ssl, ssl_params): ssl_params['use_ssl'] = with_ssl with pytest.raises(AuthenticationError): - async with get_client_async(username=username, password=password, **ssl_params) as client: - await client.connect("127.0.0.1", 10801) + client = AioClient(username=username, password=password, **ssl_params) + async with client.connect("127.0.0.1", 10801): + pass diff --git a/tests/security/test_ssl.py b/tests/security/test_ssl.py index 32db98f..7736864 100644 --- a/tests/security/test_ssl.py +++ b/tests/security/test_ssl.py @@ -14,8 +14,9 @@ # limitations under the License. 
import pytest +from pyignite import Client, AioClient from pyignite.exceptions import ReconnectError -from tests.util import start_ignite_gen, get_client, get_or_create_cache, get_client_async, get_or_create_cache_async +from tests.util import start_ignite_gen, get_or_create_cache, get_or_create_cache_async @pytest.fixture(scope='module', autouse=True) @@ -45,18 +46,16 @@ def __test_connect_ssl(is_async=False, **kwargs): kwargs['use_ssl'] = True def inner(): - with get_client(**kwargs) as client: - client.connect("127.0.0.1", 10801) - + client = Client(**kwargs) + with client.connect("127.0.0.1", 10801): with get_or_create_cache(client, 'test-cache') as cache: cache.put(1, 1) assert cache.get(1) == 1 async def inner_async(): - async with get_client_async(**kwargs) as client: - await client.connect("127.0.0.1", 10801) - + client = AioClient(**kwargs) + async with client.connect("127.0.0.1", 10801): async with get_or_create_cache_async(client, 'test-cache') as cache: await cache.put(1, 1) @@ -75,13 +74,15 @@ async def inner_async(): @pytest.mark.parametrize('invalid_ssl_params', invalid_params) def test_connection_error_with_incorrect_config(invalid_ssl_params): with pytest.raises(ReconnectError): - with get_client(**invalid_ssl_params) as client: - client.connect([("127.0.0.1", 10801)]) + client = Client(**invalid_ssl_params) + with client.connect([("127.0.0.1", 10801)]): + pass @pytest.mark.parametrize('invalid_ssl_params', invalid_params) @pytest.mark.asyncio async def test_connection_error_with_incorrect_config_async(invalid_ssl_params): with pytest.raises(ReconnectError): - async with get_client_async(**invalid_ssl_params) as client: - await client.connect([("127.0.0.1", 10801)]) + client = AioClient(**invalid_ssl_params) + async with client.connect([("127.0.0.1", 10801)]): + pass diff --git a/tests/util.py b/tests/util.py index f1243fc..2ca898b 100644 --- a/tests/util.py +++ b/tests/util.py @@ -34,24 +34,6 @@ from async_generator import asynccontextmanager -@contextlib.contextmanager -def get_client(**kwargs): - client = Client(**kwargs) - try: - yield client - finally: - client.close() - - -@asynccontextmanager -async def get_client_async(**kwargs): - client = AioClient(**kwargs) - try: - yield client - finally: - await client.close() - - @contextlib.contextmanager def get_or_create_cache(client, cache_name): cache = client.get_or_create_cache(cache_name) From a7392fcfd5f56641d272fc12467b956635ca8fa7 Mon Sep 17 00:00:00 2001 From: Ivan Dashchinskiy Date: Mon, 29 Mar 2021 14:06:12 +0300 Subject: [PATCH 23/62] IGNITE-14429 Fix cache.get_size with non-default PeekModes This closes #24 --- pyignite/aio_cache.py | 4 +-- pyignite/api/key_value.py | 32 ++++++++++-------- pyignite/cache.py | 4 +-- pyignite/datatypes/key_value.py | 17 +++++----- tests/common/test_cache_size.py | 60 +++++++++++++++++++++++++++++++++ tests/util.py | 8 ++--- 6 files changed, 94 insertions(+), 31 deletions(-) create mode 100644 tests/common/test_cache_size.py diff --git a/pyignite/aio_cache.py b/pyignite/aio_cache.py index b92a14c..a2af0a7 100644 --- a/pyignite/aio_cache.py +++ b/pyignite/aio_cache.py @@ -572,13 +572,13 @@ async def replace_if_equals(self, key, sample, value, key_hint=None, sample_hint return result @status_to_exception(CacheError) - async def get_size(self, peek_modes=0): + async def get_size(self, peek_modes=None): """ Gets the number of entries in cache. 
:param peek_modes: (optional) limit count to near cache partition (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache - (PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL), + (PeekModes.BACKUP). Defaults to primary cache partitions (PeekModes.PRIMARY), :return: integer number of cache entries. """ conn = await self.get_best_node() diff --git a/pyignite/api/key_value.py b/pyignite/api/key_value.py index 6d5663c..9fb13bb 100644 --- a/pyignite/api/key_value.py +++ b/pyignite/api/key_value.py @@ -23,9 +23,8 @@ OP_CACHE_CLEAR_KEYS, OP_CACHE_REMOVE_KEY, OP_CACHE_REMOVE_IF_EQUALS, OP_CACHE_REMOVE_KEYS, OP_CACHE_REMOVE_ALL, OP_CACHE_GET_SIZE, OP_CACHE_LOCAL_PEEK ) -from pyignite.datatypes import Map, Bool, Byte, Int, Long, AnyDataArray, AnyDataObject +from pyignite.datatypes import Map, Bool, Byte, Int, Long, AnyDataArray, AnyDataObject, ByteArray from pyignite.datatypes.base import IgniteDataType -from pyignite.datatypes.key_value import PeekModes from pyignite.queries import Query, query_perform from pyignite.utils import cache_id @@ -1128,7 +1127,7 @@ def __cache_remove_all(connection, cache, binary, query_id): ) -def cache_get_size(connection: 'Connection', cache: Union[str, int], peek_modes: Union[int, list, tuple] = 0, +def cache_get_size(connection: 'Connection', cache: Union[str, int], peek_modes: Union[int, list, tuple] = None, binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': """ Gets the number of entries in cache. @@ -1137,7 +1136,7 @@ def cache_get_size(connection: 'Connection', cache: Union[str, int], peek_modes: :param cache: name or ID of the cache, :param peek_modes: (optional) limit count to near cache partition (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache - (PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL), + (PeekModes.BACKUP). Defaults to pimary cache partitions (PeekModes.PRIMARY), :param binary: (optional) pass True to keep the value in binary form. False by default, :param query_id: (optional) a value generated by client and returned as-is @@ -1151,21 +1150,23 @@ def cache_get_size(connection: 'Connection', cache: Union[str, int], peek_modes: async def cache_get_size_async(connection: 'AioConnection', cache: Union[str, int], - peek_modes: Union[int, list, tuple] = 0, binary: bool = False, + peek_modes: Union[int, list, tuple] = None, binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': return await __cache_get_size(connection, cache, peek_modes, binary, query_id) def __cache_get_size(connection, cache, peek_modes, binary, query_id): - if not isinstance(peek_modes, (list, tuple)): - peek_modes = [peek_modes] if peek_modes else [] + if peek_modes is None: + peek_modes = [] + elif not isinstance(peek_modes, (list, tuple)): + peek_modes = [peek_modes] query_struct = Query( OP_CACHE_GET_SIZE, [ ('hash_code', Int), ('flag', Byte), - ('peek_modes', PeekModes), + ('peek_modes', ByteArray), ], query_id=query_id, ) @@ -1184,7 +1185,7 @@ def __cache_get_size(connection, cache, peek_modes, binary, query_id): def cache_local_peek(conn: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, - peek_modes: Union[int, list, tuple] = 0, binary: bool = False, + peek_modes: Union[int, list, tuple] = None, binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': """ Peeks at in-memory cached value using default optional peek mode. 
@@ -1199,7 +1200,7 @@ def cache_local_peek(conn: 'Connection', cache: Union[str, int], key: Any, key_h should be converted, :param peek_modes: (optional) limit count to near cache partition (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache - (PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL), + (PeekModes.BACKUP). Defaults to primary cache partitions (PeekModes.PRIMARY), :param binary: (optional) pass True to keep the value in binary form. False by default, :param query_id: (optional) a value generated by client and returned as-is @@ -1213,7 +1214,8 @@ def cache_local_peek(conn: 'Connection', cache: Union[str, int], key: Any, key_h async def cache_local_peek_async( conn: 'AioConnection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, - peek_modes: Union[int, list, tuple] = 0, binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + peek_modes: Union[int, list, tuple] = None, binary: bool = False, + query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_local_peek. """ @@ -1221,8 +1223,10 @@ async def cache_local_peek_async( def __cache_local_peek(conn, cache, key, key_hint, peek_modes, binary, query_id): - if not isinstance(peek_modes, (list, tuple)): - peek_modes = [peek_modes] if peek_modes else [] + if peek_modes is None: + peek_modes = [] + elif not isinstance(peek_modes, (list, tuple)): + peek_modes = [peek_modes] query_struct = Query( OP_CACHE_LOCAL_PEEK, @@ -1230,7 +1234,7 @@ def __cache_local_peek(conn, cache, key, key_hint, peek_modes, binary, query_id) ('hash_code', Int), ('flag', Byte), ('key', key_hint or AnyDataObject), - ('peek_modes', PeekModes), + ('peek_modes', ByteArray), ], query_id=query_id, ) diff --git a/pyignite/cache.py b/pyignite/cache.py index 5fba6fb..2602d1c 100644 --- a/pyignite/cache.py +++ b/pyignite/cache.py @@ -694,13 +694,13 @@ def replace_if_equals( return result @status_to_exception(CacheError) - def get_size(self, peek_modes=0): + def get_size(self, peek_modes=None): """ Gets the number of entries in cache. :param peek_modes: (optional) limit count to near cache partition (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache - (PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL), + (PeekModes.BACKUP). Defaults to primary cache partitions (PeekModes.PRIMARY), :return: integer number of cache entries. """ return cache_get_size( diff --git a/pyignite/datatypes/key_value.py b/pyignite/datatypes/key_value.py index ee2ae7b..46ac07d 100644 --- a/pyignite/datatypes/key_value.py +++ b/pyignite/datatypes/key_value.py @@ -13,14 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .primitive_arrays import ByteArray +from enum import IntEnum -class PeekModes(ByteArray): - - ALL = 1 - NEAR = 2 - PRIMARY = 4 - BACKUP = 8 - ONHEAP = 16 - OFFHEAP = 32 +class PeekModes(IntEnum): + ALL = 0 + NEAR = 1 + PRIMARY = 2 + BACKUP = 3 + ONHEAP = 4 + OFFHEAP = 5 diff --git a/tests/common/test_cache_size.py b/tests/common/test_cache_size.py new file mode 100644 index 0000000..d134903 --- /dev/null +++ b/tests/common/test_cache_size.py @@ -0,0 +1,60 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from pyignite.datatypes.key_value import PeekModes +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_IS_ONHEAP_CACHE_ENABLED, PROP_BACKUPS_NUMBER +from tests.util import get_or_create_cache, get_or_create_cache_async + +test_params = [ + [ + { + PROP_NAME: 'cache_onheap_backups_2', + PROP_IS_ONHEAP_CACHE_ENABLED: True, + PROP_BACKUPS_NUMBER: 2 + }, + [ + [None, 1], + [PeekModes.PRIMARY, 1], + [PeekModes.BACKUP, 2], + [PeekModes.ALL, 3], + [[PeekModes.PRIMARY, PeekModes.BACKUP], 3], + [PeekModes.ONHEAP, 1], + [PeekModes.OFFHEAP, 1] + ] + ] +] + + +@pytest.mark.parametrize("cache_settings, cache_sizes", test_params) +def test_cache_size(client, cache_settings, cache_sizes): + with get_or_create_cache(client, cache_settings) as cache: + cache.put(1, 1) + + for props, exp_value in cache_sizes: + value = cache.get_size(props) + assert value == exp_value, f"expected {exp_value} for {props}, got {value} instead." + + +@pytest.mark.asyncio +@pytest.mark.parametrize("cache_settings, cache_sizes", test_params) +async def test_cache_size_async(async_client, cache_settings, cache_sizes): + async with get_or_create_cache_async(async_client, cache_settings) as cache: + await cache.put(1, 1) + + for props, exp_value in cache_sizes: + value = await cache.get_size(props) + assert value == exp_value, f"expected {exp_value} for {props}, got {value} instead." 
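The test added above exercises the reworked `cache.get_size` defaults from this patch: with `peek_modes=None` only primary partitions are counted, while a single mode or a list of modes can still be passed explicitly. A minimal usage sketch, assuming an Ignite node listening on 127.0.0.1:10800 (for example, the one started from the bundled `docker-compose.yml`); the cache name is hypothetical:

```python
# Minimal sketch, not taken from the patch; cache name and printed values are illustrative.
from pyignite import Client
from pyignite.datatypes.key_value import PeekModes
from pyignite.datatypes.prop_codes import PROP_NAME, PROP_BACKUPS_NUMBER

client = Client()
with client.connect('127.0.0.1', 10800):
    cache = client.get_or_create_cache({
        PROP_NAME: 'size_demo',      # hypothetical cache name
        PROP_BACKUPS_NUMBER: 2,
    })
    try:
        cache.put(1, 1)

        print(cache.get_size())                # primary partitions only (the new default)
        print(cache.get_size(PeekModes.ALL))   # primary entry plus backup copies
        print(cache.get_size([PeekModes.PRIMARY, PeekModes.BACKUP]))
    finally:
        cache.destroy()
```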
diff --git a/tests/util.py b/tests/util.py index 2ca898b..064ac7a 100644 --- a/tests/util.py +++ b/tests/util.py @@ -35,8 +35,8 @@ @contextlib.contextmanager -def get_or_create_cache(client, cache_name): - cache = client.get_or_create_cache(cache_name) +def get_or_create_cache(client, settings): + cache = client.get_or_create_cache(settings) try: yield cache finally: @@ -44,8 +44,8 @@ def get_or_create_cache(client, cache_name): @asynccontextmanager -async def get_or_create_cache_async(client, cache_name): - cache = await client.get_or_create_cache(cache_name) +async def get_or_create_cache_async(client, settings): + cache = await client.get_or_create_cache(settings) try: yield cache finally: From 2fd7fda79d756b760ffa40a3afccb28db7a3b11e Mon Sep 17 00:00:00 2001 From: Ivan Dashchinskiy Date: Tue, 30 Mar 2021 12:55:33 +0300 Subject: [PATCH 24/62] IGNITE-13405 Fix cache configuration serialization/deserialization This closes #25 --- pyignite/datatypes/cache_config.py | 4 +- pyignite/datatypes/cache_properties.py | 2 +- pyignite/datatypes/prop_codes.py | 2 - tests/common/test_cache_config.py | 108 ++++++++++++++++++++----- tests/config/ignite-config.xml.jinja2 | 29 ++++--- 5 files changed, 112 insertions(+), 33 deletions(-) diff --git a/pyignite/datatypes/cache_config.py b/pyignite/datatypes/cache_config.py index 67b353d..04ff607 100644 --- a/pyignite/datatypes/cache_config.py +++ b/pyignite/datatypes/cache_config.py @@ -120,16 +120,16 @@ class CacheAtomicityMode(Int): cache_config_struct = Struct([ ('length', Int), + ('cache_atomicity_mode', CacheAtomicityMode), ('backups_number', Int), ('cache_mode', CacheMode), - ('cache_atomicity_mode', CacheAtomicityMode), ('copy_on_read', Bool), ('data_region_name', String), ('eager_ttl', Bool), ('statistics_enabled', Bool), ('group_name', String), - ('invalidate', Int), ('default_lock_timeout', Long), + ('max_concurrent_async_operations', Int), ('max_query_iterators', Int), ('name', String), ('is_onheap_cache_enabled', Bool), diff --git a/pyignite/datatypes/cache_properties.py b/pyignite/datatypes/cache_properties.py index 127b6f3..d924507 100644 --- a/pyignite/datatypes/cache_properties.py +++ b/pyignite/datatypes/cache_properties.py @@ -67,7 +67,7 @@ def prop_map(code: int): PROP_CACHE_KEY_CONFIGURATION: PropCacheKeyConfiguration, PROP_DEFAULT_LOCK_TIMEOUT: PropDefaultLockTimeout, PROP_MAX_CONCURRENT_ASYNC_OPERATIONS: PropMaxConcurrentAsyncOperation, - PROP_PARTITION_LOSS_POLICY: PartitionLossPolicy, + PROP_PARTITION_LOSS_POLICY: PropPartitionLossPolicy, PROP_EAGER_TTL: PropEagerTTL, PROP_STATISTICS_ENABLED: PropStatisticsEnabled, }[code] diff --git a/pyignite/datatypes/prop_codes.py b/pyignite/datatypes/prop_codes.py index adea281..72ffce1 100644 --- a/pyignite/datatypes/prop_codes.py +++ b/pyignite/datatypes/prop_codes.py @@ -47,5 +47,3 @@ PROP_PARTITION_LOSS_POLICY = 404 PROP_EAGER_TTL = 405 PROP_STATISTICS_ENABLED = 406 - -PROP_INVALIDATE = -1 diff --git a/tests/common/test_cache_config.py b/tests/common/test_cache_config.py index f4c8067..e68eef5 100644 --- a/tests/common/test_cache_config.py +++ b/tests/common/test_cache_config.py @@ -12,24 +12,88 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ +from inspect import getmembers + +import pyignite import pytest -from pyignite.datatypes.prop_codes import PROP_NAME, PROP_CACHE_KEY_CONFIGURATION +from pyignite.datatypes.cache_config import ( + CacheMode, CacheAtomicityMode, WriteSynchronizationMode, PartitionLossPolicy, RebalanceMode +) +from pyignite.datatypes.prop_codes import ( + PROP_NAME, PROP_CACHE_KEY_CONFIGURATION, PROP_CACHE_MODE, PROP_CACHE_ATOMICITY_MODE, PROP_BACKUPS_NUMBER, + PROP_WRITE_SYNCHRONIZATION_MODE, PROP_COPY_ON_READ, PROP_READ_FROM_BACKUP, PROP_DATA_REGION_NAME, + PROP_IS_ONHEAP_CACHE_ENABLED, PROP_GROUP_NAME, PROP_DEFAULT_LOCK_TIMEOUT, PROP_MAX_CONCURRENT_ASYNC_OPERATIONS, + PROP_PARTITION_LOSS_POLICY, PROP_EAGER_TTL, PROP_STATISTICS_ENABLED, PROP_REBALANCE_MODE, PROP_REBALANCE_DELAY, + PROP_REBALANCE_TIMEOUT, PROP_REBALANCE_BATCH_SIZE, PROP_REBALANCE_BATCHES_PREFETCH_COUNT, PROP_REBALANCE_ORDER, + PROP_REBALANCE_THROTTLE, PROP_QUERY_ENTITIES, PROP_QUERY_PARALLELISM, PROP_QUERY_DETAIL_METRIC_SIZE, + PROP_SQL_SCHEMA, PROP_SQL_INDEX_INLINE_MAX_SIZE, PROP_SQL_ESCAPE_ALL, PROP_MAX_QUERY_ITERATORS +) from pyignite.exceptions import CacheError cache_name = 'config_cache' @pytest.fixture -def cache_config(): +def test_cache_settings(): return { PROP_NAME: cache_name, + PROP_CACHE_MODE: CacheMode.PARTITIONED, + PROP_CACHE_ATOMICITY_MODE: CacheAtomicityMode.TRANSACTIONAL, + PROP_BACKUPS_NUMBER: 2, + PROP_WRITE_SYNCHRONIZATION_MODE: WriteSynchronizationMode.FULL_SYNC, + PROP_COPY_ON_READ: True, + PROP_READ_FROM_BACKUP: True, + PROP_DATA_REGION_NAME: 'SmallDataRegion', + PROP_IS_ONHEAP_CACHE_ENABLED: True, + PROP_QUERY_ENTITIES: [{ + 'table_name': cache_name + '_table', + 'key_field_name': 'KEY', + 'key_type_name': 'java.lang.String', + 'value_field_name': 'VAL', + 'value_type_name': 'java.lang.String', + 'field_name_aliases': [ + {'alias': 'val', 'field_name': 'VAL'}, + {'alias': 'key', 'field_name': 'KEY'} + ], + 'query_fields': [ + { + 'name': 'KEY', + 'type_name': 'java.lang.String' + }, + { + 'name': 'VAL', + 'type_name': 'java.lang.String' + } + ], + 'query_indexes': [] + }], + PROP_QUERY_PARALLELISM: 20, + PROP_QUERY_DETAIL_METRIC_SIZE: 10, + PROP_SQL_SCHEMA: 'PUBLIC', + PROP_SQL_INDEX_INLINE_MAX_SIZE: 1024, + PROP_SQL_ESCAPE_ALL: True, + PROP_MAX_QUERY_ITERATORS: 200, + PROP_REBALANCE_MODE: RebalanceMode.SYNC, + PROP_REBALANCE_DELAY: 1000, + PROP_REBALANCE_TIMEOUT: 5000, + PROP_REBALANCE_BATCH_SIZE: 100, + PROP_REBALANCE_BATCHES_PREFETCH_COUNT: 10, + PROP_REBALANCE_ORDER: 3, + PROP_REBALANCE_THROTTLE: 10, + PROP_GROUP_NAME: cache_name + '_group', PROP_CACHE_KEY_CONFIGURATION: [ { - 'type_name': 'blah', + 'type_name': 'java.lang.String', 'affinity_key_field_name': 'abc1234', } ], + PROP_DEFAULT_LOCK_TIMEOUT: 3000, + PROP_MAX_CONCURRENT_ASYNC_OPERATIONS: 100, + PROP_PARTITION_LOSS_POLICY: PartitionLossPolicy.READ_WRITE_ALL, + PROP_EAGER_TTL: True, + PROP_STATISTICS_ENABLED: True } @@ -48,15 +112,15 @@ async def async_cache(async_client): @pytest.fixture -def cache_with_config(client, cache_config): - cache = client.get_or_create_cache(cache_config) +def cache_with_config(client, test_cache_settings): + cache = client.get_or_create_cache(test_cache_settings) yield cache cache.destroy() @pytest.fixture -async def async_cache_with_config(async_client, cache_config): - cache = await async_client.get_or_create_cache(cache_config) +async def async_cache_with_config(async_client, test_cache_settings): + cache = await async_client.get_or_create_cache(test_cache_settings) yield cache await cache.destroy() @@ -72,44 +136,50 
@@ async def test_cache_get_configuration_async(async_client, async_cache): assert (await async_cache.settings())[PROP_NAME] == cache_name -def test_get_or_create_with_config_existing(client, cache_with_config, cache_config): +def test_get_or_create_with_config_existing(client, cache_with_config, test_cache_settings): assert cache_name in client.get_cache_names() with pytest.raises(CacheError): - client.create_cache(cache_config) + client.create_cache(test_cache_settings) - cache = client.get_or_create_cache(cache_config) + cache = client.get_or_create_cache(test_cache_settings) assert cache.settings == cache_with_config.settings @pytest.mark.asyncio -async def test_get_or_create_with_config_existing_async(async_client, async_cache_with_config, cache_config): +async def test_get_or_create_with_config_existing_async(async_client, async_cache_with_config, test_cache_settings): assert cache_name in (await async_client.get_cache_names()) with pytest.raises(CacheError): - await async_client.create_cache(cache_config) + await async_client.create_cache(test_cache_settings) - cache = await async_client.get_or_create_cache(cache_config) + cache = await async_client.get_or_create_cache(test_cache_settings) assert (await cache.settings()) == (await async_cache_with_config.settings()) +ALL_PROPS = {name: value for name, value in getmembers(pyignite.datatypes.prop_codes) if name.startswith('PROP')} + -def test_get_or_create_with_config_new(client, cache_config): +def test_get_or_create_with_config_new(client, test_cache_settings): assert cache_name not in client.get_cache_names() - cache = client.get_or_create_cache(cache_config) + cache = client.get_or_create_cache(test_cache_settings) try: assert cache_name in client.get_cache_names() - assert cache.settings[PROP_NAME] == cache_name + real_cache_settings = cache.settings + assert real_cache_settings == test_cache_settings + assert set(real_cache_settings.keys()) == set(ALL_PROPS.values()) finally: cache.destroy() @pytest.mark.asyncio -async def test_get_or_create_with_config_new_async(async_client, cache_config): +async def test_get_or_create_with_config_new_async(async_client, test_cache_settings): assert cache_name not in (await async_client.get_cache_names()) - cache = await async_client.get_or_create_cache(cache_config) + cache = await async_client.get_or_create_cache(test_cache_settings) try: assert cache_name in (await async_client.get_cache_names()) - assert (await cache.settings())[PROP_NAME] == cache_name + real_cache_settings = await cache.settings() + assert real_cache_settings == test_cache_settings + assert set(real_cache_settings.keys()) == set(ALL_PROPS.values()) finally: await cache.destroy() diff --git a/tests/config/ignite-config.xml.jinja2 b/tests/config/ignite-config.xml.jinja2 index 85daf0f..2bf5129 100644 --- a/tests/config/ignite-config.xml.jinja2 +++ b/tests/config/ignite-config.xml.jinja2 @@ -27,20 +27,31 @@ http://www.springframework.org/schema/util/spring-util.xsd"> - {% if use_auth %} - - - + + + + + {% if use_auth %} + + {% endif %} + + + + - + + - - - + + + + - + {% if use_auth %} + {% endif %} + {% if use_ssl %} {% endif %} From 7cbfe324eb2fb16335b51191cbed9b0e1dc8c88d Mon Sep 17 00:00:00 2001 From: Ivan Dashchinskiy Date: Wed, 31 Mar 2021 15:49:07 +0300 Subject: [PATCH 25/62] IGNITE-14444 Move affinity mapping storage and best node calculation to clients This closes #26 --- pyignite/aio_cache.py | 183 +++---------- pyignite/aio_client.py | 92 ++++++- pyignite/cache.py | 240 ++++-------------- pyignite/client.py | 161 
+++++++++++- tests/affinity/test_affinity.py | 4 +- .../affinity/test_affinity_request_routing.py | 136 +++++++++- tests/common/test_cache_class.py | 4 +- tests/common/test_cache_size.py | 8 +- tests/util.py | 1 - 9 files changed, 476 insertions(+), 353 deletions(-) diff --git a/pyignite/aio_cache.py b/pyignite/aio_cache.py index a2af0a7..24d4bce 100644 --- a/pyignite/aio_cache.py +++ b/pyignite/aio_cache.py @@ -13,15 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. import asyncio -from typing import Any, Dict, Iterable, Optional, Union +from typing import Any, Iterable, Optional, Union -from .constants import AFFINITY_RETRIES, AFFINITY_DELAY -from .connection import AioConnection -from .datatypes import prop_codes -from .datatypes.base import IgniteDataType from .datatypes.internal import AnyDataObject -from .exceptions import CacheCreationError, CacheError, ParameterError, connection_errors -from .utils import cache_id, status_to_exception +from .exceptions import CacheCreationError, CacheError, ParameterError +from .utils import status_to_exception from .api.cache_config import ( cache_create_async, cache_get_or_create_async, cache_destroy_async, cache_get_configuration_async, cache_create_with_config_async, cache_get_or_create_with_config_async @@ -34,8 +30,7 @@ cache_remove_if_equals_async, cache_replace_if_equals_async, cache_get_size_async, ) from .cursors import AioScanCursor -from .api.affinity import cache_get_node_partitions_async -from .cache import __parse_settings, BaseCacheMixin +from .cache import __parse_settings, BaseCache async def get_cache(client: 'AioClient', settings: Union[str, dict]) -> 'AioCache': @@ -76,13 +71,13 @@ async def get_or_create_cache(client: 'AioClient', settings: Union[str, dict]) - return AioCache(client, name) -class AioCache(BaseCacheMixin): +class AioCache(BaseCache): """ Ignite cache abstraction. Users should never use this class directly, but construct its instances with - :py:meth:`~pyignite.client.Client.create_cache`, - :py:meth:`~pyignite.client.Client.get_or_create_cache` or - :py:meth:`~pyignite.client.Client.get_cache` methods instead. See + :py:meth:`~pyignite.aio_client.AioClient.create_cache`, + :py:meth:`~pyignite.aio_client.AioClient.get_or_create_cache` or + :py:meth:`~pyignite.aio_client.AioClient.get_cache` methods instead. See :ref:`this example ` on how to do it. """ def __init__(self, client: 'AioClient', name: str): @@ -92,12 +87,10 @@ def __init__(self, client: 'AioClient', name: str): :param client: Async Ignite client, :param name: Cache name. """ - self._client = client - self._name = name - self._cache_id = cache_id(self._name) - self._settings = None - self._affinity_query_mux = asyncio.Lock() - self.affinity = {'version': (0, 0)} + super().__init__(client, name) + + async def _get_best_node(self, key=None, key_hint=None): + return await self.client.get_best_node(self._cache_id, key, key_hint) async def settings(self) -> Optional[dict]: """ @@ -109,7 +102,7 @@ async def settings(self) -> Optional[dict]: :return: dict of cache properties and their values. """ if self._settings is None: - conn = await self.get_best_node() + conn = await self._get_best_node() config_result = await cache_get_configuration_async(conn, self._cache_id) if config_result.status == 0: @@ -119,120 +112,14 @@ async def settings(self) -> Optional[dict]: return self._settings - async def name(self) -> str: - """ - Lazy cache name. - - :return: cache name string. 
- """ - if self._name is None: - settings = await self.settings() - self._name = settings[prop_codes.PROP_NAME] - - return self._name - - @property - def client(self) -> 'AioClient': - """ - Ignite :class:`~pyignite.aio_client.AioClient` object. - - :return: Async client object, through which the cache is accessed. - """ - return self._client - - @property - def cache_id(self) -> int: - """ - Cache ID. - - :return: integer value of the cache ID. - """ - return self._cache_id - @status_to_exception(CacheError) async def destroy(self): """ Destroys cache with a given name. """ - conn = await self.get_best_node() + conn = await self._get_best_node() return await cache_destroy_async(conn, self._cache_id) - @status_to_exception(CacheError) - async def _get_affinity(self, conn: 'AioConnection') -> Dict: - """ - Queries server for affinity mappings. Retries in case - of an intermittent error (most probably “Getting affinity for topology - version earlier than affinity is calculated”). - - :param conn: connection to Igneite server, - :return: OP_CACHE_PARTITIONS operation result value. - """ - for _ in range(AFFINITY_RETRIES or 1): - result = await cache_get_node_partitions_async(conn, self._cache_id) - if result.status == 0 and result.value['partition_mapping']: - break - await asyncio.sleep(AFFINITY_DELAY) - - return result - - async def get_best_node(self, key: Any = None, key_hint: 'IgniteDataType' = None) -> 'AioConnection': - """ - Returns the node from the list of the nodes, opened by client, that - most probably contains the needed key-value pair. See IEP-23. - - This method is not a part of the public API. Unless you wish to - extend the `pyignite` capabilities (with additional testing, logging, - examining connections, et c.) you probably should not use it. - - :param key: (optional) pythonic key, - :param key_hint: (optional) Ignite data type, for which the given key - should be converted, - :return: Ignite connection object. 
- """ - conn = await self._client.random_node() - - if self.client.partition_aware and key is not None: - if self.__should_update_mapping(): - async with self._affinity_query_mux: - while self.__should_update_mapping(): - try: - full_affinity = await self._get_affinity(conn) - self._update_affinity(full_affinity) - - asyncio.ensure_future( - asyncio.gather( - *[conn.reconnect() for conn in self.client._nodes if not conn.alive], - return_exceptions=True - ) - ) - - break - except connection_errors: - # retry if connection failed - conn = await self._client.random_node() - pass - except CacheError: - # server did not create mapping in time - return conn - - parts = self.affinity.get('number_of_partitions') - - if not parts: - return conn - - key, key_hint = self._get_affinity_key(key, key_hint) - - hashcode = await key_hint.hashcode_async(key, self._client) - - best_node = self._get_node_by_hashcode(hashcode, parts) - if best_node: - return best_node - - return conn - - def __should_update_mapping(self): - return self.affinity['version'] < self._client.affinity_version - @status_to_exception(CacheError) async def get(self, key, key_hint: object = None) -> Any: """ @@ -246,7 +133,7 @@ async def get(self, key, key_hint: object = None) -> Any: if key_hint is None: key_hint = AnyDataObject.map_python_type(key) - conn = await self.get_best_node(key, key_hint) + conn = await self._get_best_node(key, key_hint) result = await cache_get_async(conn, self._cache_id, key, key_hint=key_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -267,7 +154,7 @@ async def put(self, key, value, key_hint: object = None, value_hint: object = No if key_hint is None: key_hint = AnyDataObject.map_python_type(key) - conn = await self.get_best_node(key, key_hint) + conn = await self._get_best_node(key, key_hint) return await cache_put_async(conn, self._cache_id, key, value, key_hint=key_hint, value_hint=value_hint) @status_to_exception(CacheError) @@ -278,7 +165,7 @@ async def get_all(self, keys: list) -> list: :param keys: list of keys or tuples of (key, key_hint), :return: a dict of key-value pairs. """ - conn = await self.get_best_node() + conn = await self._get_best_node() result = await cache_get_all_async(conn, self._cache_id, keys) if result.value: keys = list(result.value.keys()) @@ -298,7 +185,7 @@ async def put_all(self, pairs: dict): to save. Each key or value can be an item of representable Python type or a tuple of (item, hint), """ - conn = await self.get_best_node() + conn = await self._get_best_node() return await cache_put_all_async(conn, self._cache_id, pairs) @status_to_exception(CacheError) @@ -316,7 +203,7 @@ async def replace(self, key, value, key_hint: object = None, value_hint: object if key_hint is None: key_hint = AnyDataObject.map_python_type(key) - conn = await self.get_best_node(key, key_hint) + conn = await self._get_best_node(key, key_hint) result = await cache_replace_async(conn, self._cache_id, key, value, key_hint=key_hint, value_hint=value_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -329,7 +216,7 @@ async def clear(self, keys: Optional[list] = None): :param keys: (optional) list of cache keys or (key, key type hint) tuples to clear (default: clear all). 
""" - conn = await self.get_best_node() + conn = await self._get_best_node() if keys: return await cache_clear_keys_async(conn, self._cache_id, keys) else: @@ -347,7 +234,7 @@ async def clear_key(self, key, key_hint: object = None): if key_hint is None: key_hint = AnyDataObject.map_python_type(key) - conn = await self.get_best_node(key, key_hint) + conn = await self._get_best_node(key, key_hint) return await cache_clear_key_async(conn, self._cache_id, key, key_hint=key_hint) @status_to_exception(CacheError) @@ -357,7 +244,7 @@ async def clear_keys(self, keys: Iterable): :param keys: a list of keys or (key, type hint) tuples """ - conn = await self.get_best_node() + conn = await self._get_best_node() return await cache_clear_keys_async(conn, self._cache_id, keys) @status_to_exception(CacheError) @@ -373,7 +260,7 @@ async def contains_key(self, key, key_hint=None) -> bool: if key_hint is None: key_hint = AnyDataObject.map_python_type(key) - conn = await self.get_best_node(key, key_hint) + conn = await self._get_best_node(key, key_hint) return await cache_contains_key_async(conn, self._cache_id, key, key_hint=key_hint) @status_to_exception(CacheError) @@ -384,7 +271,7 @@ async def contains_keys(self, keys: Iterable) -> bool: :param keys: a list of keys or (key, type hint) tuples, :return: boolean `True` when all keys are present, `False` otherwise. """ - conn = await self.get_best_node() + conn = await self._get_best_node() return await cache_contains_keys_async(conn, self._cache_id, keys) @status_to_exception(CacheError) @@ -404,7 +291,7 @@ async def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: if key_hint is None: key_hint = AnyDataObject.map_python_type(key) - conn = await self.get_best_node(key, key_hint) + conn = await self._get_best_node(key, key_hint) result = await cache_get_and_put_async(conn, self._cache_id, key, value, key_hint, value_hint) result.value = await self.client.unwrap_binary(result.value) @@ -427,7 +314,7 @@ async def get_and_put_if_absent(self, key, value, key_hint=None, value_hint=None if key_hint is None: key_hint = AnyDataObject.map_python_type(key) - conn = await self.get_best_node(key, key_hint) + conn = await self._get_best_node(key, key_hint) result = await cache_get_and_put_if_absent_async(conn, self._cache_id, key, value, key_hint, value_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -448,7 +335,7 @@ async def put_if_absent(self, key, value, key_hint=None, value_hint=None): if key_hint is None: key_hint = AnyDataObject.map_python_type(key) - conn = await self.get_best_node(key, key_hint) + conn = await self._get_best_node(key, key_hint) return await cache_put_if_absent_async(conn, self._cache_id, key, value, key_hint, value_hint) @status_to_exception(CacheError) @@ -464,7 +351,7 @@ async def get_and_remove(self, key, key_hint=None) -> Any: if key_hint is None: key_hint = AnyDataObject.map_python_type(key) - conn = await self.get_best_node(key, key_hint) + conn = await self._get_best_node(key, key_hint) result = await cache_get_and_remove_async(conn, self._cache_id, key, key_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -487,7 +374,7 @@ async def get_and_replace(self, key, value, key_hint=None, value_hint=None) -> A if key_hint is None: key_hint = AnyDataObject.map_python_type(key) - conn = await self.get_best_node(key, key_hint) + conn = await self._get_best_node(key, key_hint) result = await cache_get_and_replace_async(conn, self._cache_id, key, value, 
key_hint, value_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -504,7 +391,7 @@ async def remove_key(self, key, key_hint=None): if key_hint is None: key_hint = AnyDataObject.map_python_type(key) - conn = await self.get_best_node(key, key_hint) + conn = await self._get_best_node(key, key_hint) return await cache_remove_key_async(conn, self._cache_id, key, key_hint) @status_to_exception(CacheError) @@ -515,7 +402,7 @@ async def remove_keys(self, keys: list): :param keys: list of keys or tuples of (key, key_hint) to remove. """ - conn = await self.get_best_node() + conn = await self._get_best_node() return await cache_remove_keys_async(conn, self._cache_id, keys) @status_to_exception(CacheError) @@ -523,7 +410,7 @@ async def remove_all(self): """ Removes all cache entries, notifying listeners and cache writers. """ - conn = await self.get_best_node() + conn = await self._get_best_node() return await cache_remove_all_async(conn, self._cache_id) @status_to_exception(CacheError) @@ -542,7 +429,7 @@ async def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): if key_hint is None: key_hint = AnyDataObject.map_python_type(key) - conn = await self.get_best_node(key, key_hint) + conn = await self._get_best_node(key, key_hint) return await cache_remove_if_equals_async(conn, self._cache_id, key, sample, key_hint, sample_hint) @status_to_exception(CacheError) @@ -565,7 +452,7 @@ async def replace_if_equals(self, key, sample, value, key_hint=None, sample_hint if key_hint is None: key_hint = AnyDataObject.map_python_type(key) - conn = await self.get_best_node(key, key_hint) + conn = await self._get_best_node(key, key_hint) result = await cache_replace_if_equals_async(conn, self._cache_id, key, sample, value, key_hint, sample_hint, value_hint) result.value = await self.client.unwrap_binary(result.value) @@ -581,7 +468,7 @@ async def get_size(self, peek_modes=None): (PeekModes.BACKUP). Defaults to primary cache partitions (PeekModes.PRIMARY), :return: integer number of cache entries. 
""" - conn = await self.get_best_node() + conn = await self._get_best_node() return await cache_get_size_async(conn, self._cache_id, peek_modes) def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False): diff --git a/pyignite/aio_client.py b/pyignite/aio_client.py index d2cc3ff..5e64450 100644 --- a/pyignite/aio_client.py +++ b/pyignite/aio_client.py @@ -15,19 +15,21 @@ import asyncio import random from itertools import chain -from typing import Iterable, Type, Union, Any +from typing import Iterable, Type, Union, Any, Dict +from .api import cache_get_node_partitions_async from .api.binary import get_binary_type_async, put_binary_type_async from .api.cache_config import cache_get_names_async +from .cache import BaseCache from .client import BaseClient from .cursors import AioSqlFieldsCursor from .aio_cache import AioCache, get_cache, create_cache, get_or_create_cache from .connection import AioConnection -from .constants import IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT +from .constants import AFFINITY_RETRIES, AFFINITY_DELAY from .datatypes import BinaryObject from .exceptions import BinaryTypeError, CacheError, ReconnectError, connection_errors from .stream import AioBinaryStream, READ_BACKWARD -from .utils import cache_id, entity_id, status_to_exception, is_iterable, is_wrapped +from .utils import cache_id, entity_id, status_to_exception, is_wrapped __all__ = ['AioClient'] @@ -72,6 +74,7 @@ def __init__(self, compact_footer: bool = None, partition_aware: bool = False, * """ super().__init__(compact_footer, partition_aware, **kwargs) self._registry_mux = asyncio.Lock() + self._affinity_query_mux = asyncio.Lock() def connect(self, *args): """ @@ -271,6 +274,89 @@ async def unwrap_binary(self, value: Any) -> Any: return await BinaryObject.to_python_async(stream.read_ctype(data_class, direction=READ_BACKWARD), self) return value + @status_to_exception(CacheError) + async def _get_affinity(self, conn: 'AioConnection', caches: Iterable[int]) -> Dict: + """ + Queries server for affinity mappings. Retries in case + of an intermittent error (most probably “Getting affinity for topology + version earlier than affinity is calculated”). + + :param conn: connection to Igneite server, + :param caches: Ids of caches, + :return: OP_CACHE_PARTITIONS operation result value. + """ + for _ in range(AFFINITY_RETRIES or 1): + result = await cache_get_node_partitions_async(conn, caches) + if result.status == 0 and result.value['partition_mapping']: + break + await asyncio.sleep(AFFINITY_DELAY) + + return result + + async def get_best_node( + self, cache: Union[int, str, 'BaseCache'], key: Any = None, key_hint: 'IgniteDataType' = None + ) -> 'AioConnection': + """ + Returns the node from the list of the nodes, opened by client, that + most probably contains the needed key-value pair. See IEP-23. + + This method is not a part of the public API. Unless you wish to + extend the `pyignite` capabilities (with additional testing, logging, + examining connections, et c.) you probably should not use it. + + :param cache: Ignite cache, cache name or cache id, + :param key: (optional) pythonic key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :return: Ignite connection object. 
+ """ + conn = await self.random_node() + + if self.partition_aware and key is not None: + caches = self._caches_to_update_affinity() + if caches: + async with self._affinity_query_mux: + while True: + caches = self._caches_to_update_affinity() + if not caches: + break + + try: + full_affinity = await self._get_affinity(conn, caches) + self._update_affinity(full_affinity) + + asyncio.ensure_future( + asyncio.gather( + *[conn.reconnect() for conn in self._nodes if not conn.alive], + return_exceptions=True + ) + ) + + break + except connection_errors: + # retry if connection failed + conn = await self.random_node() + pass + except CacheError: + # server did not create mapping in time + return conn + + c_id = cache.cache_id if isinstance(cache, BaseCache) else cache_id(cache) + parts = self._cache_partition_mapping(c_id).get('number_of_partitions') + + if not parts: + return conn + + key, key_hint = self._get_affinity_key(c_id, key, key_hint) + + hashcode = await key_hint.hashcode_async(key, self) + + best_node = self._get_node_by_hashcode(c_id, hashcode, parts) + if best_node: + return best_node + + return conn + async def create_cache(self, settings: Union[str, dict]) -> 'AioCache': """ Creates Ignite cache by name. Raises `CacheError` if such a cache is diff --git a/pyignite/cache.py b/pyignite/cache.py index 2602d1c..f00f000 100644 --- a/pyignite/cache.py +++ b/pyignite/cache.py @@ -13,15 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import time -from typing import Any, Dict, Iterable, Optional, Tuple, Union +from typing import Any, Iterable, Optional, Tuple, Union -from .constants import AFFINITY_RETRIES, AFFINITY_DELAY -from .binary import GenericObjectMeta from .datatypes import prop_codes from .datatypes.internal import AnyDataObject -from .exceptions import CacheCreationError, CacheError, ParameterError, SQLError, connection_errors -from .utils import cache_id, get_field_by_id, status_to_exception, unsigned +from .exceptions import CacheCreationError, CacheError, ParameterError, SQLError +from .utils import cache_id, status_to_exception from .api.cache_config import ( cache_create, cache_create_with_config, cache_get_or_create, cache_get_or_create_with_config, cache_destroy, cache_get_configuration @@ -33,7 +30,6 @@ cache_remove_if_equals, cache_replace_if_equals, cache_get_size ) from .cursors import ScanCursor, SqlCursor -from .api.affinity import cache_get_node_partitions PROP_CODES = set([ getattr(prop_codes, x) @@ -96,65 +92,39 @@ def __parse_settings(settings: Union[str, dict]) -> Tuple[Optional[str], Optiona raise ParameterError('You should supply at least cache name') -class BaseCacheMixin: - def _get_affinity_key(self, key, key_hint=None): - if key_hint is None: - key_hint = AnyDataObject.map_python_type(key) - - if self.affinity.get('is_applicable'): - config = self.affinity.get('cache_config') - if config: - affinity_key_id = config.get(key_hint.type_id) - - if affinity_key_id and isinstance(key, GenericObjectMeta): - return get_field_by_id(key, affinity_key_id) - - return key, key_hint - - def _update_affinity(self, full_affinity): - self.affinity['version'] = full_affinity['version'] - - full_mapping = full_affinity.get('partition_mapping') - if full_mapping and self.cache_id in full_mapping: - self.affinity.update(full_mapping[self.cache_id]) +class BaseCache: + def __init__(self, client: 'BaseClient', name: str): + self._client = client + self._name = name + self._settings = None + self._cache_id = 
cache_id(self._name) + self._client.register_cache(self._cache_id) - def _get_node_by_hashcode(self, hashcode, parts): + @property + def name(self) -> str: """ - Get node by key hashcode. Calculate partition and return node on that it is primary. - (algorithm is taken from `RendezvousAffinityFunction.java`) + :return: cache name string. """ + return self._name - # calculate partition for key or affinity key - # (algorithm is taken from `RendezvousAffinityFunction.java`) - mask = parts - 1 - - if parts & mask == 0: - part = (hashcode ^ (unsigned(hashcode) >> 16)) & mask - else: - part = abs(hashcode // parts) - - assert 0 <= part < parts, 'Partition calculation has failed' - - node_mapping = self.affinity.get('node_mapping') - if not node_mapping: - return None + @property + def client(self) -> 'BaseClient': + """ + :return: Client object, through which the cache is accessed. + """ + return self._client - node_uuid, best_conn = None, None - for u, p in node_mapping.items(): - if part in p: - node_uuid = u - break + @property + def cache_id(self) -> int: + """ + Cache ID. - if node_uuid: - for n in self.client._nodes: - if n.uuid == node_uuid: - best_conn = n - break - if best_conn and best_conn.alive: - return best_conn + :return: integer value of the cache ID. + """ + return self._cache_id -class Cache(BaseCacheMixin): +class Cache(BaseCache): """ Ignite cache abstraction. Users should never use this class directly, but construct its instances with @@ -171,11 +141,10 @@ def __init__(self, client: 'Client', name: str): :param client: Ignite client, :param name: Cache name. """ - self._client = client - self._name = name - self._settings = None - self._cache_id = cache_id(self._name) - self.affinity = {'version': (0, 0)} + super().__init__(client, name) + + def _get_best_node(self, key=None, key_hint=None): + return self.client.get_best_node(self._cache_id, key, key_hint) @property def settings(self) -> Optional[dict]: @@ -189,7 +158,7 @@ def settings(self) -> Optional[dict]: """ if self._settings is None: config_result = cache_get_configuration( - self.get_best_node(), + self._get_best_node(), self._cache_id ) if config_result.status == 0: @@ -199,111 +168,12 @@ def settings(self) -> Optional[dict]: return self._settings - @property - def name(self) -> str: - """ - Lazy cache name. - - :return: cache name string. - """ - if self._name is None: - self._name = self.settings[prop_codes.PROP_NAME] - - return self._name - - @property - def client(self) -> 'Client': - """ - Ignite :class:`~pyignite.client.Client` object. - - :return: Client object, through which the cache is accessed. - """ - return self._client - - @property - def cache_id(self) -> int: - """ - Cache ID. - - :return: integer value of the cache ID. - """ - return self._cache_id - @status_to_exception(CacheError) def destroy(self): """ Destroys cache with a given name. """ - return cache_destroy(self.get_best_node(), self._cache_id) - - @status_to_exception(CacheError) - def _get_affinity(self, conn: 'Connection') -> Dict: - """ - Queries server for affinity mappings. Retries in case - of an intermittent error (most probably “Getting affinity for topology - version earlier than affinity is calculated”). - - :param conn: connection to Igneite server, - :return: OP_CACHE_PARTITIONS operation result value. 
- """ - for _ in range(AFFINITY_RETRIES or 1): - result = cache_get_node_partitions(conn, self._cache_id) - if result.status == 0 and result.value['partition_mapping']: - break - time.sleep(AFFINITY_DELAY) - - return result - - def get_best_node(self, key: Any = None, key_hint: 'IgniteDataType' = None) -> 'Connection': - """ - Returns the node from the list of the nodes, opened by client, that - most probably contains the needed key-value pair. See IEP-23. - - This method is not a part of the public API. Unless you wish to - extend the `pyignite` capabilities (with additional testing, logging, - examining connections, et c.) you probably should not use it. - - :param key: (optional) pythonic key, - :param key_hint: (optional) Ignite data type, for which the given key - should be converted, - :return: Ignite connection object. - """ - conn = self._client.random_node - - if self.client.partition_aware and key is not None: - if self.affinity['version'] < self._client.affinity_version: - # update partition mapping - while True: - try: - full_affinity = self._get_affinity(conn) - break - except connection_errors: - # retry if connection failed - conn = self._client.random_node - pass - except CacheError: - # server did not create mapping in time - return conn - - self._update_affinity(full_affinity) - - for conn in self.client._nodes: - if not conn.alive: - conn.reconnect() - - parts = self.affinity.get('number_of_partitions') - - if not parts: - return conn - - key, key_hint = self._get_affinity_key(key, key_hint) - hashcode = key_hint.hashcode(key, self._client) - - best_node = self._get_node_by_hashcode(hashcode, parts) - if best_node: - return best_node - - return conn + return cache_destroy(self._get_best_node(), self._cache_id) @status_to_exception(CacheError) def get(self, key, key_hint: object = None) -> Any: @@ -319,7 +189,7 @@ def get(self, key, key_hint: object = None) -> Any: key_hint = AnyDataObject.map_python_type(key) result = cache_get( - self.get_best_node(key, key_hint), + self._get_best_node(key, key_hint), self._cache_id, key, key_hint=key_hint @@ -346,7 +216,7 @@ def put( key_hint = AnyDataObject.map_python_type(key) return cache_put( - self.get_best_node(key, key_hint), + self._get_best_node(key, key_hint), self._cache_id, key, value, key_hint=key_hint, value_hint=value_hint ) @@ -359,7 +229,7 @@ def get_all(self, keys: list) -> list: :param keys: list of keys or tuples of (key, key_hint), :return: a dict of key-value pairs. """ - result = cache_get_all(self.get_best_node(), self._cache_id, keys) + result = cache_get_all(self._get_best_node(), self._cache_id, keys) if result.value: for key, value in result.value.items(): result.value[key] = self.client.unwrap_binary(value) @@ -375,7 +245,7 @@ def put_all(self, pairs: dict): to save. Each key or value can be an item of representable Python type or a tuple of (item, hint), """ - return cache_put_all(self.get_best_node(), self._cache_id, pairs) + return cache_put_all(self._get_best_node(), self._cache_id, pairs) @status_to_exception(CacheError) def replace( @@ -395,7 +265,7 @@ def replace( key_hint = AnyDataObject.map_python_type(key) result = cache_replace( - self.get_best_node(key, key_hint), + self._get_best_node(key, key_hint), self._cache_id, key, value, key_hint=key_hint, value_hint=value_hint ) @@ -410,7 +280,7 @@ def clear(self, keys: Optional[list] = None): :param keys: (optional) list of cache keys or (key, key type hint) tuples to clear (default: clear all). 
""" - conn = self.get_best_node() + conn = self._get_best_node() if keys: return cache_clear_keys(conn, self._cache_id, keys) else: @@ -429,7 +299,7 @@ def clear_key(self, key, key_hint: object = None): key_hint = AnyDataObject.map_python_type(key) return cache_clear_key( - self.get_best_node(key, key_hint), + self._get_best_node(key, key_hint), self._cache_id, key, key_hint=key_hint @@ -443,7 +313,7 @@ def clear_keys(self, keys: Iterable): :param keys: a list of keys or (key, type hint) tuples """ - return cache_clear_keys(self.get_best_node(), self._cache_id, keys) + return cache_clear_keys(self._get_best_node(), self._cache_id, keys) @status_to_exception(CacheError) def contains_key(self, key, key_hint=None) -> bool: @@ -459,7 +329,7 @@ def contains_key(self, key, key_hint=None) -> bool: key_hint = AnyDataObject.map_python_type(key) return cache_contains_key( - self.get_best_node(key, key_hint), + self._get_best_node(key, key_hint), self._cache_id, key, key_hint=key_hint @@ -473,7 +343,7 @@ def contains_keys(self, keys: Iterable) -> bool: :param keys: a list of keys or (key, type hint) tuples, :return: boolean `True` when all keys are present, `False` otherwise. """ - return cache_contains_keys(self.get_best_node(), self._cache_id, keys) + return cache_contains_keys(self._get_best_node(), self._cache_id, keys) @status_to_exception(CacheError) def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: @@ -493,7 +363,7 @@ def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: key_hint = AnyDataObject.map_python_type(key) result = cache_get_and_put( - self.get_best_node(key, key_hint), + self._get_best_node(key, key_hint), self._cache_id, key, value, key_hint, value_hint @@ -521,7 +391,7 @@ def get_and_put_if_absent( key_hint = AnyDataObject.map_python_type(key) result = cache_get_and_put_if_absent( - self.get_best_node(key, key_hint), + self._get_best_node(key, key_hint), self._cache_id, key, value, key_hint, value_hint @@ -546,7 +416,7 @@ def put_if_absent(self, key, value, key_hint=None, value_hint=None): key_hint = AnyDataObject.map_python_type(key) return cache_put_if_absent( - self.get_best_node(key, key_hint), + self._get_best_node(key, key_hint), self._cache_id, key, value, key_hint, value_hint @@ -566,7 +436,7 @@ def get_and_remove(self, key, key_hint=None) -> Any: key_hint = AnyDataObject.map_python_type(key) result = cache_get_and_remove( - self.get_best_node(key, key_hint), + self._get_best_node(key, key_hint), self._cache_id, key, key_hint @@ -595,7 +465,7 @@ def get_and_replace( key_hint = AnyDataObject.map_python_type(key) result = cache_get_and_replace( - self.get_best_node(key, key_hint), + self._get_best_node(key, key_hint), self._cache_id, key, value, key_hint, value_hint @@ -616,7 +486,7 @@ def remove_key(self, key, key_hint=None): key_hint = AnyDataObject.map_python_type(key) return cache_remove_key( - self.get_best_node(key, key_hint), self._cache_id, key, key_hint + self._get_best_node(key, key_hint), self._cache_id, key, key_hint ) @status_to_exception(CacheError) @@ -628,7 +498,7 @@ def remove_keys(self, keys: list): :param keys: list of keys or tuples of (key, key_hint) to remove. """ return cache_remove_keys( - self.get_best_node(), self._cache_id, keys + self._get_best_node(), self._cache_id, keys ) @status_to_exception(CacheError) @@ -636,7 +506,7 @@ def remove_all(self): """ Removes all cache entries, notifying listeners and cache writers. 
""" - return cache_remove_all(self.get_best_node(), self._cache_id) + return cache_remove_all(self._get_best_node(), self._cache_id) @status_to_exception(CacheError) def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): @@ -655,7 +525,7 @@ def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): key_hint = AnyDataObject.map_python_type(key) return cache_remove_if_equals( - self.get_best_node(key, key_hint), + self._get_best_node(key, key_hint), self._cache_id, key, sample, key_hint, sample_hint @@ -685,7 +555,7 @@ def replace_if_equals( key_hint = AnyDataObject.map_python_type(key) result = cache_replace_if_equals( - self.get_best_node(key, key_hint), + self._get_best_node(key, key_hint), self._cache_id, key, sample, value, key_hint, sample_hint, value_hint @@ -704,7 +574,7 @@ def get_size(self, peek_modes=None): :return: integer number of cache entries. """ return cache_get_size( - self.get_best_node(), self._cache_id, peek_modes + self._get_best_node(), self._cache_id, peek_modes ) def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False): diff --git a/pyignite/client.py b/pyignite/client.py index 05df617..2f24c43 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -39,25 +39,28 @@ :py:meth:`~pyignite.client.Client.query_binary_type` methods operates the local (class-wise) registry for Ignite Complex objects. """ - +import time from collections import defaultdict, OrderedDict import random import re from itertools import chain -from typing import Iterable, Type, Union, Any +from typing import Iterable, Type, Union, Any, Dict +from .api import cache_get_node_partitions from .api.binary import get_binary_type, put_binary_type from .api.cache_config import cache_get_names from .cursors import SqlFieldsCursor -from .cache import Cache, create_cache, get_cache, get_or_create_cache +from .cache import Cache, create_cache, get_cache, get_or_create_cache, BaseCache from .connection import Connection -from .constants import IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT, PROTOCOL_BYTE_ORDER -from .datatypes import BinaryObject +from .constants import IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT, PROTOCOL_BYTE_ORDER, AFFINITY_RETRIES, AFFINITY_DELAY +from .datatypes import BinaryObject, AnyDataObject +from .datatypes.base import IgniteDataType from .datatypes.internal import tc_map from .exceptions import BinaryTypeError, CacheError, ReconnectError, connection_errors from .stream import BinaryStream, READ_BACKWARD from .utils import ( - cache_id, capitalize, entity_id, schema_id, process_delimiter, status_to_exception, is_iterable, is_wrapped + cache_id, capitalize, entity_id, schema_id, process_delimiter, status_to_exception, is_iterable, is_wrapped, + get_field_by_id, unsigned ) from .binary import GenericObjectMeta @@ -79,6 +82,7 @@ def __init__(self, compact_footer: bool = None, partition_aware: bool = False, * self._current_node = 0 self._partition_aware = partition_aware self.affinity_version = (0, 0) + self._affinity = {'version': self.affinity_version, 'partition_mapping': defaultdict(dict)} self._protocol_version = None @property @@ -242,6 +246,76 @@ def _get_from_registry(self, type_id, schema): return None return self._registry[type_id] + def register_cache(self, cache_id: int): + if self.partition_aware and cache_id not in self._affinity: + self._affinity['partition_mapping'][cache_id] = {} + + def _get_affinity_key(self, cache_id, key, key_hint=None): + if key_hint is None: + key_hint = AnyDataObject.map_python_type(key) + + 
cache_partition_mapping = self._cache_partition_mapping(cache_id) + if cache_partition_mapping and cache_partition_mapping.get('is_applicable'): + config = cache_partition_mapping.get('cache_config') + if config: + affinity_key_id = config.get(key_hint.type_id) + + if affinity_key_id and isinstance(key, GenericObjectMeta): + return get_field_by_id(key, affinity_key_id) + + return key, key_hint + + def _update_affinity(self, full_affinity): + self._affinity['version'] = full_affinity['version'] + + full_mapping = full_affinity.get('partition_mapping') + if full_mapping: + self._affinity['partition_mapping'].update(full_mapping) + + def _caches_to_update_affinity(self): + if self._affinity['version'] < self.affinity_version: + return list(self._affinity['partition_mapping'].keys()) + else: + return list(c_id for c_id, c_mapping in self._affinity['partition_mapping'].items() if not c_mapping) + + def _cache_partition_mapping(self, cache_id): + return self._affinity['partition_mapping'][cache_id] + + def _get_node_by_hashcode(self, cache_id, hashcode, parts): + """ + Get node by key hashcode. Calculate partition and return node on that it is primary. + (algorithm is taken from `RendezvousAffinityFunction.java`) + """ + + # calculate partition for key or affinity key + # (algorithm is taken from `RendezvousAffinityFunction.java`) + mask = parts - 1 + + if parts & mask == 0: + part = (hashcode ^ (unsigned(hashcode) >> 16)) & mask + else: + part = abs(hashcode // parts) + + assert 0 <= part < parts, 'Partition calculation has failed' + + node_mapping = self._cache_partition_mapping(cache_id).get('node_mapping') + if not node_mapping: + return None + + node_uuid, best_conn = None, None + for u, p in node_mapping.items(): + if part in p: + node_uuid = u + break + + if node_uuid: + for n in self._nodes: + if n.uuid == node_uuid: + best_conn = n + break + if best_conn and best_conn.alive: + return best_conn + class _ConnectionContextManager: def __init__(self, client, nodes): @@ -476,6 +550,81 @@ def unwrap_binary(self, value: Any) -> Any: return BinaryObject.to_python(stream.read_ctype(data_class, direction=READ_BACKWARD), self) return value + @status_to_exception(CacheError) + def _get_affinity(self, conn: 'Connection', caches: Iterable[int]) -> Dict: + """ + Queries server for affinity mappings. Retries in case + of an intermittent error (most probably “Getting affinity for topology + version earlier than affinity is calculated”). + + :param conn: connection to Ignite server, + :param caches: Ids of caches, + :return: OP_CACHE_PARTITIONS operation result value. + """ + for _ in range(AFFINITY_RETRIES or 1): + result = cache_get_node_partitions(conn, caches) + if result.status == 0 and result.value['partition_mapping']: + break + time.sleep(AFFINITY_DELAY) + + return result + + def get_best_node( + self, cache: Union[int, str, 'BaseCache'], key: Any = None, key_hint: 'IgniteDataType' = None + ) -> 'Connection': + """ + Returns the node from the list of the nodes, opened by client, that + most probably contains the needed key-value pair. See IEP-23. + + This method is not a part of the public API. Unless you wish to + extend the `pyignite` capabilities (with additional testing, logging, + examining connections, et c.) you probably should not use it. + + :param cache: Ignite cache, cache name or cache id, + :param key: (optional) pythonic key, + :param key_hint: (optional) Ignite data type, for which the given key + should be converted, + :return: Ignite connection object. 
+ """ + conn = self.random_node + + if self.partition_aware and key is not None: + caches = self._caches_to_update_affinity() + if caches: + # update partition mapping + while True: + try: + full_affinity = self._get_affinity(conn, caches) + break + except connection_errors: + # retry if connection failed + conn = self.random_node + pass + except CacheError: + # server did not create mapping in time + return conn + + self._update_affinity(full_affinity) + + for conn in self._nodes: + if not conn.alive: + conn.reconnect() + + c_id = cache.cache_id if isinstance(cache, BaseCache) else cache_id(cache) + parts = self._cache_partition_mapping(c_id).get('number_of_partitions') + + if not parts: + return conn + + key, key_hint = self._get_affinity_key(c_id, key, key_hint) + hashcode = key_hint.hashcode(key, self) + + best_node = self._get_node_by_hashcode(c_id, hashcode, parts) + if best_node: + return best_node + + return conn + def create_cache(self, settings: Union[str, dict]) -> 'Cache': """ Creates Ignite cache by name. Raises `CacheError` if such a cache is diff --git a/tests/affinity/test_affinity.py b/tests/affinity/test_affinity.py index b1bcec7..64b9cc5 100644 --- a/tests/affinity/test_affinity.py +++ b/tests/affinity/test_affinity.py @@ -309,7 +309,7 @@ def check_peek_value(node, best_node, result): def inner(): cache.put(key, value, key_hint=key_hint) - best_node = cache.get_best_node(key, key_hint=key_hint) + best_node = client.get_best_node(cache, key, key_hint=key_hint) for node in filter(lambda n: n.alive, client._nodes): result = cache_local_peek(node, cache.cache_id, key, key_hint=key_hint) @@ -318,7 +318,7 @@ def inner(): async def inner_async(): await cache.put(key, value, key_hint=key_hint) - best_node = await cache.get_best_node(key, key_hint=key_hint) + best_node = await client.get_best_node(cache, key, key_hint=key_hint) for node in filter(lambda n: n.alive, client._nodes): result = await cache_local_peek_async(node, cache.cache_id, key, key_hint=key_hint) diff --git a/tests/affinity/test_affinity_request_routing.py b/tests/affinity/test_affinity_request_routing.py index 64197ff..9c94aa4 100644 --- a/tests/affinity/test_affinity_request_routing.py +++ b/tests/affinity/test_affinity_request_routing.py @@ -14,6 +14,7 @@ # limitations under the License. 
import asyncio +import contextlib from collections import OrderedDict, deque import random @@ -28,6 +29,11 @@ from pyignite.datatypes.prop_codes import PROP_NAME, PROP_BACKUPS_NUMBER, PROP_CACHE_KEY_CONFIGURATION, PROP_CACHE_MODE from tests.util import wait_for_condition, wait_for_condition_async, start_ignite, kill_process_tree +try: + from contextlib import asynccontextmanager +except ImportError: + from async_generator import asynccontextmanager + requests = deque() old_send = Connection.send old_send_async = AioConnection._send @@ -208,25 +214,36 @@ class AffinityTestType1( @pytest.fixture -def client_routed_cache(request): +def client_routed(): client = Client(partition_aware=True) try: client.connect(client_routed_connection_string) - yield client.get_or_create_cache(request.node.name) + yield client finally: client.close() @pytest.fixture -async def async_client_routed_cache(request): +def client_routed_cache(client_routed, request): + yield client_routed.get_or_create_cache(request.node.name) + + +@pytest.fixture +async def async_client_routed(): client = AioClient(partition_aware=True) try: await client.connect(client_routed_connection_string) - yield await client.get_or_create_cache(request.node.name) + yield client finally: await client.close() +@pytest.fixture +async def async_client_routed_cache(async_client_routed, request): + cache = await async_client_routed.get_or_create_cache(request.node.name) + yield cache + + def test_cache_operation_routed_to_new_cluster_node(client_routed_cache): __perform_cache_operation_routed_to_new_node(client_routed_cache) @@ -345,3 +362,114 @@ async def inner_async(): assert idx1 != idx2 return inner_async() if isinstance(cache, AioCache) else inner() + + +@contextlib.contextmanager +def create_caches(client): + caches = [] + try: + caches = [client.create_cache(f'test_cache_{i}') for i in range(0, 10)] + yield caches + finally: + for cache in caches: + try: + cache.destroy() + except: # noqa: 13 + cache.destroy() # Retry if connection failed. + pass + + +@asynccontextmanager +async def create_caches_async(client): + caches = [] + try: + caches = await asyncio.gather(*[client.create_cache(f'test_cache_{i}') for i in range(0, 10)]) + yield caches + finally: + for cache in caches: + try: + await cache.destroy() + except: # noqa: 13 + await cache.destroy() # Retry if connection failed. 
+ pass + + +def test_new_registered_cache_affinity(client): + with create_caches(client) as caches: + key = 12 + test_cache = random.choice(caches) + test_cache.put(key, key) + wait_for_affinity_distribution(test_cache, key, 3) + + caches.append(client.create_cache('new_cache')) + + for cache in caches: + cache.get(key) + assert requests.pop() == 3 + + +@pytest.mark.asyncio +async def test_new_registered_cache_affinity_async(async_client): + async with create_caches_async(async_client) as caches: + key = 12 + test_cache = random.choice(caches) + await test_cache.put(key, key) + await wait_for_affinity_distribution_async(test_cache, key, 3) + + caches.append(await async_client.create_cache('new_cache')) + + for cache in caches: + await cache.get(key) + assert requests.pop() == 3 + + +def test_all_registered_cache_updated_on_new_server(client_routed): + with create_caches(client_routed) as caches: + key = 12 + test_cache = random.choice(caches) + wait_for_affinity_distribution(test_cache, key, 3) + test_cache.put(key, key) + assert requests.pop() == 3 + + srv = start_ignite(idx=4) + try: + # Wait for rebalance and partition map exchange + wait_for_affinity_distribution(test_cache, key, 4) + + for cache in caches: + cache.get(key) + assert requests.pop() == 4 + finally: + kill_process_tree(srv.pid) + + +@pytest.mark.asyncio +async def test_all_registered_cache_updated_on_new_server_async(async_client_routed): + async with create_caches_async(async_client_routed) as caches: + key = 12 + test_cache = random.choice(caches) + await wait_for_affinity_distribution_async(test_cache, key, 3) + await test_cache.put(key, key) + assert requests.pop() == 3 + + srv = start_ignite(idx=4) + try: + # Wait for rebalance and partition map exchange + await wait_for_affinity_distribution_async(test_cache, key, 4) + + for cache in caches: + await cache.get(key) + assert requests.pop() == 4 + finally: + kill_process_tree(srv.pid) + + +@pytest.mark.asyncio +async def test_update_affinity_concurrently(async_client): + async with create_caches_async(async_client) as caches: + key = 12 + await asyncio.gather(*[cache.put(key, key) for cache in caches]) + + for cache in caches: + await cache.get(key) + assert requests.pop() == 3 diff --git a/tests/common/test_cache_class.py b/tests/common/test_cache_class.py index 02dfa82..b035d8f 100644 --- a/tests/common/test_cache_class.py +++ b/tests/common/test_cache_class.py @@ -36,7 +36,7 @@ def test_cache_create(client): async def test_cache_create_async(async_client): cache = await async_client.get_or_create_cache('my_oop_cache') try: - assert (await cache.name()) == (await cache.settings())[PROP_NAME] == 'my_oop_cache' + assert cache.name == (await cache.settings())[PROP_NAME] == 'my_oop_cache' finally: await cache.destroy() @@ -94,7 +94,7 @@ async def test_cache_config_async(async_client, cache_config): await async_client.create_cache(cache_config) cache = await async_client.get_or_create_cache('my_oop_cache') try: - assert await cache.name() == cache_config[PROP_NAME] + assert cache.name == cache_config[PROP_NAME] assert (await cache.settings())[PROP_CACHE_KEY_CONFIGURATION] == cache_config[PROP_CACHE_KEY_CONFIGURATION] finally: await cache.destroy() diff --git a/tests/common/test_cache_size.py b/tests/common/test_cache_size.py index d134903..f2ec3ed 100644 --- a/tests/common/test_cache_size.py +++ b/tests/common/test_cache_size.py @@ -15,8 +15,11 @@ import pytest +from pyignite.datatypes.cache_config import WriteSynchronizationMode from pyignite.datatypes.key_value import
PeekModes -from pyignite.datatypes.prop_codes import PROP_NAME, PROP_IS_ONHEAP_CACHE_ENABLED, PROP_BACKUPS_NUMBER +from pyignite.datatypes.prop_codes import ( + PROP_NAME, PROP_IS_ONHEAP_CACHE_ENABLED, PROP_BACKUPS_NUMBER, PROP_WRITE_SYNCHRONIZATION_MODE +) from tests.util import get_or_create_cache, get_or_create_cache_async test_params = [ @@ -24,7 +27,8 @@ { PROP_NAME: 'cache_onheap_backups_2', PROP_IS_ONHEAP_CACHE_ENABLED: True, - PROP_BACKUPS_NUMBER: 2 + PROP_BACKUPS_NUMBER: 2, + PROP_WRITE_SYNCHRONIZATION_MODE: WriteSynchronizationMode.FULL_SYNC }, [ [None, 1], diff --git a/tests/util.py b/tests/util.py index 064ac7a..5651739 100644 --- a/tests/util.py +++ b/tests/util.py @@ -26,7 +26,6 @@ import subprocess import time -from pyignite import Client, AioClient try: from contextlib import asynccontextmanager From 7c1d0cc12fa724989b9bed6e2a14f54c61228d3a Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Sat, 3 Apr 2021 10:10:47 +0300 Subject: [PATCH 26/62] IGNITE-14465 Add the ability to set and get cluster state This closes #27 --- pyignite/aio_client.py | 13 ++- pyignite/aio_cluster.py | 56 +++++++++++ pyignite/api/cluster.py | 106 ++++++++++++++++++++ pyignite/client.py | 33 ++++--- pyignite/cluster.py | 56 +++++++++++ pyignite/connection/aio_connection.py | 19 ++-- pyignite/connection/bitmask_feature.py | 57 +++++++++++ pyignite/connection/connection.py | 46 ++++----- pyignite/connection/handshake.py | 41 +++++--- pyignite/connection/protocol_context.py | 100 +++++++++++++++++++ pyignite/constants.py | 7 +- pyignite/datatypes/cluster_state.py | 28 ++++++ pyignite/exceptions.py | 18 +++- pyignite/queries/op_codes.py | 5 +- pyignite/queries/query.py | 4 +- pyignite/queries/response.py | 7 +- pyignite/stream/aio_cluster.py | 53 ++++++++++ tests/config/ignite-config.xml.jinja2 | 5 +- tests/custom/test_cluster.py | 125 ++++++++++++++++++++++++ tests/util.py | 20 ++-- 20 files changed, 716 insertions(+), 83 deletions(-) create mode 100644 pyignite/aio_cluster.py create mode 100644 pyignite/api/cluster.py create mode 100644 pyignite/cluster.py create mode 100644 pyignite/connection/bitmask_feature.py create mode 100644 pyignite/connection/protocol_context.py create mode 100644 pyignite/datatypes/cluster_state.py create mode 100644 pyignite/stream/aio_cluster.py create mode 100644 tests/custom/test_cluster.py diff --git a/pyignite/aio_client.py b/pyignite/aio_client.py index 5e64450..1870878 100644 --- a/pyignite/aio_client.py +++ b/pyignite/aio_client.py @@ -17,6 +17,7 @@ from itertools import chain from typing import Iterable, Type, Union, Any, Dict +from .aio_cluster import AioCluster from .api import cache_get_node_partitions_async from .api.binary import get_binary_type_async, put_binary_type_async from .api.cache_config import cache_get_names_async @@ -92,7 +93,7 @@ async def _connect(self, nodes): if not self.partition_aware: try: - if self.protocol_version is None: + if self.protocol_context is None: # open connection before adding to the pool await conn.connect() @@ -120,7 +121,7 @@ async def _connect(self, nodes): await asyncio.gather(*reconnect_coro, return_exceptions=True) - if self.protocol_version is None: + if self.protocol_context is None: raise ReconnectError('Can not connect.') async def close(self): @@ -460,3 +461,11 @@ def sql( return AioSqlFieldsCursor(self, c_id, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, timeout) + + def get_cluster(self) -> 
'AioCluster': + """ + Gets client cluster facade. + + :return: AioClient cluster facade. + """ + return AioCluster(self) diff --git a/pyignite/aio_cluster.py b/pyignite/aio_cluster.py new file mode 100644 index 0000000..6d76125 --- /dev/null +++ b/pyignite/aio_cluster.py @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module contains `AioCluster` that lets you get info and change state of the +whole cluster asynchronously. +""" +from pyignite.api.cluster import cluster_get_state_async, cluster_set_state_async +from pyignite.exceptions import ClusterError +from pyignite.utils import status_to_exception + + +class AioCluster: + """ + Ignite cluster abstraction. Users should never use this class directly, + but construct its instances with + :py:meth:`~pyignite.aio_client.AioClient.get_cluster` method instead. + """ + + def __init__(self, client: 'AioClient'): + self._client = client + + @status_to_exception(ClusterError) + async def get_state(self): + """ + Gets current cluster state. + + :return: Current cluster state. This is one of ClusterState.INACTIVE, + ClusterState.ACTIVE or ClusterState.ACTIVE_READ_ONLY. + """ + return await cluster_get_state_async(await self._client.random_node()) + + @status_to_exception(ClusterError) + async def set_state(self, state): + """ + Changes current cluster state to the given. + + Note: Deactivation clears in-memory caches (without persistence) + including the system caches. + + :param state: New cluster state. This is one of ClusterState.INACTIVE, + ClusterState.ACTIVE or ClusterState.ACTIVE_READ_ONLY. + """ + return await cluster_set_state_async(await self._client.random_node(), state) diff --git a/pyignite/api/cluster.py b/pyignite/api/cluster.py new file mode 100644 index 0000000..e134239 --- /dev/null +++ b/pyignite/api/cluster.py @@ -0,0 +1,106 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from pyignite.api import APIResult +from pyignite.connection import AioConnection, Connection +from pyignite.datatypes import Byte +from pyignite.exceptions import NotSupportedByClusterError +from pyignite.queries import Query, query_perform +from pyignite.queries.op_codes import OP_CLUSTER_GET_STATE, OP_CLUSTER_CHANGE_STATE + + +def cluster_get_state(connection: 'Connection', query_id=None) -> 'APIResult': + """ + Get cluster state. + + :param connection: Connection to use, + :param query_id: (optional) a value generated by client and returned as-is + in response.query_id. When the parameter is omitted, a random value + is generated, + :return: API result data object. Contains zero status and a state + retrieved on success, non-zero status and an error description on failure. + """ + return __cluster_get_state(connection, query_id) + + +async def cluster_get_state_async(connection: 'AioConnection', query_id=None) -> 'APIResult': + """ + Async version of cluster_get_state + """ + return await __cluster_get_state(connection, query_id) + + +def __post_process_get_state(result): + if result.status == 0: + result.value = result.value['state'] + return result + + +def __cluster_get_state(connection, query_id): + if not connection.protocol_context.is_cluster_api_supported(): + raise NotSupportedByClusterError('Cluster API is not supported by the cluster') + + query_struct = Query(OP_CLUSTER_GET_STATE, query_id=query_id) + return query_perform( + query_struct, connection, + response_config=[('state', Byte)], + post_process_fun=__post_process_get_state + ) + + +def cluster_set_state(connection: 'Connection', state: int, query_id=None) -> 'APIResult': + """ + Set cluster state. + + :param connection: Connection to use, + :param state: State to set, + :param query_id: (optional) a value generated by client and returned as-is + in response.query_id. When the parameter is omitted, a random value + is generated, + :return: API result data object. Contains zero status if a value + is written, non-zero status and an error description otherwise. 
+ """ + return __cluster_set_state(connection, state, query_id) + + +async def cluster_set_state_async(connection: 'AioConnection', state: int, query_id=None) -> 'APIResult': + """ + Async version of cluster_get_state + """ + return await __cluster_set_state(connection, state, query_id) + + +def __post_process_set_state(result): + if result.status == 0: + result.value = result.value['state'] + return result + + +def __cluster_set_state(connection, state, query_id): + if not connection.protocol_context.is_cluster_api_supported(): + raise NotSupportedByClusterError('Cluster API is not supported by the cluster') + + query_struct = Query( + OP_CLUSTER_CHANGE_STATE, + [ + ('state', Byte) + ], + query_id=query_id + ) + return query_perform( + query_struct, connection, + query_params={ + 'state': state, + } + ) diff --git a/pyignite/client.py b/pyignite/client.py index 2f24c43..b7c4046 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -49,6 +49,7 @@ from .api import cache_get_node_partitions from .api.binary import get_binary_type, put_binary_type from .api.cache_config import cache_get_names +from .cluster import Cluster from .cursors import SqlFieldsCursor from .cache import Cache, create_cache, get_cache, get_or_create_cache, BaseCache from .connection import Connection @@ -83,24 +84,23 @@ def __init__(self, compact_footer: bool = None, partition_aware: bool = False, * self._partition_aware = partition_aware self.affinity_version = (0, 0) self._affinity = {'version': self.affinity_version, 'partition_mapping': defaultdict(dict)} - self._protocol_version = None + self._protocol_context = None @property - def protocol_version(self): + def protocol_context(self): """ - Returns the tuple of major, minor, and revision numbers of the used - thin protocol version, or None, if no connection to the Ignite cluster - was not yet established. + Returns protocol context, or None, if no connection to the Ignite + cluster was not yet established. This method is not a part of the public API. Unless you wish to extend the `pyignite` capabilities (with additional testing, logging, examining connections, et c.) you probably should not use it. 
""" - return self._protocol_version + return self._protocol_context - @protocol_version.setter - def protocol_version(self, value): - self._protocol_version = value + @protocol_context.setter + def protocol_context(self, value): + self._protocol_context = value @property def partition_aware(self): @@ -108,7 +108,8 @@ def partition_aware(self): @property def partition_awareness_supported_by_protocol(self): - return self.protocol_version is not None and self.protocol_version >= (1, 4, 0) + return self.protocol_context is not None \ + and self.protocol_context.is_partition_awareness_supported() @property def compact_footer(self) -> bool: @@ -379,7 +380,7 @@ def _connect(self, nodes): conn = Connection(self, host, port, **self._connection_args) try: - if self.protocol_version is None or self.partition_aware: + if self.protocol_context is None or self.partition_aware: # open connection before adding to the pool conn.connect() @@ -396,7 +397,7 @@ def _connect(self, nodes): self._nodes.append(conn) - if self.protocol_version is None: + if self.protocol_context is None: raise ReconnectError('Can not connect.') def close(self): @@ -727,3 +728,11 @@ def sql( return SqlFieldsCursor(self, c_id, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, timeout) + + def get_cluster(self) -> 'Cluster': + """ + Gets client cluster facade. + + :return: Client cluster facade. + """ + return Cluster(self) diff --git a/pyignite/cluster.py b/pyignite/cluster.py new file mode 100644 index 0000000..f10afe4 --- /dev/null +++ b/pyignite/cluster.py @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module contains `Cluster` that lets you get info and change state of the +whole cluster. +""" +from pyignite.api.cluster import cluster_get_state, cluster_set_state +from pyignite.exceptions import ClusterError +from pyignite.utils import status_to_exception + + +class Cluster: + """ + Ignite cluster abstraction. Users should never use this class directly, + but construct its instances with + :py:meth:`~pyignite.client.Client.get_cluster` method instead. + """ + + def __init__(self, client: 'Client'): + self._client = client + + @status_to_exception(ClusterError) + def get_state(self): + """ + Gets current cluster state. + + :return: Current cluster state. This is one of ClusterState.INACTIVE, + ClusterState.ACTIVE or ClusterState.ACTIVE_READ_ONLY. + """ + return cluster_get_state(self._client.random_node) + + @status_to_exception(ClusterError) + def set_state(self, state): + """ + Changes current cluster state to the given. + + Note: Deactivation clears in-memory caches (without persistence) + including the system caches. 
+ + :param state: New cluster state. This is one of ClusterState.INACTIVE, + ClusterState.ACTIVE or ClusterState.ACTIVE_READ_ONLY. + """ + return cluster_set_state(self._client.random_node, state) diff --git a/pyignite/connection/aio_connection.py b/pyignite/connection/aio_connection.py index e5c11da..ce32592 100644 --- a/pyignite/connection/aio_connection.py +++ b/pyignite/connection/aio_connection.py @@ -36,9 +36,11 @@ from pyignite.constants import PROTOCOLS, PROTOCOL_BYTE_ORDER from pyignite.exceptions import HandshakeError, SocketError, connection_errors +from .bitmask_feature import BitmaskFeature from .connection import BaseConnection from .handshake import HandshakeRequest, HandshakeResponse +from .protocol_context import ProtocolContext from .ssl import create_ssl_context from ..stream import AioBinaryStream @@ -112,27 +114,28 @@ async def _connect(self) -> Union[dict, OrderedDict]: detecting_protocol = False # choose highest version first - if self.client.protocol_version is None: + if self.client.protocol_context is None: detecting_protocol = True - self.client.protocol_version = max(PROTOCOLS) + self.client.protocol_context = ProtocolContext(max(PROTOCOLS), BitmaskFeature.all_supported()) try: result = await self._connect_version() except HandshakeError as e: if e.expected_version in PROTOCOLS: - self.client.protocol_version = e.expected_version + self.client.protocol_context.version = e.expected_version result = await self._connect_version() else: raise e except connection_errors: # restore undefined protocol version if detecting_protocol: - self.client.protocol_version = None + self.client.protocol_context = None raise # connection is ready for end user + features = BitmaskFeature.from_array(result.get('features', None)) + self.client.protocol_context.features = features self.uuid = result.get('node_uuid', None) # version-specific (1.4+) - self.failed = False return result @@ -145,10 +148,10 @@ async def _connect_version(self) -> Union[dict, OrderedDict]: ssl_context = create_ssl_context(self.ssl_params) self._reader, self._writer = await asyncio.open_connection(self.host, self.port, ssl=ssl_context) - protocol_version = self.client.protocol_version + protocol_context = self.client.protocol_context hs_request = HandshakeRequest( - protocol_version, + protocol_context, self.username, self.password ) @@ -158,7 +161,7 @@ async def _connect_version(self) -> Union[dict, OrderedDict]: await self._send(stream.getbuffer(), reconnect=False) with AioBinaryStream(self.client, await self._recv(reconnect=False)) as stream: - hs_response = await HandshakeResponse.parse_async(stream, self.protocol_version) + hs_response = await HandshakeResponse.parse_async(stream, self.protocol_context) if hs_response.op_code == 0: self._close() diff --git a/pyignite/connection/bitmask_feature.py b/pyignite/connection/bitmask_feature.py new file mode 100644 index 0000000..80d51ad --- /dev/null +++ b/pyignite/connection/bitmask_feature.py @@ -0,0 +1,57 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
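[editor note] Both `Cluster` methods rely on the `status_to_exception` decorator imported from `pyignite.utils`. A simplified model of its behaviour (an illustration, not the library code): a non-zero `APIResult.status` is converted into the given exception, otherwise the bare `value` is handed back to the caller.

```python
# Illustrative re-implementation; the real decorator lives in pyignite.utils.
import functools


def status_to_exception(exc_class):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            if result.status != 0:
                raise exc_class(result.message)
            return result.value
        return wrapper
    return decorator
```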
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from enum import IntFlag +from typing import Optional + +from pyignite.constants import PROTOCOL_BYTE_ORDER + + +class BitmaskFeature(IntFlag): + CLUSTER_API = 1 << 2 + + def __bytes__(self) -> bytes: + """ + Convert feature flags array to bytearray bitmask. + + :return: Bitmask as bytearray. + """ + full_bytes = self.bit_length() // 8 + 1 + return self.to_bytes(full_bytes, byteorder=PROTOCOL_BYTE_ORDER) + + @staticmethod + def all_supported() -> 'BitmaskFeature': + """ + Get all supported features. + + :return: All supported features. + """ + supported = BitmaskFeature(0) + for feature in BitmaskFeature: + supported |= feature + return supported + + @staticmethod + def from_array(features_array: bytes) -> Optional['BitmaskFeature']: + """ + Get features from bytearray. + + :param features_array: Feature bitmask as array, + :return: Return features. + """ + if features_array is None: + return None + return BitmaskFeature.from_bytes(features_array, byteorder=PROTOCOL_BYTE_ORDER) diff --git a/pyignite/connection/connection.py b/pyignite/connection/connection.py index 901cb56..7d5778c 100644 --- a/pyignite/connection/connection.py +++ b/pyignite/connection/connection.py @@ -13,29 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - from collections import OrderedDict import socket from typing import Union from pyignite.constants import PROTOCOLS, IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT, PROTOCOL_BYTE_ORDER from pyignite.exceptions import HandshakeError, SocketError, connection_errors, AuthenticationError +from .bitmask_feature import BitmaskFeature from .handshake import HandshakeRequest, HandshakeResponse +from .protocol_context import ProtocolContext from .ssl import wrap, check_ssl_params from ..stream import BinaryStream @@ -83,19 +70,18 @@ def __repr__(self) -> str: return '{}:{}'.format(self.host or '?', self.port or '?') @property - def protocol_version(self): + def protocol_context(self): """ - Returns the tuple of major, minor, and revision numbers of the used - thin protocol version, or None, if no connection to the Ignite cluster - was yet established. + Returns protocol context, or None, if no connection to the Ignite + cluster was yet established. 
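[editor note] A quick round-trip check of the bitmask encoding defined above (illustrative; with only `CLUSTER_API` defined the mask currently fits into a single byte):

```python
from pyignite.connection.bitmask_feature import BitmaskFeature

features = BitmaskFeature.all_supported()
encoded = bytes(features)                 # b'\x04': CLUSTER_API is bit 2, little-endian
restored = BitmaskFeature.from_array(encoded)

assert restored == features
assert BitmaskFeature.from_array(None) is None   # no feature array in the handshake
```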
""" - return self.client.protocol_version + return self.client.protocol_context def _process_handshake_error(self, response): error_text = f'Handshake error: {response.message}' # if handshake fails for any reason other than protocol mismatch # (i.e. authentication error), server version is 0.0.0 - protocol_version = self.client.protocol_version + protocol_version = self.client.protocol_context.version server_version = (response.version_major, response.version_minor, response.version_patch) if any(server_version): @@ -118,7 +104,7 @@ class Connection(BaseConnection): * binary protocol connector. Encapsulates handshake and failover reconnection. """ - def __init__(self, client: 'Client', host: str, port: int, timeout: float = 2.0, + def __init__(self, client: 'Client', host: str, port: int, timeout: float = None, username: str = None, password: str = None, **ssl_params): """ Initialize connection. @@ -180,25 +166,27 @@ def connect(self) -> Union[dict, OrderedDict]: detecting_protocol = False # choose highest version first - if self.client.protocol_version is None: + if self.client.protocol_context is None: detecting_protocol = True - self.client.protocol_version = max(PROTOCOLS) + self.client.protocol_context = ProtocolContext(max(PROTOCOLS), BitmaskFeature.all_supported()) try: result = self._connect_version() except HandshakeError as e: if e.expected_version in PROTOCOLS: - self.client.protocol_version = e.expected_version + self.client.protocol_context.version = e.expected_version result = self._connect_version() else: raise e except connection_errors: # restore undefined protocol version if detecting_protocol: - self.client.protocol_version = None + self.client.protocol_context = None raise # connection is ready for end user + features = BitmaskFeature.from_array(result.get('features', None)) + self.client.protocol_context.features = features self.uuid = result.get('node_uuid', None) # version-specific (1.4+) self.failed = False return result @@ -214,10 +202,10 @@ def _connect_version(self) -> Union[dict, OrderedDict]: self._socket = wrap(self._socket, self.ssl_params) self._socket.connect((self.host, self.port)) - protocol_version = self.client.protocol_version + protocol_context = self.client.protocol_context hs_request = HandshakeRequest( - protocol_version, + protocol_context, self.username, self.password ) @@ -227,7 +215,7 @@ def _connect_version(self) -> Union[dict, OrderedDict]: self.send(stream.getbuffer(), reconnect=False) with BinaryStream(self.client, self.recv(reconnect=False)) as stream: - hs_response = HandshakeResponse.parse(stream, self.protocol_version) + hs_response = HandshakeResponse.parse(stream, self.protocol_context) if hs_response.op_code == 0: self.close() diff --git a/pyignite/connection/handshake.py b/pyignite/connection/handshake.py index 0b0fe50..af7bdb3 100644 --- a/pyignite/connection/handshake.py +++ b/pyignite/connection/handshake.py @@ -13,9 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Optional, Tuple +from typing import Optional -from pyignite.datatypes import Byte, Int, Short, String, UUIDObject +from pyignite.connection.protocol_context import ProtocolContext +from pyignite.datatypes import Byte, Int, Short, String, UUIDObject, ByteArrayObject from pyignite.datatypes.internal import Struct from pyignite.stream import READ_BACKWARD @@ -27,10 +28,10 @@ class HandshakeRequest: handshake_struct = None username = None password = None - protocol_version = None + protocol_context = None def __init__( - self, protocol_version: Tuple[int, int, int], + self, protocol_context: 'ProtocolContext', username: Optional[str] = None, password: Optional[str] = None ): fields = [ @@ -41,7 +42,9 @@ def __init__( ('version_patch', Short), ('client_code', Byte), ] - self.protocol_version = protocol_version + self.protocol_context = protocol_context + if self.protocol_context.is_feature_flags_supported(): + fields.append(('features', ByteArrayObject)) if username and password: self.username = username self.password = password @@ -58,14 +61,19 @@ async def from_python_async(self, stream): await self.handshake_struct.from_python_async(stream, self.__create_handshake_data()) def __create_handshake_data(self): + version = self.protocol_context.version handshake_data = { 'length': 8, 'op_code': OP_HANDSHAKE, - 'version_major': self.protocol_version[0], - 'version_minor': self.protocol_version[1], - 'version_patch': self.protocol_version[2], + 'version_major': version[0], + 'version_minor': version[1], + 'version_patch': version[2], 'client_code': 2, # fixed value defined by protocol } + if self.protocol_context.is_feature_flags_supported(): + features = bytes(self.protocol_context.features) + handshake_data['features'] = features + handshake_data['length'] += 5 + len(features) if self.username and self.password: handshake_data.update({ 'username': self.username, @@ -96,12 +104,12 @@ def __getattr__(self, item): return self.get(item) @classmethod - def parse(cls, stream, protocol_version): + def parse(cls, stream, protocol_context): start_class = cls.__response_start.parse(stream) start = stream.read_ctype(start_class, direction=READ_BACKWARD) data = cls.__response_start.to_python(start) - response_end = cls.__create_response_end(data, protocol_version) + response_end = cls.__create_response_end(data, protocol_context) if response_end: end_class = response_end.parse(stream) end = stream.read_ctype(end_class, direction=READ_BACKWARD) @@ -110,12 +118,12 @@ def parse(cls, stream, protocol_version): return cls(data) @classmethod - async def parse_async(cls, stream, protocol_version): + async def parse_async(cls, stream, protocol_context): start_class = cls.__response_start.parse(stream) start = stream.read_ctype(start_class, direction=READ_BACKWARD) data = await cls.__response_start.to_python_async(start) - response_end = cls.__create_response_end(data, protocol_version) + response_end = cls.__create_response_end(data, protocol_context) if response_end: end_class = await response_end.parse_async(stream) end = stream.read_ctype(end_class, direction=READ_BACKWARD) @@ -124,7 +132,7 @@ async def parse_async(cls, stream, protocol_version): return cls(data) @classmethod - def __create_response_end(cls, start_data, protocol_version): + def __create_response_end(cls, start_data, protocol_context): response_end = None if start_data['op_code'] == 0: response_end = Struct([ @@ -134,7 +142,12 @@ def __create_response_end(cls, start_data, protocol_version): ('message', String), 
('client_status', Int) ]) - elif protocol_version >= (1, 4, 0): + elif protocol_context.is_feature_flags_supported(): + response_end = Struct([ + ('features', ByteArrayObject), + ('node_uuid', UUIDObject), + ]) + elif protocol_context.is_partition_awareness_supported(): response_end = Struct([ ('node_uuid', UUIDObject), ]) diff --git a/pyignite/connection/protocol_context.py b/pyignite/connection/protocol_context.py new file mode 100644 index 0000000..54f5240 --- /dev/null +++ b/pyignite/connection/protocol_context.py @@ -0,0 +1,100 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Tuple + +from pyignite.connection.bitmask_feature import BitmaskFeature + + +class ProtocolContext: + """ + Protocol context. Provides ability to easily check supported supported + protocol features. + """ + + def __init__(self, version: Tuple[int, int, int], features: BitmaskFeature = None): + self._version = version + self._features = features + self._ensure_consistency() + + def __hash__(self): + return hash((self._version, self._features)) + + def __eq__(self, other): + return isinstance(other, ProtocolContext) and \ + self.version == other.version and \ + self.features == other.features + + def _ensure_consistency(self): + if not self.is_feature_flags_supported(): + self._features = None + + @property + def version(self): + return getattr(self, '_version', None) + + @version.setter + def version(self, version: Tuple[int, int, int]): + """ + Set version. + + This call may result in features being reset to None if the protocol + version does not support feature masks. + + :param version: Version to set. + """ + setattr(self, '_version', version) + self._ensure_consistency() + + @property + def features(self): + return getattr(self, '_features', None) + + @features.setter + def features(self, features: BitmaskFeature): + """ + Try and set new feature set. + + If features are not supported by the protocol, None is set as features + instead. + + :param features: Features to set. + """ + setattr(self, '_features', features) + self._ensure_consistency() + + def is_partition_awareness_supported(self) -> bool: + """ + Check whether partition awareness supported by the current protocol. + """ + return self.version >= (1, 4, 0) + + def is_status_flags_supported(self) -> bool: + """ + Check whether status flags supported by the current protocol. + """ + return self.version >= (1, 4, 0) + + def is_feature_flags_supported(self) -> bool: + """ + Check whether feature flags supported by the current protocol. + """ + return self.version >= (1, 7, 0) + + def is_cluster_api_supported(self) -> bool: + """ + Check whether cluster API supported by the current protocol. 
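[editor note] The interplay between `version` and `features` can be illustrated as follows (a sketch; the feature mask survives only on protocol 1.7.0 and newer):

```python
from pyignite.connection.bitmask_feature import BitmaskFeature
from pyignite.connection.protocol_context import ProtocolContext

ctx = ProtocolContext((1, 7, 0), BitmaskFeature.all_supported())
assert ctx.is_feature_flags_supported()
assert ctx.is_cluster_api_supported()

# Falling back to an older protocol (as the handshake retry does) silently
# drops the feature mask via _ensure_consistency().
ctx.version = (1, 6, 0)
assert ctx.features is None
assert not ctx.is_cluster_api_supported()
```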
+ """ + return self.features and BitmaskFeature.CLUSTER_API in self.features diff --git a/pyignite/constants.py b/pyignite/constants.py index 02f7124..c08a3ce 100644 --- a/pyignite/constants.py +++ b/pyignite/constants.py @@ -31,14 +31,17 @@ ] PROTOCOLS = { + (1, 7, 0), + (1, 6, 0), + (1, 5, 0), (1, 4, 0), (1, 3, 0), (1, 2, 0), } PROTOCOL_VERSION_MAJOR = 1 -PROTOCOL_VERSION_MINOR = 4 -PROTOCOL_VERSION_PATCH = 0 +PROTOCOL_VERSION_MINOR = 7 +PROTOCOL_VERSION_PATCH = 1 MAX_LONG = 9223372036854775807 MIN_LONG = -9223372036854775808 diff --git a/pyignite/datatypes/cluster_state.py b/pyignite/datatypes/cluster_state.py new file mode 100644 index 0000000..863a1d2 --- /dev/null +++ b/pyignite/datatypes/cluster_state.py @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import IntEnum + + +class ClusterState(IntEnum): + #: Cluster deactivated. Cache operations aren't allowed. + INACTIVE = 0 + + #: Cluster activated. All cache operations are allowed. + ACTIVE = 1 + + #: Cluster activated. Cache read operation allowed, Cache data change operation + #: aren't allowed. + ACTIVE_READ_ONLY = 2 diff --git a/pyignite/exceptions.py b/pyignite/exceptions.py index 579aa29..215ccd0 100644 --- a/pyignite/exceptions.py +++ b/pyignite/exceptions.py @@ -65,7 +65,7 @@ class ParameterError(Exception): class CacheError(Exception): """ - This exception is raised, whenever any remote Thin client operation + This exception is raised, whenever any remote Thin client cache operation returns an error. """ pass @@ -93,4 +93,20 @@ class SQLError(CacheError): pass +class ClusterError(Exception): + """ + This exception is raised, whenever any remote Thin client cluster operation + returns an error. + """ + pass + + +class NotSupportedByClusterError(Exception): + """ + This exception is raised, whenever cluster is not supported specific + operation probably because it is outdated. 
+ """ + pass + + connection_errors = (IOError, OSError, EOFError) diff --git a/pyignite/queries/op_codes.py b/pyignite/queries/op_codes.py index 7372713..c152f7c 100644 --- a/pyignite/queries/op_codes.py +++ b/pyignite/queries/op_codes.py @@ -61,7 +61,10 @@ OP_QUERY_SQL_FIELDS = 2004 OP_QUERY_SQL_FIELDS_CURSOR_GET_PAGE = 2005 -P_GET_BINARY_TYPE_NAME = 3000 +OP_GET_BINARY_TYPE_NAME = 3000 OP_REGISTER_BINARY_TYPE_NAME = 3001 OP_GET_BINARY_TYPE = 3002 OP_PUT_BINARY_TYPE = 3003 + +OP_CLUSTER_GET_STATE = 5000 +OP_CLUSTER_CHANGE_STATE = 5001 diff --git a/pyignite/queries/query.py b/pyignite/queries/query.py index beea5d9..d9e6aaf 100644 --- a/pyignite/queries/query.py +++ b/pyignite/queries/query.py @@ -124,7 +124,7 @@ def perform( self.from_python(stream, query_params) response_data = conn.request(stream.getbuffer()) - response_struct = self.response_type(protocol_version=conn.protocol_version, + response_struct = self.response_type(protocol_context=conn.protocol_context, following=response_config, **kwargs) with BinaryStream(conn.client, response_data) as stream: @@ -156,7 +156,7 @@ async def perform_async( await self.from_python_async(stream, query_params) data = await conn.request(stream.getbuffer()) - response_struct = self.response_type(protocol_version=conn.protocol_version, + response_struct = self.response_type(protocol_context=conn.protocol_context, following=response_config, **kwargs) with AioBinaryStream(conn.client, data) as stream: diff --git a/pyignite/queries/response.py b/pyignite/queries/response.py index 83a6e6a..6495802 100644 --- a/pyignite/queries/response.py +++ b/pyignite/queries/response.py @@ -19,6 +19,7 @@ from collections import OrderedDict import ctypes +from pyignite.connection.protocol_context import ProtocolContext from pyignite.constants import RHF_TOPOLOGY_CHANGED, RHF_ERROR from pyignite.datatypes import AnyDataObject, Bool, Int, Long, String, StringArray, Struct from pyignite.datatypes.binary import body_struct, enum_struct, schema_struct @@ -29,7 +30,7 @@ @attr.s class Response: following = attr.ib(type=list, factory=list) - protocol_version = attr.ib(type=tuple, factory=tuple) + protocol_context = attr.ib(type=type(ProtocolContext), default=None) _response_header = None _response_class_name = 'Response' @@ -44,7 +45,7 @@ def __build_header(self): ('query_id', ctypes.c_longlong), ] - if self.protocol_version and self.protocol_version >= (1, 4, 0): + if self.protocol_context.is_status_flags_supported(): fields.append(('flags', ctypes.c_short)) else: fields.append(('status_code', ctypes.c_int),) @@ -68,7 +69,7 @@ def __parse_header(self, stream): fields = [] has_error = False - if self.protocol_version and self.protocol_version >= (1, 4, 0): + if self.protocol_context.is_status_flags_supported(): if header.flags & RHF_TOPOLOGY_CHANGED: fields = [ ('affinity_version', ctypes.c_longlong), diff --git a/pyignite/stream/aio_cluster.py b/pyignite/stream/aio_cluster.py new file mode 100644 index 0000000..8a2f98e --- /dev/null +++ b/pyignite/stream/aio_cluster.py @@ -0,0 +1,53 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module contains `AioCluster` that lets you get info and change state of the +whole cluster. +""" +from pyignite import AioClient +from pyignite.api.cluster import cluster_get_state_async, cluster_set_state_async + + +class AioCluster: + """ + Ignite cluster abstraction. Users should never use this class directly, + but construct its instances with + :py:meth:`~pyignite.aio_client.AioClient.get_cluster` method instead. + """ + + def __init__(self, client: 'AioClient'): + self._client = client + + async def get_state(self): + """ + Gets current cluster state. + + :return: Current cluster state. This is one of ClusterState.INACTIVE, + ClusterState.ACTIVE or ClusterState.ACTIVE_READ_ONLY. + """ + return await cluster_get_state_async(await self._client.random_node()) + + async def set_state(self, state): + """ + Changes current cluster state to the given. + + Note: Deactivation clears in-memory caches (without persistence) + including the system caches. + + :param state: New cluster state. This is one of ClusterState.INACTIVE, + ClusterState.ACTIVE or ClusterState.ACTIVE_READ_ONLY. + """ + return await cluster_set_state_async(await self._client.random_node(), state) diff --git a/tests/config/ignite-config.xml.jinja2 b/tests/config/ignite-config.xml.jinja2 index 2bf5129..325a581 100644 --- a/tests/config/ignite-config.xml.jinja2 +++ b/tests/config/ignite-config.xml.jinja2 @@ -31,7 +31,7 @@ - {% if use_auth %} + {% if use_persistence %} {% endif %} @@ -51,9 +51,8 @@ {% endif %} - {% if use_ssl %} - + {% endif %} diff --git a/tests/custom/test_cluster.py b/tests/custom/test_cluster.py new file mode 100644 index 0000000..e82e238 --- /dev/null +++ b/tests/custom/test_cluster.py @@ -0,0 +1,125 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
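[editor note] `AioCluster` mirrors the synchronous facade. A rough async sketch, using the same illustrative names and address as the synchronous example above:

```python
import asyncio

from pyignite import AioClient
from pyignite.datatypes.cluster_state import ClusterState


async def main():
    client = AioClient()
    async with client.connect('127.0.0.1', 10800):
        cluster = client.get_cluster()

        if await cluster.get_state() != ClusterState.ACTIVE:
            await cluster.set_state(ClusterState.ACTIVE)

        cache = await client.get_or_create_cache('my_cache')
        await cache.put(1, 'one')
        print(await cache.get(1))


asyncio.run(main())
```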
+ +import pytest + +from pyignite import Client, AioClient +from pyignite.exceptions import CacheError +from tests.util import clear_ignite_work_dir, start_ignite_gen + +from pyignite.datatypes.cluster_state import ClusterState + + +@pytest.fixture(params=['with-persistence', 'without-persistence']) +def with_persistence(request): + yield request.param == 'with-persistence' + + +@pytest.fixture(autouse=True) +def cleanup(): + clear_ignite_work_dir() + yield None + clear_ignite_work_dir() + + +@pytest.fixture(autouse=True) +def server1(with_persistence, cleanup): + yield from start_ignite_gen(idx=1, use_persistence=with_persistence) + + +@pytest.fixture(autouse=True) +def server2(with_persistence, cleanup): + yield from start_ignite_gen(idx=2, use_persistence=with_persistence) + + +def test_cluster_set_active(with_persistence): + key = 42 + val = 42 + start_state = ClusterState.INACTIVE if with_persistence else ClusterState.ACTIVE + + client = Client() + with client.connect([("127.0.0.1", 10801), ("127.0.0.1", 10802)]): + cluster = client.get_cluster() + assert cluster.get_state() == start_state + + cluster.set_state(ClusterState.ACTIVE) + assert cluster.get_state() == ClusterState.ACTIVE + + cache = client.get_or_create_cache("test_cache") + cache.put(key, val) + assert cache.get(key) == val + + cluster.set_state(ClusterState.ACTIVE_READ_ONLY) + assert cluster.get_state() == ClusterState.ACTIVE_READ_ONLY + + assert cache.get(key) == val + with pytest.raises(CacheError): + cache.put(key, val + 1) + + cluster.set_state(ClusterState.INACTIVE) + assert cluster.get_state() == ClusterState.INACTIVE + + with pytest.raises(CacheError): + cache.get(key) + + with pytest.raises(CacheError): + cache.put(key, val + 1) + + cluster.set_state(ClusterState.ACTIVE) + assert cluster.get_state() == ClusterState.ACTIVE + + cache.put(key, val + 2) + assert cache.get(key) == val + 2 + + +@pytest.mark.asyncio +async def test_cluster_set_active_async(with_persistence): + key = 42 + val = 42 + start_state = ClusterState.INACTIVE if with_persistence else ClusterState.ACTIVE + + client = AioClient() + async with client.connect([("127.0.0.1", 10801), ("127.0.0.1", 10802)]): + cluster = client.get_cluster() + assert await cluster.get_state() == start_state + + await cluster.set_state(ClusterState.ACTIVE) + assert await cluster.get_state() == ClusterState.ACTIVE + + cache = await client.get_or_create_cache("test_cache") + await cache.put(key, val) + assert await cache.get(key) == val + + await cluster.set_state(ClusterState.ACTIVE_READ_ONLY) + assert await cluster.get_state() == ClusterState.ACTIVE_READ_ONLY + + assert await cache.get(key) == val + with pytest.raises(CacheError): + await cache.put(key, val + 1) + + await cluster.set_state(ClusterState.INACTIVE) + assert await cluster.get_state() == ClusterState.INACTIVE + + with pytest.raises(CacheError): + await cache.get(key) + + with pytest.raises(CacheError): + await cache.put(key, val + 1) + + await cluster.set_state(ClusterState.ACTIVE) + assert await cluster.get_state() == ClusterState.ACTIVE + + await cache.put(key, val + 2) + assert await cache.get(key) == val + 2 diff --git a/tests/util.py b/tests/util.py index 5651739..af3b70e 100644 --- a/tests/util.py +++ b/tests/util.py @@ -155,7 +155,7 @@ def create_config_file(tpl_name, file_name, **kwargs): f.write(template.render(**kwargs)) -def start_ignite(idx=1, debug=False, use_ssl=False, use_auth=False): +def start_ignite(idx=1, debug=False, use_ssl=False, use_auth=False, use_persistence=False): clear_logs(idx) 
runner = get_ignite_runner() @@ -166,8 +166,16 @@ def start_ignite(idx=1, debug=False, use_ssl=False, use_auth=False): env["JVM_OPTS"] = "-Djava.net.preferIPv4Stack=true -Xdebug -Xnoagent -Djava.compiler=NONE " \ "-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005 " - params = {'ignite_instance_idx': str(idx), 'ignite_client_port': 10800 + idx, 'use_ssl': use_ssl, - 'use_auth': use_auth} + if use_auth: + use_persistence = True + + params = { + 'ignite_instance_idx': str(idx), + 'ignite_client_port': 10800 + idx, + 'use_ssl': use_ssl, + 'use_auth': use_auth, + 'use_persistence': use_persistence, + } create_config_file('log4j.xml.jinja2', f'log4j-{idx}.xml', **params) create_config_file('ignite-config.xml.jinja2', f'ignite-config-{idx}.xml', **params) @@ -177,7 +185,7 @@ def start_ignite(idx=1, debug=False, use_ssl=False, use_auth=False): srv = subprocess.Popen(ignite_cmd, env=env, cwd=get_test_dir()) - started = wait_for_condition(lambda: check_server_started(idx), timeout=30) + started = wait_for_condition(lambda: check_server_started(idx), timeout=60) if started: return srv @@ -185,8 +193,8 @@ def start_ignite(idx=1, debug=False, use_ssl=False, use_auth=False): raise Exception("Failed to start Ignite: timeout while trying to connect") -def start_ignite_gen(idx=1, use_ssl=False, use_auth=False): - srv = start_ignite(idx, use_ssl=use_ssl, use_auth=use_auth) +def start_ignite_gen(idx=1, use_ssl=False, use_auth=False, use_persistence=False): + srv = start_ignite(idx, use_ssl=use_ssl, use_auth=use_auth, use_persistence=use_persistence) try: yield srv finally: From e48f4bea7f91325ad1e07056c9d236008b91ee7e Mon Sep 17 00:00:00 2001 From: Ivan Dashchinskiy Date: Mon, 5 Apr 2021 14:14:49 +0300 Subject: [PATCH 27/62] IGNITE-14472 Multiple performance improvements This closes #28 --- pyignite/binary.py | 2 +- pyignite/connection/aio_connection.py | 42 +++++++++++++----- pyignite/connection/connection.py | 59 ++++++++++++++---------- pyignite/datatypes/internal.py | 64 +++++++++++++-------------- pyignite/datatypes/null_object.py | 2 +- pyignite/datatypes/standard.py | 2 +- pyignite/queries/query.py | 4 +- pyignite/queries/response.py | 48 ++++++++++---------- pyignite/stream/binary_stream.py | 51 +++++++++++++-------- 9 files changed, 159 insertions(+), 115 deletions(-) diff --git a/pyignite/binary.py b/pyignite/binary.py index 4e34267..5a5f895 100644 --- a/pyignite/binary.py +++ b/pyignite/binary.py @@ -201,7 +201,7 @@ def write_footer(obj, stream, header, header_class, schema_items, offsets, initi stream.write(schema) if save_to_buf: - obj._buffer = bytes(stream.mem_view(initial_pos, stream.tell() - initial_pos)) + obj._buffer = stream.slice(initial_pos, stream.tell() - initial_pos) obj._hashcode = header.hash_code def _setattr(self, attr_name: str, attr_value: Any): diff --git a/pyignite/connection/aio_connection.py b/pyignite/connection/aio_connection.py index ce32592..020f8d4 100644 --- a/pyignite/connection/aio_connection.py +++ b/pyignite/connection/aio_connection.py @@ -158,7 +158,7 @@ async def _connect_version(self) -> Union[dict, OrderedDict]: with AioBinaryStream(self.client) as stream: await hs_request.from_python_async(stream) - await self._send(stream.getbuffer(), reconnect=False) + await self._send(stream.getvalue(), reconnect=False) with AioBinaryStream(self.client, await self._recv(reconnect=False)) as stream: hs_response = await HandshakeResponse.parse_async(stream, self.protocol_context) @@ -185,7 +185,7 @@ async def _reconnect(self): except connection_errors: pass 
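[editor note] The switch from `mem_view`/`getbuffer()` to `slice()` (and to `getvalue()` when sending) trades a zero-copy view for a detached copy. The point is that an exported `memoryview` keeps the underlying `BytesIO` locked; a minimal illustration of that plain-Python behaviour, independent of the patch code:

```python
from io import BytesIO

stream = BytesIO(b'hello world')

live_view = stream.getbuffer()[0:5]        # memoryview tied to the stream's internal buffer
detached = bytes(stream.getbuffer()[0:5])  # independent copy, what slice() now returns

assert detached == b'hello'

live_view.release()   # an exported view must be released before the stream can be closed
stream.close()
```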
- async def request(self, data: Union[bytes, bytearray, memoryview]) -> bytearray: + async def request(self, data: Union[bytes, bytearray]) -> bytearray: """ Perform request. @@ -195,7 +195,7 @@ async def request(self, data: Union[bytes, bytearray, memoryview]) -> bytearray: await self._send(data) return await self._recv() - async def _send(self, data: Union[bytes, bytearray, memoryview], reconnect=True): + async def _send(self, data: Union[bytes, bytearray], reconnect=True): if self.closed: raise SocketError('Attempt to use closed connection.') @@ -212,21 +212,43 @@ async def _recv(self, reconnect=True) -> bytearray: if self.closed: raise SocketError('Attempt to use closed connection.') - with BytesIO() as stream: + data = bytearray(1024) + buffer = memoryview(data) + bytes_total_received, bytes_to_receive = 0, 0 + while True: try: - buf = await self._reader.readexactly(4) - response_len = int.from_bytes(buf, PROTOCOL_BYTE_ORDER) + chunk = await self._reader.read(len(buffer)) + bytes_received = len(chunk) + if bytes_received == 0: + raise SocketError('Connection broken.') - stream.write(buf) - - stream.write(await self._reader.readexactly(response_len)) + buffer[0:bytes_received] = chunk + bytes_total_received += bytes_received except connection_errors: self.failed = True if reconnect: await self._reconnect() raise - return bytearray(stream.getbuffer()) + if bytes_total_received < 4: + continue + elif bytes_to_receive == 0: + response_len = int.from_bytes(data[0:4], PROTOCOL_BYTE_ORDER) + bytes_to_receive = response_len + + if response_len + 4 > len(data): + buffer.release() + data.extend(bytearray(response_len + 4 - len(data))) + buffer = memoryview(data)[bytes_total_received:] + continue + + if bytes_total_received >= bytes_to_receive: + buffer.release() + break + + buffer = buffer[bytes_received:] + + return data async def close(self): async with self._mux: diff --git a/pyignite/connection/connection.py b/pyignite/connection/connection.py index 7d5778c..e8437dc 100644 --- a/pyignite/connection/connection.py +++ b/pyignite/connection/connection.py @@ -212,7 +212,7 @@ def _connect_version(self) -> Union[dict, OrderedDict]: with BinaryStream(self.client) as stream: hs_request.from_python(stream) - self.send(stream.getbuffer(), reconnect=False) + self.send(stream.getvalue(), reconnect=False) with BinaryStream(self.client, self.recv(reconnect=False)) as stream: hs_response = HandshakeResponse.parse(stream, self.protocol_context) @@ -235,7 +235,7 @@ def reconnect(self): except connection_errors: pass - def request(self, data: Union[bytes, bytearray, memoryview], flags=None) -> bytearray: + def request(self, data: Union[bytes, bytearray], flags=None) -> bytearray: """ Perform request. @@ -245,7 +245,7 @@ def request(self, data: Union[bytes, bytearray, memoryview], flags=None) -> byte self.send(data, flags=flags) return self.recv() - def send(self, data: Union[bytes, bytearray, memoryview], flags=None, reconnect=True): + def send(self, data: Union[bytes, bytearray], flags=None, reconnect=True): """ Send data down the socket. @@ -275,22 +275,6 @@ def recv(self, flags=None, reconnect=True) -> bytearray: :param flags: (optional) OS-specific flags. :param reconnect: (optional) reconnect on failure, default True. 
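[editor note] Both receive loops (the asyncio one above and the socket one below) implement the same framing rule: the first four bytes carry the little-endian length of the remainder of the message, and the response is accumulated into a single `bytearray` that is grown once the full size is known. A toy model of that rule, with fabricated chunks standing in for socket reads:

```python
def total_frame_size(buf: bytes) -> int:
    """Length prefix (4 bytes, little-endian) plus the payload it announces."""
    return 4 + int.from_bytes(buf[:4], byteorder='little')


received = bytearray()
for chunk in (b'\x05\x00\x00\x00\x01', b'\x02\x03', b'\x04\x05'):  # fake socket reads
    received.extend(chunk)
    if len(received) >= 4 and len(received) >= total_frame_size(received):
        break

assert total_frame_size(received) == 9 == len(received)
```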
""" - def _recv(buffer, num_bytes): - bytes_to_receive = num_bytes - while bytes_to_receive > 0: - try: - bytes_rcvd = self._socket.recv_into(buffer, bytes_to_receive, **kwargs) - if bytes_rcvd == 0: - raise SocketError('Connection broken.') - except connection_errors: - self.failed = True - if reconnect: - self.reconnect() - raise - - buffer = buffer[bytes_rcvd:] - bytes_to_receive -= bytes_rcvd - if self.closed: raise SocketError('Attempt to use closed connection.') @@ -298,12 +282,39 @@ def _recv(buffer, num_bytes): if flags is not None: kwargs['flags'] = flags - data = bytearray(4) - _recv(memoryview(data), 4) - response_len = int.from_bytes(data, PROTOCOL_BYTE_ORDER) + data = bytearray(1024) + buffer = memoryview(data) + bytes_total_received, bytes_to_receive = 0, 0 + while True: + try: + bytes_received = self._socket.recv_into(buffer, len(buffer), **kwargs) + if bytes_received == 0: + raise SocketError('Connection broken.') + bytes_total_received += bytes_received + except connection_errors: + self.failed = True + if reconnect: + self.reconnect() + raise + + if bytes_total_received < 4: + continue + elif bytes_to_receive == 0: + response_len = int.from_bytes(data[0:4], PROTOCOL_BYTE_ORDER) + bytes_to_receive = response_len + + if response_len + 4 > len(data): + buffer.release() + data.extend(bytearray(response_len + 4 - len(data))) + buffer = memoryview(data)[bytes_total_received:] + continue + + if bytes_total_received >= bytes_to_receive: + buffer.release() + break + + buffer = buffer[bytes_received:] - data.extend(bytearray(response_len)) - _recv(memoryview(data)[4:], response_len) return data def close(self): diff --git a/pyignite/datatypes/internal.py b/pyignite/datatypes/internal.py index 0de50e2..55ed844 100644 --- a/pyignite/datatypes/internal.py +++ b/pyignite/datatypes/internal.py @@ -36,7 +36,10 @@ from ..stream import READ_BACKWARD -def tc_map(key: bytes, _memo_map: dict = {}): +_tc_map = {} + + +def tc_map(key: bytes): """ Returns a default parser/generator class for the given type code. @@ -49,7 +52,8 @@ def tc_map(key: bytes, _memo_map: dict = {}): of the “type code-type class” mapping, :return: parser/generator class for the type code. 
""" - if not _memo_map: + global _tc_map + if not _tc_map: from pyignite.datatypes import ( Null, ByteObject, ShortObject, IntObject, LongObject, FloatObject, DoubleObject, CharObject, BoolObject, UUIDObject, DateObject, @@ -64,7 +68,7 @@ def tc_map(key: bytes, _memo_map: dict = {}): MapObject, BinaryObject, WrappedDataObject, ) - _memo_map = { + _tc_map = { TC_NULL: Null, TC_BYTE: ByteObject, @@ -110,7 +114,7 @@ def tc_map(key: bytes, _memo_map: dict = {}): TC_COMPLEX_OBJECT: BinaryObject, TC_ARRAY_WRAPPED_OBJECTS: WrappedDataObject, } - return _memo_map[key] + return _tc_map[key] class Conditional: @@ -183,7 +187,7 @@ async def parse_async(self, stream): def __parse_length(self, stream): counter_type_len = ctypes.sizeof(self.counter_type) length = int.from_bytes( - stream.mem_view(offset=counter_type_len), + stream.slice(offset=counter_type_len), byteorder=PROTOCOL_BYTE_ORDER ) stream.seek(counter_type_len, SEEK_CUR) @@ -348,6 +352,9 @@ class AnyDataObject: """ _python_map = None _python_array_map = None + _map_obj_type = None + _collection_obj_type = None + _binary_obj_type = None @staticmethod def get_subtype(iterable, allow_none=False): @@ -391,7 +398,7 @@ async def parse_async(cls, stream): @classmethod def __data_class_parse(cls, stream): - type_code = bytes(stream.mem_view(offset=ctypes.sizeof(ctypes.c_byte))) + type_code = stream.slice(offset=ctypes.sizeof(ctypes.c_byte)) try: return tc_map(type_code) except KeyError: @@ -416,15 +423,17 @@ def __data_class_from_ctype(cls, ctype_object): return tc_map(type_code) @classmethod - def _init_python_map(cls): + def _init_python_mapping(cls): """ Optimizes Python types→Ignite types map creation for speed. Local imports seem inevitable here. """ from pyignite.datatypes import ( - LongObject, DoubleObject, String, BoolObject, Null, UUIDObject, - DateObject, TimeObject, DecimalObject, ByteArrayObject, + LongObject, DoubleObject, String, BoolObject, Null, UUIDObject, DateObject, TimeObject, + DecimalObject, ByteArrayObject, LongArrayObject, DoubleArrayObject, StringArrayObject, + BoolArrayObject, UUIDArrayObject, DateArrayObject, TimeArrayObject, DecimalArrayObject, + MapObject, CollectionObject, BinaryObject ) cls._python_map = { @@ -442,17 +451,6 @@ def _init_python_map(cls): decimal.Decimal: DecimalObject, } - @classmethod - def _init_python_array_map(cls): - """ - Optimizes Python types→Ignite array types map creation for speed. 
- """ - from pyignite.datatypes import ( - LongArrayObject, DoubleArrayObject, StringArrayObject, - BoolArrayObject, UUIDArrayObject, DateArrayObject, TimeArrayObject, - DecimalArrayObject, - ) - cls._python_array_map = { int: LongArrayObject, float: DoubleArrayObject, @@ -466,18 +464,20 @@ def _init_python_array_map(cls): decimal.Decimal: DecimalArrayObject, } + cls._map_obj_type = MapObject + cls._collection_obj_type = CollectionObject + cls._binary_obj_type = BinaryObject + @classmethod def map_python_type(cls, value): - from pyignite.datatypes import ( - MapObject, CollectionObject, BinaryObject, - ) - - if cls._python_map is None: - cls._init_python_map() - if cls._python_array_map is None: - cls._init_python_array_map() + if cls._python_map is None or cls._python_array_map is None: + cls._init_python_mapping() value_type = type(value) + + if value_type in cls._python_map: + return cls._python_map[value_type] + if is_iterable(value) and value_type not in (str, bytearray, bytes): value_subtype = cls.get_subtype(value) if value_subtype in cls._python_array_map: @@ -490,7 +490,7 @@ def map_python_type(cls, value): isinstance(value[0], int), isinstance(value[1], dict), ]): - return MapObject + return cls._map_obj_type if all([ value_subtype is None, @@ -498,7 +498,7 @@ def map_python_type(cls, value): isinstance(value[0], int), is_iterable(value[1]), ]): - return CollectionObject + return cls._collection_obj_type # no default for ObjectArrayObject, sorry @@ -507,10 +507,8 @@ def map_python_type(cls, value): ) if is_binary(value): - return BinaryObject + return cls._binary_obj_type - if value_type in cls._python_map: - return cls._python_map[value_type] raise TypeError( 'Type `{}` is invalid.'.format(value_type) ) diff --git a/pyignite/datatypes/null_object.py b/pyignite/datatypes/null_object.py index f16034f..8ac47b2 100644 --- a/pyignite/datatypes/null_object.py +++ b/pyignite/datatypes/null_object.py @@ -140,7 +140,7 @@ async def to_python_async(cls, ctypes_object, *args, **kwargs): def __check_null_input(cls, stream): type_len = ctypes.sizeof(ctypes.c_byte) - if stream.mem_view(offset=type_len) == TC_NULL: + if stream.slice(offset=type_len) == TC_NULL: stream.seek(type_len, SEEK_CUR) return True, Null.build_c_type() diff --git a/pyignite/datatypes/standard.py b/pyignite/datatypes/standard.py index 2b61235..4ca6795 100644 --- a/pyignite/datatypes/standard.py +++ b/pyignite/datatypes/standard.py @@ -91,7 +91,7 @@ def build_c_type(cls, length: int): @classmethod def parse_not_null(cls, stream): length = int.from_bytes( - stream.mem_view(stream.tell() + ctypes.sizeof(ctypes.c_byte), ctypes.sizeof(ctypes.c_int)), + stream.slice(stream.tell() + ctypes.sizeof(ctypes.c_byte), ctypes.sizeof(ctypes.c_int)), byteorder=PROTOCOL_BYTE_ORDER ) diff --git a/pyignite/queries/query.py b/pyignite/queries/query.py index d9e6aaf..8dac64f 100644 --- a/pyignite/queries/query.py +++ b/pyignite/queries/query.py @@ -122,7 +122,7 @@ def perform( """ with BinaryStream(conn.client) as stream: self.from_python(stream, query_params) - response_data = conn.request(stream.getbuffer()) + response_data = conn.request(stream.getvalue()) response_struct = self.response_type(protocol_context=conn.protocol_context, following=response_config, **kwargs) @@ -154,7 +154,7 @@ async def perform_async( """ with AioBinaryStream(conn.client) as stream: await self.from_python_async(stream, query_params) - data = await conn.request(stream.getbuffer()) + data = await conn.request(stream.getvalue()) response_struct = 
self.response_type(protocol_context=conn.protocol_context, following=response_config, **kwargs) diff --git a/pyignite/queries/response.py b/pyignite/queries/response.py index 6495802..f0338e1 100644 --- a/pyignite/queries/response.py +++ b/pyignite/queries/response.py @@ -27,42 +27,42 @@ from pyignite.stream import READ_BACKWARD +class StatusFlagResponseHeader(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('length', ctypes.c_int), + ('query_id', ctypes.c_longlong), + ('flags', ctypes.c_short) + ] + + +class ResponseHeader(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('length', ctypes.c_int), + ('query_id', ctypes.c_longlong), + ('status_code', ctypes.c_int) + ] + + @attr.s class Response: following = attr.ib(type=list, factory=list) protocol_context = attr.ib(type=type(ProtocolContext), default=None) - _response_header = None _response_class_name = 'Response' def __attrs_post_init__(self): # replace None with empty list self.following = self.following or [] - def __build_header(self): - if self._response_header is None: - fields = [ - ('length', ctypes.c_int), - ('query_id', ctypes.c_longlong), - ] - - if self.protocol_context.is_status_flags_supported(): - fields.append(('flags', ctypes.c_short)) - else: - fields.append(('status_code', ctypes.c_int),) - - self._response_header = type( - 'ResponseHeader', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': fields, - }, - ) - return self._response_header - def __parse_header(self, stream): init_pos = stream.tell() - header_class = self.__build_header() + + if self.protocol_context.is_status_flags_supported(): + header_class = StatusFlagResponseHeader + else: + header_class = ResponseHeader + header_len = ctypes.sizeof(header_class) header = stream.read_ctype(header_class) stream.seek(header_len, SEEK_CUR) diff --git a/pyignite/stream/binary_stream.py b/pyignite/stream/binary_stream.py index 57b4b83..3923a3b 100644 --- a/pyignite/stream/binary_stream.py +++ b/pyignite/stream/binary_stream.py @@ -23,7 +23,12 @@ READ_BACKWARD = 1 -class BinaryStreamBaseMixin: +class BinaryStreamBase: + def __init__(self, client, buf=None): + self.client = client + self.stream = BytesIO(buf) if buf else BytesIO() + self._buffer = None + @property def compact_footer(self) -> bool: return self.client.compact_footer @@ -50,10 +55,11 @@ def read_ctype(self, ctype_class, position=None, direction=READ_FORWARD): else: start, end = init_position - ctype_len, init_position - buf = self.stream.getbuffer()[start:end] - return ctype_class.from_buffer_copy(buf) + with self.getbuffer()[start:end] as buf: + return ctype_class.from_buffer_copy(buf) def write(self, buf): + self._release_buffer() return self.stream.write(buf) def tell(self): @@ -62,30 +68,39 @@ def tell(self): def seek(self, *args, **kwargs): return self.stream.seek(*args, **kwargs) + def getbuffer(self): + if self._buffer: + return self._buffer + + self._buffer = self.stream.getbuffer() + return self._buffer + def getvalue(self): return self.stream.getvalue() - def getbuffer(self): - return self.stream.getbuffer() - - def mem_view(self, start=-1, offset=0): + def slice(self, start=-1, offset=0): start = start if start >= 0 else self.tell() - return self.stream.getbuffer()[start:start + offset] + with self.getbuffer()[start:start + offset] as buf: + return bytes(buf) def hashcode(self, start, bytes_len): - return ignite_utils.hashcode(self.stream.getbuffer()[start:start + bytes_len]) + with self.getbuffer()[start:start + bytes_len] as buf: + return 
ignite_utils.hashcode(buf) + + def _release_buffer(self): + if self._buffer: + self._buffer.release() + self._buffer = None def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): - try: - self.stream.close() - except BufferError: - pass + self._release_buffer() + self.stream.close() -class BinaryStream(BinaryStreamBaseMixin): +class BinaryStream(BinaryStreamBase): """ Synchronous binary stream. """ @@ -94,8 +109,7 @@ def __init__(self, client: 'pyignite.Client', buf: Optional[Union[bytes, bytearr :param client: Client instance, required. :param buf: Buffer, optional parameter. If not passed, creates empty BytesIO. """ - self.client = client - self.stream = BytesIO(buf) if buf else BytesIO() + super().__init__(client, buf) def get_dataclass(self, header): result = self.client.query_binary_type(header.type_id, header.schema_id) @@ -107,7 +121,7 @@ def register_binary_type(self, *args, **kwargs): self.client.register_binary_type(*args, **kwargs) -class AioBinaryStream(BinaryStreamBaseMixin): +class AioBinaryStream(BinaryStreamBase): """ Asyncio binary stream. """ @@ -118,8 +132,7 @@ def __init__(self, client: 'pyignite.AioClient', buf: Optional[Union[bytes, byte :param client: AioClient instance, required. :param buf: Buffer, optional parameter. If not passed, creates empty BytesIO. """ - self.client = client - self.stream = BytesIO(buf) if buf else BytesIO() + super().__init__(client, buf) async def get_dataclass(self, header): result = await self.client.query_binary_type(header.type_id, header.schema_id) From 70bb1d9c9e6648a74257f9c7f65922d80dc7317f Mon Sep 17 00:00:00 2001 From: Ivan Dashchinskiy Date: Thu, 8 Apr 2021 13:09:22 +0300 Subject: [PATCH 28/62] IGNITE-14418 Add async client documentation, update examples This closes #29 --- README.md | 12 +- docs/async_examples.rst | 151 +++++++++ docs/conf.py | 28 +- docs/datatypes/cache_props.rst | 8 +- docs/examples.rst | 136 +++++--- docs/index.rst | 1 + docs/modules.rst | 16 +- docs/readme.rst | 93 +++--- docs/source/modules.rst | 15 + docs/source/pyignite.aio_cache.rst | 22 ++ docs/source/pyignite.aio_client.rst | 22 ++ docs/source/pyignite.api.binary.rst | 7 - docs/source/pyignite.api.cache_config.rst | 7 - docs/source/pyignite.api.key_value.rst | 7 - docs/source/pyignite.api.result.rst | 7 - docs/source/pyignite.api.rst | 19 -- docs/source/pyignite.api.sql.rst | 7 - docs/source/pyignite.binary.rst | 15 + docs/source/pyignite.cache.rst | 17 +- docs/source/pyignite.client.rst | 17 +- docs/source/pyignite.connection.handshake.rst | 7 - docs/source/pyignite.connection.rst | 24 +- docs/source/pyignite.connection.ssl.rst | 7 - docs/source/pyignite.constants.rst | 7 - docs/source/pyignite.cursors.rst | 22 ++ docs/source/pyignite.datatypes.base.rst | 15 + docs/source/pyignite.datatypes.binary.rst | 15 + .../pyignite.datatypes.cache_config.rst | 15 + .../pyignite.datatypes.cache_properties.rst | 15 + docs/source/pyignite.datatypes.complex.rst | 15 + docs/source/pyignite.datatypes.internal.rst | 15 + docs/source/pyignite.datatypes.key_value.rst | 15 + .../source/pyignite.datatypes.null_object.rst | 15 + docs/source/pyignite.datatypes.primitive.rst | 15 + .../pyignite.datatypes.primitive_arrays.rst | 15 + .../pyignite.datatypes.primitive_objects.rst | 15 + docs/source/pyignite.datatypes.prop_codes.rst | 7 - docs/source/pyignite.datatypes.rst | 17 +- docs/source/pyignite.datatypes.sql.rst | 15 + docs/source/pyignite.datatypes.standard.rst | 15 + docs/source/pyignite.datatypes.type_codes.rst | 7 - 
docs/source/pyignite.exceptions.rst | 15 + docs/source/pyignite.queries.op_codes.rst | 7 - docs/source/pyignite.queries.rst | 15 - docs/source/pyignite.rst | 33 +- docs/source/pyignite.utils.rst | 7 - examples/async_key_value.py | 56 ++++ examples/async_sql.py | 301 ++++++++++++++++++ examples/binary_basics.py | 35 +- examples/create_binary.py | 145 ++++----- examples/failover.py | 48 +-- examples/get_and_put.py | 32 +- examples/get_and_put_complex.py | 69 ++-- examples/migrate_binary.py | 30 +- examples/read_binary.py | 156 +++++---- examples/readme.md | 6 +- examples/scans.py | 65 ++-- examples/sql.py | 173 +++++----- examples/type_hints.py | 45 ++- pyignite/aio_cache.py | 2 +- pyignite/aio_client.py | 8 +- pyignite/cache.py | 4 +- pyignite/client.py | 10 +- pyignite/cursors.py | 65 ++++ 64 files changed, 1535 insertions(+), 692 deletions(-) create mode 100644 docs/async_examples.rst create mode 100644 docs/source/pyignite.aio_cache.rst create mode 100644 docs/source/pyignite.aio_client.rst delete mode 100644 docs/source/pyignite.api.binary.rst delete mode 100644 docs/source/pyignite.api.cache_config.rst delete mode 100644 docs/source/pyignite.api.key_value.rst delete mode 100644 docs/source/pyignite.api.result.rst delete mode 100644 docs/source/pyignite.api.rst delete mode 100644 docs/source/pyignite.api.sql.rst delete mode 100644 docs/source/pyignite.connection.handshake.rst delete mode 100644 docs/source/pyignite.connection.ssl.rst delete mode 100644 docs/source/pyignite.constants.rst create mode 100644 docs/source/pyignite.cursors.rst delete mode 100644 docs/source/pyignite.datatypes.prop_codes.rst delete mode 100644 docs/source/pyignite.datatypes.type_codes.rst delete mode 100644 docs/source/pyignite.queries.op_codes.rst delete mode 100644 docs/source/pyignite.queries.rst delete mode 100644 docs/source/pyignite.utils.rst create mode 100644 examples/async_key_value.py create mode 100644 examples/async_sql.py diff --git a/README.md b/README.md index 47bd712..f44276f 100644 --- a/README.md +++ b/README.md @@ -99,8 +99,8 @@ Do not forget to install test requirements: $ pip install -r requirements/install.txt -r requirements/tests.txt ``` -Also, you'll need to have a binary release of Ignite with lib4j2 enabled and -`IGNITE_HOME` properly set: +Also, you'll need to have a binary release of Ignite with `log4j2` enabled and to set +`IGNITE_HOME` environment variable: ```bash $ cd $ export IGNITE_HOME=$(pwd) @@ -114,14 +114,6 @@ $ pytest ```bash $ pytest --examples ``` -### Run with ssl and not encrypted key -```bash -$ pytest --use-ssl=True --ssl-certfile=./tests/ssl/client_full.pem -``` -### Run with ssl and password-protected key -```bash -$ pytest --use-ssl=True --ssl-certfile=./tests/config/ssl/client_with_pass_full.pem --ssl-keyfile-password=654321 -``` If you need to change the connection parameters, see the documentation on [testing](https://apache-ignite-binary-protocol-client.readthedocs.io/en/latest/readme.html#testing). diff --git a/docs/async_examples.rst b/docs/async_examples.rst new file mode 100644 index 0000000..363599a --- /dev/null +++ b/docs/async_examples.rst @@ -0,0 +1,151 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. 
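[editor note] The new page documents the asynchronous client through `literalinclude` snippets from `examples/async_key_value.py` and `examples/async_sql.py`. For orientation, a condensed, hedged version of the key-value part — the cache name, keys and address are illustrative, and the cursor is used as an asynchronous context manager as the page describes:

```python
import asyncio

from pyignite import AioClient


async def main():
    client = AioClient()
    async with client.connect('127.0.0.1', 10800):
        cache = await client.get_or_create_cache('test_async_cache')
        await cache.put_all({i: f'value {i}' for i in range(5)})

        # AioCache.scan() returns an asynchronous cursor.
        async with cache.scan() as cursor:
            async for key, value in cursor:
                print(key, value)

        await cache.destroy()


asyncio.run(main())
```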
You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +.. _async_examples_of_usage: + +============================ +Asynchronous client examples +============================ +File: `async_key_value.py`_. + +Basic usage +----------- +Asynchronous client and cache (:py:class:`~pyignite.aio_client.AioClient` and :py:class:`~pyignite.aio_cache.AioCache`) +has mostly the same API as synchronous ones (:py:class:`~pyignite.client.Client` and :py:class:`~pyignite.cache.Cache`). +But there is some peculiarities. + +Basic key-value +=============== +Firstly, import dependencies. + +.. literalinclude:: ../examples/async_key_value.py + :language: python + :lines: 18 + +Let's connect to cluster and perform key-value queries. + +.. literalinclude:: ../examples/async_key_value.py + :language: python + :dedent: 4 + :lines: 23-38 + +Scan +==== +The :py:meth:`~pyignite.aio_cache.AioСache.scan` method returns :py:class:`~pyignite.cursors.AioScanCursor`, +that yields the resulting rows. + +.. literalinclude:: ../examples/async_key_value.py + :language: python + :dedent: 4 + :lines: 39-50 + + +File: `async_sql.py`_. + +SQL +--- + +First let us establish a connection. + +.. literalinclude:: ../examples/async_sql.py + :language: python + :dedent: 4 + :lines: 197-198 + +Then create tables. Begin with `Country` table, than proceed with related +tables `City` and `CountryLanguage`. + +.. literalinclude:: ../examples/async_sql.py + :language: python + :lines: 25-42, 51-59, 67-74 + +.. literalinclude:: ../examples/async_sql.py + :language: python + :dedent: 4 + :lines: 199-205 + +Create indexes. + +.. literalinclude:: ../examples/async_sql.py + :language: python + :lines: 60-62, 75-77 + +.. literalinclude:: ../examples/async_sql.py + :language: python + :dedent: 8 + :lines: 207-209 + +Fill tables with data. + +.. literalinclude:: ../examples/async_sql.py + :language: python + :lines: 43-50, 63-66, 78-81 + +.. literalinclude:: ../examples/async_sql.py + :language: python + :dedent: 8 + :lines: 212-223 + +Now let us answer some questions. + +What are the 10 largest cities in our data sample (population-wise)? +==================================================================== + +.. literalinclude:: ../examples/async_sql.py + :language: python + :dedent: 8 + :lines: 225-243 + +The :py:meth:`~pyignite.aio_client.AioClient.sql` method returns :py:class:`~pyignite.cursors.AioSqlFieldsCursor`, +that yields the resulting rows. + +What are the 10 most populated cities throughout the 3 chosen countries? +======================================================================== + +If you set the `include_field_names` argument to `True`, the +:py:meth:`~pyignite.client.Client.sql` method will generate a list of +column names as a first yield. Unfortunately, there is no async equivalent of `next` but +you can await :py:meth:`__anext__()` +of :py:class:`~pyignite.cursors.AioSqlFieldsCursor` + +.. literalinclude:: ../examples/async_sql.py + :language: python + :dedent: 8 + :lines: 246-271 + +Display all the information about a given city +============================================== + +.. 
literalinclude:: ../examples/async_sql.py + :language: python + :dedent: 8 + :lines: 273-288 + +Finally, delete the tables used in this example with the following queries: + +.. literalinclude:: ../examples/async_sql.py + :language: python + :lines: 83 + +.. literalinclude:: ../examples/async_sql.py + :language: python + :dedent: 8 + :lines: 290-297 + + + + +.. _async_key_value.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_key_value.py +.. _async_sql.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_sql.py \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index 8c498aa..31e4fa1 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,4 +1,19 @@ -# -*- coding: utf-8 -*- +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + # # Configuration file for the Sphinx documentation builder. # @@ -14,19 +29,16 @@ # import os import sys + + sys.path.insert(0, os.path.abspath('../')) # -- Project information ----------------------------------------------------- project = 'Apache Ignite binary client Python API' -copyright = '2018, Apache Software Foundation (ASF)' -author = 'Dmitry Melnichuk' - -# The short X.Y version -version = '' -# The full version, including alpha/beta/rc tags -release = '0.1.0' +copyright = '2021, Apache Software Foundation (ASF)' +author = '' # -- General configuration --------------------------------------------------- diff --git a/docs/datatypes/cache_props.rst b/docs/datatypes/cache_props.rst index 03443b9..3cabbe6 100644 --- a/docs/datatypes/cache_props.rst +++ b/docs/datatypes/cache_props.rst @@ -31,7 +31,9 @@ matters. | name | value | type | | +=======================================+==========+==========+=======================================================+ | Read/write cache properties, used to configure cache via :py:meth:`~pyignite.client.Client.create_cache` or | -| :py:meth:`~pyignite.client.Client.get_or_create_cache` | +| :py:meth:`~pyignite.client.Client.get_or_create_cache` of :py:class:`~pyignite.client.Client` | +| (:py:meth:`~pyignite.aio_client.AioClient.create_cache` or | +| :py:meth:`~pyignite.aio_client.AioClient.get_or_create_cache` of :py:class:`~pyignite.aio_client.AioClient`). | +---------------------------------------+----------+----------+-------------------------------------------------------+ | PROP_NAME | 0 | str | Cache name. This is the only *required* property. | +---------------------------------------+----------+----------+-------------------------------------------------------+ @@ -96,10 +98,6 @@ matters. 
+---------------------------------------+----------+----------+-------------------------------------------------------+ | PROP_STATISTICS_ENABLED | 406 | bool | Statistics enabled | +---------------------------------------+----------+----------+-------------------------------------------------------+ -| Read-only cache properties. Can not be set, but only retrieved via :py:meth:`~pyignite.cache.Cache.settings` | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_INVALIDATE | -1 | bool | Invalidate | -+---------------------------------------+----------+----------+-------------------------------------------------------+ Query entity ------------ diff --git a/docs/examples.rst b/docs/examples.rst index 4b8c7e3..0379330 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -37,28 +37,32 @@ Create cache .. literalinclude:: ../examples/get_and_put.py :language: python - :lines: 21 + :dedent: 4 + :lines: 20 Put value in cache ================== .. literalinclude:: ../examples/get_and_put.py :language: python - :lines: 23 + :dedent: 4 + :lines: 22 Get value from cache ==================== .. literalinclude:: ../examples/get_and_put.py :language: python - :lines: 25-29 + :dedent: 4 + :lines: 24-28 Get multiple values from cache ============================== .. literalinclude:: ../examples/get_and_put.py :language: python - :lines: 31-36 + :dedent: 4 + :lines: 30-35 Type hints usage ================ @@ -66,6 +70,7 @@ File: `type_hints.py`_ .. literalinclude:: ../examples/type_hints.py :language: python + :dedent: 4 :lines: 24-48 As a rule of thumb: @@ -91,33 +96,27 @@ Let us put some data in cache. .. literalinclude:: ../examples/scans.py :language: python - :lines: 23-33 + :dedent: 4 + :lines: 20-29 -:py:meth:`~pyignite.cache.Cache.scan` returns a generator, that yields +:py:meth:`~pyignite.cache.Cache.scan` returns a cursor, that yields two-tuples of key and value. You can iterate through the generated pairs in a safe manner: .. literalinclude:: ../examples/scans.py :language: python - :lines: 34-41 + :dedent: 4 + :lines: 31-39 -Or, alternatively, you can convert the generator to dictionary in one go: +Or, alternatively, you can convert the cursor to dictionary in one go: .. literalinclude:: ../examples/scans.py :language: python - :lines: 44-52 + :dedent: 4 + :lines: 41-50 But be cautious: if the cache contains a large set of data, the dictionary -may eat too much memory! - -Do cleanup -========== - -Destroy created cache and close connection. - -.. literalinclude:: ../examples/scans.py - :language: python - :lines: 54-55 +may consume too much memory! .. _sql_examples: @@ -132,7 +131,7 @@ each of the collection type. Second comes the data value. .. literalinclude:: ../examples/get_and_put_complex.py :language: python - :lines: 19-21 + :lines: 19 Map === @@ -148,7 +147,8 @@ Since CPython 3.6 all dictionaries became de facto ordered. You can always use .. literalinclude:: ../examples/get_and_put_complex.py :language: python - :lines: 29-41 + :dedent: 4 + :lines: 26-38 Collection ========== @@ -164,7 +164,8 @@ and you always get `list` back. .. literalinclude:: ../examples/get_and_put_complex.py :language: python - :lines: 43-57 + :dedent: 4 + :lines: 40-54 Object array ============ @@ -175,7 +176,8 @@ contents. But it still can be used for interoperability with Java. .. 
literalinclude:: ../examples/get_and_put_complex.py :language: python - :lines: 59-68 + :dedent: 4 + :lines: 56-65 SQL --- @@ -198,19 +200,34 @@ tables `City` and `CountryLanguage`. .. literalinclude:: ../examples/sql.py :language: python - :lines: 25-42, 51-59, 67-74, 199-204 + :lines: 25-42, 51-59, 67-74 + +.. literalinclude:: ../examples/sql.py + :language: python + :dedent: 4 + :lines: 199-204 Create indexes. .. literalinclude:: ../examples/sql.py :language: python - :lines: 60-62, 75-77, 207-208 + :lines: 60-62, 75-77 + +.. literalinclude:: ../examples/sql.py + :language: python + :dedent: 4 + :lines: 207-208 Fill tables with data. .. literalinclude:: ../examples/sql.py :language: python - :lines: 43-50, 63-66, 78-81, 211-218 + :lines: 43-50, 63-66, 78-81 + +.. literalinclude:: ../examples/sql.py + :language: python + :dedent: 4 + :lines: 211-218 Data samples are taken from `PyIgnite GitHub repository`_. @@ -221,6 +238,7 @@ What are the 10 largest cities in our data sample (population-wise)? .. literalinclude:: ../examples/sql.py :language: python + :dedent: 4 :lines: 24, 221-238 The :py:meth:`~pyignite.client.Client.sql` method returns a generator, @@ -236,20 +254,27 @@ column names as a first yield. You can access field names with Python built-in .. literalinclude:: ../examples/sql.py :language: python - :lines: 241-269 + :dedent: 4 + :lines: 241-266 Display all the information about a given city ============================================== .. literalinclude:: ../examples/sql.py :language: python - :lines: 272-290 + :dedent: 4 + :lines: 268-283 Finally, delete the tables used in this example with the following queries: .. literalinclude:: ../examples/sql.py :language: python - :lines: 82-83, 293-298 + :lines: 82-83 + +.. literalinclude:: ../examples/sql.py + :language: python + :dedent: 4 + :lines: 285-291 .. _complex_object_usage: @@ -291,7 +316,8 @@ automatically when reading Complex objects. .. literalinclude:: ../examples/binary_basics.py :language: python - :lines: 18-20, 30-34, 39-42, 48-49 + :dedent: 4 + :lines: 32-34, 39-42, 48-49 Here you can see how :class:`~pyignite.binary.GenericObjectMeta` uses `attrs`_ package internally for creating nice `__init__()` and `__repr__()` @@ -317,14 +343,15 @@ Anyway, you can reuse the autogenerated dataclass for subsequent writes: .. literalinclude:: ../examples/binary_basics.py :language: python - :lines: 53, 34-37 + :dedent: 4 + :lines: 52, 33-37 :class:`~pyignite.binary.GenericObjectMeta` can also be used directly for creating custom classes: .. literalinclude:: ../examples/binary_basics.py :language: python - :lines: 22-27 + :lines: 18-27 Note how the `Person` class is defined. `schema` is a :class:`~pyignite.binary.GenericObjectMeta` metaclass parameter. @@ -343,7 +370,8 @@ register said class explicitly with your client: .. literalinclude:: ../examples/binary_basics.py :language: python - :lines: 51 + :dedent: 4 + :lines: 50 Now, when we dealt with the basics of `pyignite` implementation of Complex Objects, let us move on to more elaborate examples. @@ -364,6 +392,7 @@ Let us do it again and examine the Ignite storage afterwards. .. literalinclude:: ../examples/read_binary.py :language: python + :dedent: 4 :lines: 222-229 We can see that Ignite created a cache for each of our tables. The caches are @@ -374,6 +403,7 @@ using a :py:attr:`~pyignite.cache.Cache.settings` property. .. 
literalinclude:: ../examples/read_binary.py :language: python + :dedent: 4 :lines: 231-251 The values of `value_type_name` and `key_type_name` are names of the binary @@ -386,6 +416,7 @@ functions and verify the correctness of the result. .. literalinclude:: ../examples/read_binary.py :language: python + :dedent: 4 :lines: 253-267 What we see is a tuple of key and value, extracted from the cache. Both key @@ -421,37 +452,37 @@ These are the necessary steps to perform the task. .. literalinclude:: ../examples/create_binary.py :language: python - :lines: 22-63 + :dedent: 4 + :lines: 24-63 2. Define Complex object data class. .. literalinclude:: ../examples/create_binary.py :language: python - :lines: 66-76 + :dedent: 4 + :lines: 64-75 3. Insert row. .. literalinclude:: ../examples/create_binary.py :language: python - :lines: 79-83 + :dedent: 4 + :lines: 76-80 Now let us make sure that our cache really can be used with SQL functions. .. literalinclude:: ../examples/create_binary.py :language: python - :lines: 85-93 + :dedent: 4 + :lines: 82-87 Note, however, that the cache we create can not be dropped with DDL command. - -.. literalinclude:: ../examples/create_binary.py - :language: python - :lines: 95-100 - It should be deleted as any other key-value cache. .. literalinclude:: ../examples/create_binary.py :language: python - :lines: 102 + :dedent: 4 + :lines: 89-96 Migrate ======= @@ -470,7 +501,8 @@ First get the vouchers' cache. .. literalinclude:: ../examples/migrate_binary.py :language: python - :lines: 108-111 + :dedent: 4 + :lines: 111 If you do not store the schema of the Complex object in code, you can obtain it as a dataclass property with @@ -478,14 +510,15 @@ it as a dataclass property with .. literalinclude:: ../examples/migrate_binary.py :language: python - :lines: 116-123 + :dedent: 4 + :lines: 116-120 Let us modify the schema and create a new Complex object class with an updated schema. .. literalinclude:: ../examples/migrate_binary.py :language: python - :lines: 125-138 + :lines: 122-138 Now migrate the data from the old schema to the new one. @@ -525,21 +558,16 @@ Gather 3 Ignite nodes on `localhost` into one cluster and run: .. literalinclude:: ../examples/failover.py :language: python - :lines: 16-51 + :lines: 16-53 Then try shutting down and restarting nodes, and see what happens. .. literalinclude:: ../examples/failover.py :language: python - :lines: 53-65 + :lines: 55-67 Client reconnection do not require an explicit user action, like calling a special method or resetting a parameter. - -.. literalinclude:: ../examples/failover.py - :language: python - :lines: 48 - It means that instead of checking the connection status it is better for `pyignite` user to just try the supposed data operations and catch the resulting exception. @@ -651,10 +679,12 @@ with the following message: # pyignite.exceptions.HandshakeError: Handshake error: Unauthenticated sessions are prohibited. .. _get_and_put.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/get_and_put.py +.. _async_key_value.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_key_value.py .. _type_hints.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/type_hints.py .. _failover.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/failover.py .. _scans.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/scans.py .. 
_sql.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/sql.py +.. _async_sql.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_sql.py .. _binary_basics.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/binary_basics.py .. _read_binary.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/read_binary.py .. _create_binary.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/create_binary.py diff --git a/docs/index.rst b/docs/index.rst index 35bd18c..7c28b6c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -23,6 +23,7 @@ Welcome to Apache Ignite binary client Python API documentation! readme modules examples + async_examples Indices and tables diff --git a/docs/modules.rst b/docs/modules.rst index cabc915..0cce570 100644 --- a/docs/modules.rst +++ b/docs/modules.rst @@ -21,11 +21,13 @@ The modules and subpackages listed here are the basis of a stable API of `pyignite`, intended for end users. .. toctree:: - :maxdepth: 1 - :caption: Modules: + :maxdepth: 1 + :caption: Modules: - Client - Cache - datatypes/parsers - datatypes/cache_props - Exceptions + Client + AioClient + Cache + AioCache + datatypes/parsers + datatypes/cache_props + Exceptions diff --git a/docs/readme.rst b/docs/readme.rst index 81298ae..807865a 100644 --- a/docs/readme.rst +++ b/docs/readme.rst @@ -35,9 +35,9 @@ through a raw TCP socket. Prerequisites ------------- -- *Python 3.4* or above (3.6 is tested), +- *Python 3.6* or above (3.6, 3.7, 3.8 and 3.9 are tested), - Access to *Apache Ignite* node, local or remote. The current thin client - version was tested on *Apache Ignite 2.7.0* (binary client protocol 1.2.0). + version was tested on *Apache Ignite 2.10.0* (binary client protocol 1.7.0). Installation ------------ @@ -59,8 +59,7 @@ the whole repository: :: -$ git clone git@github.com:apache/ignite.git -$ cd ignite/modules/platforms/python +$ git clone git@github.com:apache/ignite-python-thin-client.git $ pip install -e . This will install the repository version of `pyignite` into your environment @@ -74,13 +73,26 @@ the the additional requirements into your working Python environment using $ pip install -r requirements/.txt + +For development, it is recommended to install `tests` requirements + +:: + +$ pip install -r requirements/tests.txt + +For checking codestyle run: + +:: + +$ flake8 + You may also want to consult the `setuptools`_ manual about using `setup.py`. Examples -------- Some examples of using pyignite are provided in -`ignite/modules/platforms/python/examples` folder. They are extensively +`examples` folder. They are extensively commented in the :ref:`examples_of_usage` section of the documentation. This code implies that it is run in the environment with `pyignite` package @@ -93,62 +105,26 @@ the explanation of testing, look up the `Testing`_ section. Testing ------- -Create and activate virtualenv_ environment. Run - -:: - -$ cd ignite/modules/platforms/python -$ python ./setup.py pytest +Create and activate virtualenv_ environment. -This does not require `pytest` and other test dependencies to be installed -in your environment. - -Some or all tests require Apache Ignite node running on localhost:10800. -To override the default parameters, use command line options -``--ignite-host`` and ``--ignite-port``: +Install a binary release of Ignite with `log4j2` enabled and set `IGNITE_HOME` environment variable. 
:: -$ python ./setup.py pytest --addopts "--ignite-host=example.com --ignite-port=19840" - -You can use each of these two options multiple times. All combinations -of given host and port will be tested. - -You can also test client against a server with SSL-encrypted connection. -SSL-related `pytest` parameters are: +$ cd +$ export IGNITE_HOME=$(pwd) +$ cp -r $IGNITE_HOME/libs/optional/ignite-log4j2 $IGNITE_HOME/libs/ -``--use-ssl`` − use SSL encryption, -``--ssl-certfile`` − a path to ssl certificate file to identify local party, +Run -``--ssl-ca-certfile`` − a path to a trusted certificate or a certificate chain, - -``--ssl-cert-reqs`` − determines how the remote side certificate is treated: - -- ``NONE`` (ignore, default), -- ``OPTIONAL`` (validate, if provided), -- ``REQUIRED`` (valid remote certificate is required), - -``--ssl-ciphers`` − ciphers to use, - -``--ssl-version`` − SSL version: +:: -- ``TLSV1_1`` (default), -- ``TLSV1_2``. +$ pip install -e . +$ pytest Other `pytest` parameters: -``--timeout`` − timeout (in seconds) for each socket operation, including -`connect`. Accepts integer or float value. Default is None (blocking mode), - -``--partition-aware`` − experimental; off by default; turns on the partition -awareness: a way for the thin client to calculate a data placement for the -given key. - -``--username`` and ``--password`` − credentials to authenticate to Ignite -cluster. Used in conjunction with `authenticationEnabled` property in cluster -configuration. - ``--examples`` − run the examples as one test. If you wish to run *only* the examples, supply also the name of the test function to `pytest` launcher: @@ -167,25 +143,33 @@ Since failover, SSL and authentication examples are meant to be controlled by user or depend on special configuration of the Ignite cluster, they can not be automated. +Using tox +""""""""" +To automate running tests against different Python versions, it is recommended to use tox_ + +:: + +$ pip install tox +$ tox + + Documentation ------------- To recompile this documentation, do this from your virtualenv_ environment: :: -$ cd ignite/modules/platforms/python $ pip install -r requirements/docs.txt $ cd docs $ make html -Then open `ignite/modules/platforms/python/docs/generated/html/index.html`_ +Then open `docs/generated/html/index.html`_ in your browser. If you feel that old version is stuck, do :: -$ cd ignite/modules/platforms/python/docs $ make clean $ sphinx-apidoc -feM -o source/ ../ ../setup.py $ make html @@ -201,6 +185,7 @@ This is a free software, brought to you on terms of the `Apache License v2`_. .. _binary client protocol: https://apacheignite.readme.io/docs/binary-client-protocol .. _Apache License v2: http://www.apache.org/licenses/LICENSE-2.0 .. _virtualenv: https://virtualenv.pypa.io/ +.. _tox: https://tox.readthedocs.io/en/latest/ .. _setuptools: https://setuptools.readthedocs.io/ -.. _ignite/modules/platforms/python/docs/generated/html/index.html: . +.. _docs/generated/html/index.html: . .. _editable installs: https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs diff --git a/docs/source/modules.rst b/docs/source/modules.rst index c125dd3..189a011 100644 --- a/docs/source/modules.rst +++ b/docs/source/modules.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + python ====== diff --git a/docs/source/pyignite.aio_cache.rst b/docs/source/pyignite.aio_cache.rst new file mode 100644 index 0000000..b62a33a --- /dev/null +++ b/docs/source/pyignite.aio_cache.rst @@ -0,0 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.aio_cache module +========================= + +.. automodule:: pyignite.aio_cache + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/pyignite.aio_client.rst b/docs/source/pyignite.aio_client.rst new file mode 100644 index 0000000..922c559 --- /dev/null +++ b/docs/source/pyignite.aio_client.rst @@ -0,0 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.aio_client module +========================== + +.. automodule:: pyignite.aio_client + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/pyignite.api.binary.rst b/docs/source/pyignite.api.binary.rst deleted file mode 100644 index 49f1c86..0000000 --- a/docs/source/pyignite.api.binary.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.api.binary module -========================== - -.. automodule:: pyignite.api.binary - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.api.cache_config.rst b/docs/source/pyignite.api.cache_config.rst deleted file mode 100644 index 599c857..0000000 --- a/docs/source/pyignite.api.cache_config.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.api.cache\_config module -================================= - -.. 
automodule:: pyignite.api.cache_config - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.api.key_value.rst b/docs/source/pyignite.api.key_value.rst deleted file mode 100644 index 52d6c3f..0000000 --- a/docs/source/pyignite.api.key_value.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.api.key\_value module -============================== - -.. automodule:: pyignite.api.key_value - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.api.result.rst b/docs/source/pyignite.api.result.rst deleted file mode 100644 index 21398e3..0000000 --- a/docs/source/pyignite.api.result.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.api.result module -========================== - -.. automodule:: pyignite.api.result - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.api.rst b/docs/source/pyignite.api.rst deleted file mode 100644 index e18d4a3..0000000 --- a/docs/source/pyignite.api.rst +++ /dev/null @@ -1,19 +0,0 @@ -pyignite.api package -==================== - -.. automodule:: pyignite.api - :members: - :undoc-members: - :show-inheritance: - -Submodules ----------- - -.. toctree:: - - pyignite.api.binary - pyignite.api.cache_config - pyignite.api.key_value - pyignite.api.result - pyignite.api.sql - diff --git a/docs/source/pyignite.api.sql.rst b/docs/source/pyignite.api.sql.rst deleted file mode 100644 index 84479ad..0000000 --- a/docs/source/pyignite.api.sql.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.api.sql module -======================= - -.. automodule:: pyignite.api.sql - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.binary.rst b/docs/source/pyignite.binary.rst index 6b21582..eeab940 100644 --- a/docs/source/pyignite.binary.rst +++ b/docs/source/pyignite.binary.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.binary module ====================== diff --git a/docs/source/pyignite.cache.rst b/docs/source/pyignite.cache.rst index e6e83c5..f4099de 100644 --- a/docs/source/pyignite.cache.rst +++ b/docs/source/pyignite.cache.rst @@ -1,7 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + pyignite.cache module ===================== .. automodule:: pyignite.cache :members: :undoc-members: - :show-inheritance: + :inherited-members: diff --git a/docs/source/pyignite.client.rst b/docs/source/pyignite.client.rst index fef316b..e978dc1 100644 --- a/docs/source/pyignite.client.rst +++ b/docs/source/pyignite.client.rst @@ -1,7 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.client module ====================== .. automodule:: pyignite.client :members: :undoc-members: - :show-inheritance: + :inherited-members: diff --git a/docs/source/pyignite.connection.handshake.rst b/docs/source/pyignite.connection.handshake.rst deleted file mode 100644 index 28e83df..0000000 --- a/docs/source/pyignite.connection.handshake.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.connection.handshake module -==================================== - -.. automodule:: pyignite.connection.handshake - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.connection.rst b/docs/source/pyignite.connection.rst index f1acd2b..90c59db 100644 --- a/docs/source/pyignite.connection.rst +++ b/docs/source/pyignite.connection.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.connection package =========================== @@ -5,12 +20,3 @@ pyignite.connection package :members: :undoc-members: :show-inheritance: - -Submodules ----------- - -.. toctree:: - - pyignite.connection.handshake - pyignite.connection.ssl - diff --git a/docs/source/pyignite.connection.ssl.rst b/docs/source/pyignite.connection.ssl.rst deleted file mode 100644 index 8eebf43..0000000 --- a/docs/source/pyignite.connection.ssl.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.connection.ssl module -============================== - -.. 
automodule:: pyignite.connection.ssl - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.constants.rst b/docs/source/pyignite.constants.rst deleted file mode 100644 index f71e4f1..0000000 --- a/docs/source/pyignite.constants.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.constants module -========================= - -.. automodule:: pyignite.constants - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.cursors.rst b/docs/source/pyignite.cursors.rst new file mode 100644 index 0000000..6415a16 --- /dev/null +++ b/docs/source/pyignite.cursors.rst @@ -0,0 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.cursors module +======================= + +.. automodule:: pyignite.cursors + :members: + :undoc-members: + :inherited-members: diff --git a/docs/source/pyignite.datatypes.base.rst b/docs/source/pyignite.datatypes.base.rst index 849a028..c482904 100644 --- a/docs/source/pyignite.datatypes.base.rst +++ b/docs/source/pyignite.datatypes.base.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.base module ============================== diff --git a/docs/source/pyignite.datatypes.binary.rst b/docs/source/pyignite.datatypes.binary.rst index 0d175de..37de8b8 100644 --- a/docs/source/pyignite.datatypes.binary.rst +++ b/docs/source/pyignite.datatypes.binary.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.binary module ================================ diff --git a/docs/source/pyignite.datatypes.cache_config.rst b/docs/source/pyignite.datatypes.cache_config.rst index 3d5eaeb..4b63637 100644 --- a/docs/source/pyignite.datatypes.cache_config.rst +++ b/docs/source/pyignite.datatypes.cache_config.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.cache\_config module ======================================= diff --git a/docs/source/pyignite.datatypes.cache_properties.rst b/docs/source/pyignite.datatypes.cache_properties.rst index 57f0e9f..d626366 100644 --- a/docs/source/pyignite.datatypes.cache_properties.rst +++ b/docs/source/pyignite.datatypes.cache_properties.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.cache\_properties module =========================================== diff --git a/docs/source/pyignite.datatypes.complex.rst b/docs/source/pyignite.datatypes.complex.rst index 1e3f21e..83ecacc 100644 --- a/docs/source/pyignite.datatypes.complex.rst +++ b/docs/source/pyignite.datatypes.complex.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ pyignite.datatypes.complex module ================================= diff --git a/docs/source/pyignite.datatypes.internal.rst b/docs/source/pyignite.datatypes.internal.rst index 5dc5535..a3e5dcc 100644 --- a/docs/source/pyignite.datatypes.internal.rst +++ b/docs/source/pyignite.datatypes.internal.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.internal module ================================== diff --git a/docs/source/pyignite.datatypes.key_value.rst b/docs/source/pyignite.datatypes.key_value.rst index 0b3aa88..46d83dd 100644 --- a/docs/source/pyignite.datatypes.key_value.rst +++ b/docs/source/pyignite.datatypes.key_value.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.key\_value module ==================================== diff --git a/docs/source/pyignite.datatypes.null_object.rst b/docs/source/pyignite.datatypes.null_object.rst index 05f22b1..5d6381f 100644 --- a/docs/source/pyignite.datatypes.null_object.rst +++ b/docs/source/pyignite.datatypes.null_object.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ pyignite.datatypes.null\_object module ====================================== diff --git a/docs/source/pyignite.datatypes.primitive.rst b/docs/source/pyignite.datatypes.primitive.rst index 8a53604..3fa2797 100644 --- a/docs/source/pyignite.datatypes.primitive.rst +++ b/docs/source/pyignite.datatypes.primitive.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.primitive module =================================== diff --git a/docs/source/pyignite.datatypes.primitive_arrays.rst b/docs/source/pyignite.datatypes.primitive_arrays.rst index b4b94bf..d261235 100644 --- a/docs/source/pyignite.datatypes.primitive_arrays.rst +++ b/docs/source/pyignite.datatypes.primitive_arrays.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.primitive\_arrays module =========================================== diff --git a/docs/source/pyignite.datatypes.primitive_objects.rst b/docs/source/pyignite.datatypes.primitive_objects.rst index a74db38..e737f3c 100644 --- a/docs/source/pyignite.datatypes.primitive_objects.rst +++ b/docs/source/pyignite.datatypes.primitive_objects.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ pyignite.datatypes.primitive\_objects module ============================================ diff --git a/docs/source/pyignite.datatypes.prop_codes.rst b/docs/source/pyignite.datatypes.prop_codes.rst deleted file mode 100644 index d23596b..0000000 --- a/docs/source/pyignite.datatypes.prop_codes.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.datatypes.prop\_codes module -===================================== - -.. automodule:: pyignite.datatypes.prop_codes - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.datatypes.rst b/docs/source/pyignite.datatypes.rst index d72f844..269d500 100644 --- a/docs/source/pyignite.datatypes.rst +++ b/docs/source/pyignite.datatypes.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes package ========================== @@ -22,8 +37,6 @@ Submodules pyignite.datatypes.primitive pyignite.datatypes.primitive_arrays pyignite.datatypes.primitive_objects - pyignite.datatypes.prop_codes pyignite.datatypes.sql pyignite.datatypes.standard - pyignite.datatypes.type_codes diff --git a/docs/source/pyignite.datatypes.sql.rst b/docs/source/pyignite.datatypes.sql.rst index e20f084..8e564b8 100644 --- a/docs/source/pyignite.datatypes.sql.rst +++ b/docs/source/pyignite.datatypes.sql.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.sql module ============================= diff --git a/docs/source/pyignite.datatypes.standard.rst b/docs/source/pyignite.datatypes.standard.rst index e46d339..f181450 100644 --- a/docs/source/pyignite.datatypes.standard.rst +++ b/docs/source/pyignite.datatypes.standard.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. 
http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.datatypes.standard module ================================== diff --git a/docs/source/pyignite.datatypes.type_codes.rst b/docs/source/pyignite.datatypes.type_codes.rst deleted file mode 100644 index 47baa4b..0000000 --- a/docs/source/pyignite.datatypes.type_codes.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.datatypes.type\_codes module -===================================== - -.. automodule:: pyignite.datatypes.type_codes - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.exceptions.rst b/docs/source/pyignite.exceptions.rst index dd24687..563ea90 100644 --- a/docs/source/pyignite.exceptions.rst +++ b/docs/source/pyignite.exceptions.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite.exceptions module ========================== diff --git a/docs/source/pyignite.queries.op_codes.rst b/docs/source/pyignite.queries.op_codes.rst deleted file mode 100644 index bc556ec..0000000 --- a/docs/source/pyignite.queries.op_codes.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.queries.op\_codes module -================================= - -.. automodule:: pyignite.queries.op_codes - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/source/pyignite.queries.rst b/docs/source/pyignite.queries.rst deleted file mode 100644 index 6dd81a2..0000000 --- a/docs/source/pyignite.queries.rst +++ /dev/null @@ -1,15 +0,0 @@ -pyignite.queries package -======================== - -.. automodule:: pyignite.queries - :members: - :undoc-members: - :show-inheritance: - -Submodules ----------- - -.. toctree:: - - pyignite.queries.op_codes - diff --git a/docs/source/pyignite.rst b/docs/source/pyignite.rst index 947cab2..85e31a8 100644 --- a/docs/source/pyignite.rst +++ b/docs/source/pyignite.rst @@ -1,3 +1,18 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. 
Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + pyignite package ================ @@ -11,20 +26,20 @@ Subpackages .. toctree:: - pyignite.api - pyignite.connection pyignite.datatypes - pyignite.queries + pyignite.connection Submodules ---------- .. toctree:: - pyignite.binary - pyignite.cache - pyignite.client - pyignite.constants - pyignite.exceptions - pyignite.utils + pyignite.binary + pyignite.cache + pyignite.aio_cache + pyignite.client + pyignite.aio_client + pyignite.constants + pyignite.cursors + pyignite.exceptions diff --git a/docs/source/pyignite.utils.rst b/docs/source/pyignite.utils.rst deleted file mode 100644 index 5ee42ab..0000000 --- a/docs/source/pyignite.utils.rst +++ /dev/null @@ -1,7 +0,0 @@ -pyignite.utils module -===================== - -.. automodule:: pyignite.utils - :members: - :undoc-members: - :show-inheritance: diff --git a/examples/async_key_value.py b/examples/async_key_value.py new file mode 100644 index 0000000..76dac34 --- /dev/null +++ b/examples/async_key_value.py @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio + +from pyignite import AioClient + + +async def main(): + # Create client and connect. + client = AioClient() + async with client.connect('127.0.0.1', 10800): + # Create cache + cache = await client.get_or_create_cache('test_async_cache') + + # Load data concurrently. + await asyncio.gather( + *[cache.put(f'key_{i}', f'value_{i}') for i in range(0, 20)] + ) + + # Key-value queries. + print(await cache.get('key_10')) + print(await cache.get_all([f'key_{i}' for i in range(0, 10)])) + # value_10 + # {'key_3': 'value_3', 'key_2': 'value_2', 'key_1': 'value_1','....} + + # Scan query. + async with cache.scan() as cursor: + async for k, v in cursor: + print(f'key = {k}, value = {v}') + # key = key_42, value = value_42 + # key = key_43, value = value_43 + # key = key_40, value = value_40 + # key = key_41, value = value_41 + # key = key_37, value = value_37 + # key = key_51, value = value_51 + # key = key_20, value = value_20 + # ...... + + # Clean up. + await cache.destroy() + +loop = asyncio.get_event_loop() +loop.run_until_complete(main()) diff --git a/examples/async_sql.py b/examples/async_sql.py new file mode 100644 index 0000000..ffd2939 --- /dev/null +++ b/examples/async_sql.py @@ -0,0 +1,301 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +from decimal import Decimal + +from pyignite import AioClient + + +COUNTRY_TABLE_NAME = 'Country' +CITY_TABLE_NAME = 'City' +LANGUAGE_TABLE_NAME = 'CountryLanguage' + +COUNTRY_CREATE_TABLE_QUERY = '''CREATE TABLE Country ( + Code CHAR(3) PRIMARY KEY, + Name CHAR(52), + Continent CHAR(50), + Region CHAR(26), + SurfaceArea DECIMAL(10,2), + IndepYear SMALLINT(6), + Population INT(11), + LifeExpectancy DECIMAL(3,1), + GNP DECIMAL(10,2), + GNPOld DECIMAL(10,2), + LocalName CHAR(45), + GovernmentForm CHAR(45), + HeadOfState CHAR(60), + Capital INT(11), + Code2 CHAR(2) +)''' + +COUNTRY_INSERT_QUERY = '''INSERT INTO Country( + Code, Name, Continent, Region, + SurfaceArea, IndepYear, Population, + LifeExpectancy, GNP, GNPOld, + LocalName, GovernmentForm, HeadOfState, + Capital, Code2 +) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''' + +CITY_CREATE_TABLE_QUERY = '''CREATE TABLE City ( + ID INT(11), + Name CHAR(35), + CountryCode CHAR(3), + District CHAR(20), + Population INT(11), + PRIMARY KEY (ID, CountryCode) +) WITH "affinityKey=CountryCode"''' + +CITY_CREATE_INDEX = ''' +CREATE INDEX idx_country_code ON city (CountryCode)''' + +CITY_INSERT_QUERY = '''INSERT INTO City( + ID, Name, CountryCode, District, Population +) VALUES (?, ?, ?, ?, ?)''' + +LANGUAGE_CREATE_TABLE_QUERY = '''CREATE TABLE CountryLanguage ( + CountryCode CHAR(3), + Language CHAR(30), + IsOfficial BOOLEAN, + Percentage DECIMAL(4,1), + PRIMARY KEY (CountryCode, Language) +) WITH "affinityKey=CountryCode"''' + +LANGUAGE_CREATE_INDEX = ''' +CREATE INDEX idx_lang_country_code ON CountryLanguage (CountryCode)''' + +LANGUAGE_INSERT_QUERY = '''INSERT INTO CountryLanguage( + CountryCode, Language, IsOfficial, Percentage +) VALUES (?, ?, ?, ?)''' + +DROP_TABLE_QUERY = '''DROP TABLE {} IF EXISTS''' + +COUNTRY_DATA = [ + [ + 'USA', 'United States', 'North America', 'North America', + Decimal('9363520.00'), 1776, 278357000, + Decimal('77.1'), Decimal('8510700.00'), Decimal('8110900.00'), + 'United States', 'Federal Republic', 'George W. 
Bush', + 3813, 'US', + ], + [ + 'IND', 'India', 'Asia', 'Southern and Central Asia', + Decimal('3287263.00'), 1947, 1013662000, + Decimal('62.5'), Decimal('447114.00'), Decimal('430572.00'), + 'Bharat/India', 'Federal Republic', 'Kocheril Raman Narayanan', + 1109, 'IN', + ], + [ + 'CHN', 'China', 'Asia', 'Eastern Asia', + Decimal('9572900.00'), -1523, 1277558000, + Decimal('71.4'), Decimal('982268.00'), Decimal('917719.00'), + 'Zhongquo', 'PeoplesRepublic', 'Jiang Zemin', + 1891, 'CN', + ], +] + +CITY_DATA = [ + [3793, 'New York', 'USA', 'New York', 8008278], + [3794, 'Los Angeles', 'USA', 'California', 3694820], + [3795, 'Chicago', 'USA', 'Illinois', 2896016], + [3796, 'Houston', 'USA', 'Texas', 1953631], + [3797, 'Philadelphia', 'USA', 'Pennsylvania', 1517550], + [3798, 'Phoenix', 'USA', 'Arizona', 1321045], + [3799, 'San Diego', 'USA', 'California', 1223400], + [3800, 'Dallas', 'USA', 'Texas', 1188580], + [3801, 'San Antonio', 'USA', 'Texas', 1144646], + [3802, 'Detroit', 'USA', 'Michigan', 951270], + [3803, 'San Jose', 'USA', 'California', 894943], + [3804, 'Indianapolis', 'USA', 'Indiana', 791926], + [3805, 'San Francisco', 'USA', 'California', 776733], + [1024, 'Mumbai (Bombay)', 'IND', 'Maharashtra', 10500000], + [1025, 'Delhi', 'IND', 'Delhi', 7206704], + [1026, 'Calcutta [Kolkata]', 'IND', 'West Bengali', 4399819], + [1027, 'Chennai (Madras)', 'IND', 'Tamil Nadu', 3841396], + [1028, 'Hyderabad', 'IND', 'Andhra Pradesh', 2964638], + [1029, 'Ahmedabad', 'IND', 'Gujarat', 2876710], + [1030, 'Bangalore', 'IND', 'Karnataka', 2660088], + [1031, 'Kanpur', 'IND', 'Uttar Pradesh', 1874409], + [1032, 'Nagpur', 'IND', 'Maharashtra', 1624752], + [1033, 'Lucknow', 'IND', 'Uttar Pradesh', 1619115], + [1034, 'Pune', 'IND', 'Maharashtra', 1566651], + [1035, 'Surat', 'IND', 'Gujarat', 1498817], + [1036, 'Jaipur', 'IND', 'Rajasthan', 1458483], + [1890, 'Shanghai', 'CHN', 'Shanghai', 9696300], + [1891, 'Peking', 'CHN', 'Peking', 7472000], + [1892, 'Chongqing', 'CHN', 'Chongqing', 6351600], + [1893, 'Tianjin', 'CHN', 'Tianjin', 5286800], + [1894, 'Wuhan', 'CHN', 'Hubei', 4344600], + [1895, 'Harbin', 'CHN', 'Heilongjiang', 4289800], + [1896, 'Shenyang', 'CHN', 'Liaoning', 4265200], + [1897, 'Kanton [Guangzhou]', 'CHN', 'Guangdong', 4256300], + [1898, 'Chengdu', 'CHN', 'Sichuan', 3361500], + [1899, 'Nanking [Nanjing]', 'CHN', 'Jiangsu', 2870300], + [1900, 'Changchun', 'CHN', 'Jilin', 2812000], + [1901, 'Xi´an', 'CHN', 'Shaanxi', 2761400], + [1902, 'Dalian', 'CHN', 'Liaoning', 2697000], + [1903, 'Qingdao', 'CHN', 'Shandong', 2596000], + [1904, 'Jinan', 'CHN', 'Shandong', 2278100], + [1905, 'Hangzhou', 'CHN', 'Zhejiang', 2190500], + [1906, 'Zhengzhou', 'CHN', 'Henan', 2107200], +] + +LANGUAGE_DATA = [ + ['USA', 'Chinese', False, Decimal('0.6')], + ['USA', 'English', True, Decimal('86.2')], + ['USA', 'French', False, Decimal('0.7')], + ['USA', 'German', False, Decimal('0.7')], + ['USA', 'Italian', False, Decimal('0.6')], + ['USA', 'Japanese', False, Decimal('0.2')], + ['USA', 'Korean', False, Decimal('0.3')], + ['USA', 'Polish', False, Decimal('0.3')], + ['USA', 'Portuguese', False, Decimal('0.2')], + ['USA', 'Spanish', False, Decimal('7.5')], + ['USA', 'Tagalog', False, Decimal('0.4')], + ['USA', 'Vietnamese', False, Decimal('0.2')], + ['IND', 'Asami', False, Decimal('1.5')], + ['IND', 'Bengali', False, Decimal('8.2')], + ['IND', 'Gujarati', False, Decimal('4.8')], + ['IND', 'Hindi', True, Decimal('39.9')], + ['IND', 'Kannada', False, Decimal('3.9')], + ['IND', 'Malajalam', False, Decimal('3.6')], + 
['IND', 'Marathi', False, Decimal('7.4')], + ['IND', 'Orija', False, Decimal('3.3')], + ['IND', 'Punjabi', False, Decimal('2.8')], + ['IND', 'Tamil', False, Decimal('6.3')], + ['IND', 'Telugu', False, Decimal('7.8')], + ['IND', 'Urdu', False, Decimal('5.1')], + ['CHN', 'Chinese', True, Decimal('92.0')], + ['CHN', 'Dong', False, Decimal('0.2')], + ['CHN', 'Hui', False, Decimal('0.8')], + ['CHN', 'Mantšu', False, Decimal('0.9')], + ['CHN', 'Miao', False, Decimal('0.7')], + ['CHN', 'Mongolian', False, Decimal('0.4')], + ['CHN', 'Puyi', False, Decimal('0.2')], + ['CHN', 'Tibetan', False, Decimal('0.4')], + ['CHN', 'Tujia', False, Decimal('0.5')], + ['CHN', 'Uighur', False, Decimal('0.6')], + ['CHN', 'Yi', False, Decimal('0.6')], + ['CHN', 'Zhuang', False, Decimal('1.4')], +] + + +async def main(): + # establish connection + client = AioClient() + async with client.connect('127.0.0.1', 10800): + # create tables + for query in [ + COUNTRY_CREATE_TABLE_QUERY, + CITY_CREATE_TABLE_QUERY, + LANGUAGE_CREATE_TABLE_QUERY, + ]: + await client.sql(query) + + # create indices + for query in [CITY_CREATE_INDEX, LANGUAGE_CREATE_INDEX]: + await client.sql(query) + + # load data concurrently. + await asyncio.gather(*[ + client.sql(COUNTRY_INSERT_QUERY, query_args=row) for row in COUNTRY_DATA + ]) + + await asyncio.gather(*[ + client.sql(CITY_INSERT_QUERY, query_args=row) for row in CITY_DATA + ]) + + await asyncio.gather(*[ + client.sql(LANGUAGE_INSERT_QUERY, query_args=row) for row in LANGUAGE_DATA + ]) + + # 10 most populated cities (with pagination) + MOST_POPULATED_QUERY = ''' + SELECT name, population FROM City ORDER BY population DESC LIMIT 10''' + + async with client.sql(MOST_POPULATED_QUERY) as cursor: + print('Most 10 populated cities:') + + async for row in cursor: + print(row) + # Most 10 populated cities: + # ['Mumbai (Bombay)', 10500000] + # ['Shanghai', 9696300] + # ['New York', 8008278] + # ['Peking', 7472000] + # ['Delhi', 7206704] + # ['Chongqing', 6351600] + # ['Tianjin', 5286800] + # ['Calcutta [Kolkata]', 4399819] + # ['Wuhan', 4344600] + # ['Harbin', 4289800] + + # 10 most populated cities in 3 countries (with pagination and header row) + MOST_POPULATED_IN_3_COUNTRIES_QUERY = ''' + SELECT country.name as country_name, city.name as city_name, MAX(city.population) AS max_pop FROM country + JOIN city ON city.countrycode = country.code + WHERE country.code IN ('USA','IND','CHN') + GROUP BY country.name, city.name ORDER BY max_pop DESC LIMIT 10 + ''' + + async with client.sql(MOST_POPULATED_IN_3_COUNTRIES_QUERY, include_field_names=True) as cursor: + print('Most 10 populated cities in USA, India and China:') + print(await cursor.__anext__()) + print('----------------------------------------') + async for row in cursor: + print(row) + # Most 10 populated cities in USA, India and China: + # ['COUNTRY_NAME', 'CITY_NAME', 'MAX_POP'] + # ---------------------------------------- + # ['India', 'Mumbai (Bombay)', 10500000] + # ['China', 'Shanghai', 9696300] + # ['United States', 'New York', 8008278] + # ['China', 'Peking', 7472000] + # ['India', 'Delhi', 7206704] + # ['China', 'Chongqing', 6351600] + # ['China', 'Tianjin', 5286800] + # ['India', 'Calcutta [Kolkata]', 4399819] + # ['China', 'Wuhan', 4344600] + # ['China', 'Harbin', 4289800] + + # show city info + CITY_INFO_QUERY = '''SELECT * FROM City WHERE id = ?''' + + async with client.sql(CITY_INFO_QUERY, query_args=[3802], include_field_names=True) as cursor: + field_names = await cursor.__anext__() + field_data = await cursor.__anext__() + + 
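+            # Note: on Python 3.10 and newer, the builtin anext() could be used
+            # instead of calling cursor.__anext__() directly, e.g.
+            # field_names = await anext(cursor)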
print('City info:') + for field_name, field_value in zip(field_names * len(field_data), field_data): + print('{}: {}'.format(field_name, field_value)) + # City info: + # ID: 3802 + # NAME: Detroit + # COUNTRYCODE: USA + # DISTRICT: Michigan + # POPULATION: 951270 + + # clean up concurrently. + await asyncio.gather(*[ + client.sql(DROP_TABLE_QUERY.format(table_name)) for table_name in [ + CITY_TABLE_NAME, + LANGUAGE_TABLE_NAME, + COUNTRY_TABLE_NAME, + ] + ]) + + +loop = asyncio.get_event_loop() +loop.run_until_complete(main()) diff --git a/examples/binary_basics.py b/examples/binary_basics.py index 96a9058..50fa933 100644 --- a/examples/binary_basics.py +++ b/examples/binary_basics.py @@ -16,7 +16,7 @@ from collections import OrderedDict from pyignite import Client, GenericObjectMeta -from pyignite.datatypes import * +from pyignite.datatypes import String, IntObject class Person(metaclass=GenericObjectMeta, schema=OrderedDict([ @@ -28,26 +28,25 @@ class Person(metaclass=GenericObjectMeta, schema=OrderedDict([ client = Client() -client.connect('localhost', 10800) +with client.connect('localhost', 10800): + person_cache = client.get_or_create_cache('person') -person_cache = client.get_or_create_cache('person') + person_cache.put( + 1, Person(first_name='Ivan', last_name='Ivanov', age=33) + ) -person_cache.put( - 1, Person(first_name='Ivan', last_name='Ivanov', age=33) -) + person = person_cache.get(1) + print(person.__class__.__name__) + # Person -person = person_cache.get(1) -print(person.__class__.__name__) -# Person + print(person.__class__ is Person) + # True if `Person` was registered automatically (on writing) + # or manually (using `client.register_binary_type()` method). + # False otherwise -print(person.__class__ is Person) -# True if `Person` was registered automatically (on writing) -# or manually (using `client.register_binary_type()` method). 
-# False otherwise + print(person) + # Person(first_name='Ivan', last_name='Ivanov', age=33, version=1) -print(person) -# Person(first_name='Ivan', last_name='Ivanov', age=33, version=1) + client.register_binary_type(Person) -client.register_binary_type(Person) - -Person = person.__class__ + Person = person.__class__ diff --git a/examples/create_binary.py b/examples/create_binary.py index b199527..d2c2ce4 100644 --- a/examples/create_binary.py +++ b/examples/create_binary.py @@ -17,87 +17,80 @@ from pyignite import Client, GenericObjectMeta from pyignite.datatypes import DoubleObject, IntObject, String -from pyignite.datatypes.prop_codes import * +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_SQL_SCHEMA, PROP_QUERY_ENTITIES client = Client() -client.connect('127.0.0.1', 10800) +with client.connect('127.0.0.1', 10800): + student_cache = client.create_cache({ + PROP_NAME: 'SQL_PUBLIC_STUDENT', + PROP_SQL_SCHEMA: 'PUBLIC', + PROP_QUERY_ENTITIES: [ + { + 'table_name': 'Student'.upper(), + 'key_field_name': 'SID', + 'key_type_name': 'java.lang.Integer', + 'field_name_aliases': [], + 'query_fields': [ + { + 'name': 'SID', + 'type_name': 'java.lang.Integer', + 'is_key_field': True, + 'is_notnull_constraint_field': True, + }, + { + 'name': 'NAME', + 'type_name': 'java.lang.String', + }, + { + 'name': 'LOGIN', + 'type_name': 'java.lang.String', + }, + { + 'name': 'AGE', + 'type_name': 'java.lang.Integer', + }, + { + 'name': 'GPA', + 'type_name': 'java.math.Double', + }, + ], + 'query_indexes': [], + 'value_type_name': 'SQL_PUBLIC_STUDENT_TYPE', + 'value_field_name': None, + }, + ], + }) -student_cache = client.create_cache({ - PROP_NAME: 'SQL_PUBLIC_STUDENT', - PROP_SQL_SCHEMA: 'PUBLIC', - PROP_QUERY_ENTITIES: [ - { - 'table_name': 'Student'.upper(), - 'key_field_name': 'SID', - 'key_type_name': 'java.lang.Integer', - 'field_name_aliases': [], - 'query_fields': [ - { - 'name': 'SID', - 'type_name': 'java.lang.Integer', - 'is_key_field': True, - 'is_notnull_constraint_field': True, - }, - { - 'name': 'NAME', - 'type_name': 'java.lang.String', - }, - { - 'name': 'LOGIN', - 'type_name': 'java.lang.String', - }, - { - 'name': 'AGE', - 'type_name': 'java.lang.Integer', - }, - { - 'name': 'GPA', - 'type_name': 'java.math.Double', - }, - ], - 'query_indexes': [], - 'value_type_name': 'SQL_PUBLIC_STUDENT_TYPE', - 'value_field_name': None, - }, - ], -}) + class Student( + metaclass=GenericObjectMeta, + type_name='SQL_PUBLIC_STUDENT_TYPE', + schema=OrderedDict([ + ('NAME', String), + ('LOGIN', String), + ('AGE', IntObject), + ('GPA', DoubleObject), + ]) + ): + pass + student_cache.put( + 1, + Student(LOGIN='jdoe', NAME='John Doe', AGE=17, GPA=4.25), + key_hint=IntObject + ) -class Student( - metaclass=GenericObjectMeta, - type_name='SQL_PUBLIC_STUDENT_TYPE', - schema=OrderedDict([ - ('NAME', String), - ('LOGIN', String), - ('AGE', IntObject), - ('GPA', DoubleObject), - ]) -): - pass + with client.sql(r'SELECT * FROM Student', include_field_names=True) as cursor: + print(next(cursor)) + # ['SID', 'NAME', 'LOGIN', 'AGE', 'GPA'] + print(*cursor) + # [1, 'John Doe', 'jdoe', 17, 4.25] -student_cache.put( - 1, - Student(LOGIN='jdoe', NAME='John Doe', AGE=17, GPA=4.25), - key_hint=IntObject -) + # DROP_QUERY = 'DROP TABLE Student' + # client.sql(DROP_QUERY) + # + # pyignite.exceptions.SQLError: class org.apache.ignite.IgniteCheckedException: + # Only cache created with CREATE TABLE may be removed with DROP TABLE + # [cacheName=SQL_PUBLIC_STUDENT] -result = client.sql( - r'SELECT * FROM Student', - 
include_field_names=True -) -print(next(result)) -# ['SID', 'NAME', 'LOGIN', 'AGE', 'GPA'] - -print(*result) -# [1, 'John Doe', 'jdoe', 17, 4.25] - -# DROP_QUERY = 'DROP TABLE Student' -# client.sql(DROP_QUERY) -# -# pyignite.exceptions.SQLError: class org.apache.ignite.IgniteCheckedException: -# Only cache created with CREATE TABLE may be removed with DROP TABLE -# [cacheName=SQL_PUBLIC_STUDENT] - -student_cache.destroy() -client.close() + student_cache.destroy() diff --git a/examples/failover.py b/examples/failover.py index 7911ce0..21ab547 100644 --- a/examples/failover.py +++ b/examples/failover.py @@ -15,7 +15,7 @@ from pyignite import Client from pyignite.datatypes.cache_config import CacheMode -from pyignite.datatypes.prop_codes import * +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_CACHE_MODE, PROP_BACKUPS_NUMBER from pyignite.exceptions import SocketError @@ -25,30 +25,32 @@ ('127.0.0.1', 10802), ] -client = Client(timeout=4.0) -client.connect(nodes) -print('Connected') -my_cache = client.get_or_create_cache({ - PROP_NAME: 'my_cache', - PROP_CACHE_MODE: CacheMode.PARTITIONED, - PROP_BACKUPS_NUMBER: 2, -}) -my_cache.put('test_key', 0) -test_value = 0 +def main(): + client = Client(timeout=4.0) + with client.connect(nodes): + print('Connected') -# abstract main loop -while True: - try: - # do the work - test_value = my_cache.get('test_key') or 0 - my_cache.put('test_key', test_value + 1) - except (OSError, SocketError) as e: - # recover from error (repeat last command, check data - # consistency or just continue − depends on the task) - print('Error: {}'.format(e)) - print('Last value: {}'.format(test_value)) - print('Reconnecting') + my_cache = client.get_or_create_cache({ + PROP_NAME: 'my_cache', + PROP_CACHE_MODE: CacheMode.PARTITIONED, + PROP_BACKUPS_NUMBER: 2, + }) + my_cache.put('test_key', 0) + test_value = 0 + + # abstract main loop + while True: + try: + # do the work + test_value = my_cache.get('test_key') or 0 + my_cache.put('test_key', test_value + 1) + except (OSError, SocketError) as e: + # recover from error (repeat last command, check data + # consistency or just continue − depends on the task) + print(f'Error: {e}') + print(f'Last value: {test_value}') + print('Reconnecting') # Connected # Error: Connection broken. 
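The refactored failover example above wraps its logic in a `main()` function, but the hunk ends before showing how it is invoked. A hypothetical module-level entry point (an assumption about the rest of the file, not shown in this patch) would be:

```
# Hypothetical entry point for examples/failover.py; the actual invocation
# lies outside the hunk shown above.
if __name__ == '__main__':
    main()
```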
diff --git a/examples/get_and_put.py b/examples/get_and_put.py index 49c5108..053e4b7 100644 --- a/examples/get_and_put.py +++ b/examples/get_and_put.py @@ -16,26 +16,24 @@ from pyignite import Client client = Client() -client.connect('127.0.0.1', 10800) +with client.connect('127.0.0.1', 10800): + my_cache = client.create_cache('my cache') -my_cache = client.create_cache('my cache') + my_cache.put('my key', 42) -my_cache.put('my key', 42) + result = my_cache.get('my key') + print(result) # 42 -result = my_cache.get('my key') -print(result) # 42 + result = my_cache.get('non-existent key') + print(result) # None -result = my_cache.get('non-existent key') -print(result) # None + result = my_cache.get_all([ + 'my key', + 'non-existent key', + 'other-key', + ]) + print(result) # {'my key': 42} -result = my_cache.get_all([ - 'my key', - 'non-existent key', - 'other-key', -]) -print(result) # {'my key': 42} + my_cache.clear_key('my key') -my_cache.clear_key('my key') - -my_cache.destroy() -client.close() + my_cache.destroy() diff --git a/examples/get_and_put_complex.py b/examples/get_and_put_complex.py index 2444612..cff0c2f 100644 --- a/examples/get_and_put_complex.py +++ b/examples/get_and_put_complex.py @@ -16,53 +16,52 @@ from collections import OrderedDict from pyignite import Client -from pyignite.datatypes import ( - CollectionObject, MapObject, ObjectArrayObject, -) +from pyignite.datatypes import CollectionObject, MapObject, ObjectArrayObject client = Client() -client.connect('127.0.0.1', 10800) +with client.connect('127.0.0.1', 10800): + my_cache = client.get_or_create_cache('my cache') -my_cache = client.get_or_create_cache('my cache') + value = OrderedDict([(1, 'test'), ('key', 2.0)]) -value = OrderedDict([(1, 'test'), ('key', 2.0)]) + # saving ordered dictionary + type_id = MapObject.LINKED_HASH_MAP + my_cache.put('my dict', (type_id, value)) + result = my_cache.get('my dict') + print(result) # (2, OrderedDict([(1, 'test'), ('key', 2.0)])) -# saving ordered dictionary -type_id = MapObject.LINKED_HASH_MAP -my_cache.put('my dict', (type_id, value)) -result = my_cache.get('my dict') -print(result) # (2, OrderedDict([(1, 'test'), ('key', 2.0)])) + # saving unordered dictionary + type_id = MapObject.HASH_MAP + my_cache.put('my dict', (type_id, value)) + result = my_cache.get('my dict') + print(result) # (1, {'key': 2.0, 1: 'test'}) -# saving unordered dictionary -type_id = MapObject.HASH_MAP -my_cache.put('my dict', (type_id, value)) -result = my_cache.get('my dict') -print(result) # (1, {'key': 2.0, 1: 'test'}) + type_id = CollectionObject.LINKED_LIST + value = [1, '2', 3.0] -type_id = CollectionObject.LINKED_LIST -value = [1, '2', 3.0] + my_cache.put('my list', (type_id, value)) -my_cache.put('my list', (type_id, value)) + result = my_cache.get('my list') + print(result) # (2, [1, '2', 3.0]) -result = my_cache.get('my list') -print(result) # (2, [1, '2', 3.0]) + type_id = CollectionObject.HASH_SET + value = [4, 4, 'test', 5.6] -type_id = CollectionObject.HASH_SET -value = [4, 4, 'test', 5.6] + my_cache.put('my set', (type_id, value)) -my_cache.put('my set', (type_id, value)) + result = my_cache.get('my set') + print(result) # (3, [5.6, 4, 'test']) -result = my_cache.get('my set') -print(result) # (3, [5.6, 4, 'test']) + type_id = ObjectArrayObject.OBJECT + value = [7, '8', 9.0] -type_id = ObjectArrayObject.OBJECT -value = [7, '8', 9.0] + my_cache.put( + 'my array of objects', + (type_id, value), + value_hint=ObjectArrayObject # this hint is mandatory! 
+ ) + result = my_cache.get('my array of objects') + print(result) # (-1, [7, '8', 9.0]) -my_cache.put( - 'my array of objects', - (type_id, value), - value_hint=ObjectArrayObject # this hint is mandatory! -) -result = my_cache.get('my array of objects') -print(result) # (-1, [7, '8', 9.0]) + my_cache.destroy() diff --git a/examples/migrate_binary.py b/examples/migrate_binary.py index f0b0f74..c22fa4f 100644 --- a/examples/migrate_binary.py +++ b/examples/migrate_binary.py @@ -106,18 +106,18 @@ class ExpenseVoucher( client = Client() -client.connect('127.0.0.1', 10800) -accounting = client.get_or_create_cache('accounting') +with client.connect('127.0.0.1', 10800): + accounting = client.get_or_create_cache('accounting') -for key, value in old_data: - accounting.put(key, ExpenseVoucher(**value)) + for key, value in old_data: + accounting.put(key, ExpenseVoucher(**value)) -data_classes = client.query_binary_type('ExpenseVoucher') -print(data_classes) -# { -# -231598180: -# } + data_classes = client.query_binary_type('ExpenseVoucher') + print(data_classes) + # { + # -231598180: + # } s_id, data_class = data_classes.popitem() schema = data_class.schema @@ -182,9 +182,11 @@ def migrate(cache, data, new_class): # migrate data -result = accounting.scan() -migrate(accounting, result, ExpenseVoucherV2) +with client.connect('127.0.0.1', 10800): + accounting = client.get_or_create_cache('accounting') + + with accounting.scan() as cursor: + migrate(accounting, cursor, ExpenseVoucherV2) -# cleanup -accounting.destroy() -client.close() + # cleanup + accounting.destroy() diff --git a/examples/read_binary.py b/examples/read_binary.py index 3a8e9e2..fe642d8 100644 --- a/examples/read_binary.py +++ b/examples/read_binary.py @@ -16,7 +16,7 @@ from decimal import Decimal from pyignite import Client -from pyignite.datatypes.prop_codes import * +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_QUERY_ENTITIES COUNTRY_TABLE_NAME = 'Country' @@ -194,82 +194,98 @@ # establish connection client = Client() -client.connect('127.0.0.1', 10800) +with client.connect('127.0.0.1', 10800): -# create tables -for query in [ - COUNTRY_CREATE_TABLE_QUERY, - CITY_CREATE_TABLE_QUERY, - LANGUAGE_CREATE_TABLE_QUERY, -]: - client.sql(query) + # create tables + for query in [ + COUNTRY_CREATE_TABLE_QUERY, + CITY_CREATE_TABLE_QUERY, + LANGUAGE_CREATE_TABLE_QUERY, + ]: + client.sql(query) -# create indices -for query in [CITY_CREATE_INDEX, LANGUAGE_CREATE_INDEX]: - client.sql(query) + # create indices + for query in [CITY_CREATE_INDEX, LANGUAGE_CREATE_INDEX]: + client.sql(query) -# load data -for row in COUNTRY_DATA: - client.sql(COUNTRY_INSERT_QUERY, query_args=row) + # load data + for row in COUNTRY_DATA: + client.sql(COUNTRY_INSERT_QUERY, query_args=row) -for row in CITY_DATA: - client.sql(CITY_INSERT_QUERY, query_args=row) + for row in CITY_DATA: + client.sql(CITY_INSERT_QUERY, query_args=row) -for row in LANGUAGE_DATA: - client.sql(LANGUAGE_INSERT_QUERY, query_args=row) + for row in LANGUAGE_DATA: + client.sql(LANGUAGE_INSERT_QUERY, query_args=row) -# examine the storage -result = client.get_cache_names() -print(result) -# [ -# 'SQL_PUBLIC_CITY', -# 'SQL_PUBLIC_COUNTRY', -# 'PUBLIC', -# 'SQL_PUBLIC_COUNTRYLANGUAGE' -# ] + # examine the storage + result = client.get_cache_names() + print(result) + # [ + # 'SQL_PUBLIC_CITY', + # 'SQL_PUBLIC_COUNTRY', + # 'PUBLIC', + # 'SQL_PUBLIC_COUNTRYLANGUAGE' + # ] -city_cache = client.get_or_create_cache('SQL_PUBLIC_CITY') -print(city_cache.settings[PROP_NAME]) -# 'SQL_PUBLIC_CITY' + 
city_cache = client.get_or_create_cache('SQL_PUBLIC_CITY') + print(city_cache.settings[PROP_NAME]) + # 'SQL_PUBLIC_CITY' -print(city_cache.settings[PROP_QUERY_ENTITIES]) -# { -# 'key_type_name': ( -# 'SQL_PUBLIC_CITY_9ac8e17a_2f99_45b7_958e_06da32882e9d_KEY' -# ), -# 'value_type_name': ( -# 'SQL_PUBLIC_CITY_9ac8e17a_2f99_45b7_958e_06da32882e9d' -# ), -# 'table_name': 'CITY', -# 'query_fields': [ -# ... -# ], -# 'field_name_aliases': [ -# ... -# ], -# 'query_indexes': [] -# } + print(city_cache.settings[PROP_QUERY_ENTITIES]) + # { + # 'key_type_name': ( + # 'SQL_PUBLIC_CITY_9ac8e17a_2f99_45b7_958e_06da32882e9d_KEY' + # ), + # 'value_type_name': ( + # 'SQL_PUBLIC_CITY_9ac8e17a_2f99_45b7_958e_06da32882e9d' + # ), + # 'table_name': 'CITY', + # 'query_fields': [ + # ... + # ], + # 'field_name_aliases': [ + # ... + # ], + # 'query_indexes': [] + # } -result = city_cache.scan() -print(next(result)) -# ( -# SQL_PUBLIC_CITY_6fe650e1_700f_4e74_867d_58f52f433c43_KEY( -# ID=1890, -# COUNTRYCODE='CHN', -# version=1 -# ), -# SQL_PUBLIC_CITY_6fe650e1_700f_4e74_867d_58f52f433c43( -# NAME='Shanghai', -# DISTRICT='Shanghai', -# POPULATION=9696300, -# version=1 -# ) -# ) + with city_cache.scan() as cursor: + print(next(cursor)) + # ( + # SQL_PUBLIC_CITY_6fe650e1_700f_4e74_867d_58f52f433c43_KEY( + # ID=1890, + # COUNTRYCODE='CHN', + # version=1 + # ), + # SQL_PUBLIC_CITY_6fe650e1_700f_4e74_867d_58f52f433c43( + # NAME='Shanghai', + # DISTRICT='Shanghai', + # POPULATION=9696300, + # version=1 + # ) + # ) -# clean up -for table_name in [ - CITY_TABLE_NAME, - LANGUAGE_TABLE_NAME, - COUNTRY_TABLE_NAME, -]: - result = client.sql(DROP_TABLE_QUERY.format(table_name)) + with client.sql('SELECT _KEY, _VAL FROM CITY WHERE ID = ?', query_args=[1890]) as cursor: + print(next(cursor)) + # ( + # SQL_PUBLIC_CITY_6fe650e1_700f_4e74_867d_58f52f433c43_KEY( + # ID=1890, + # COUNTRYCODE='CHN', + # version=1 + # ), + # SQL_PUBLIC_CITY_6fe650e1_700f_4e74_867d_58f52f433c43( + # NAME='Shanghai', + # DISTRICT='Shanghai', + # POPULATION=9696300, + # version=1 + # ) + # ) + + # clean up + for table_name in [ + CITY_TABLE_NAME, + LANGUAGE_TABLE_NAME, + COUNTRY_TABLE_NAME, + ]: + result = client.sql(DROP_TABLE_QUERY.format(table_name)) diff --git a/examples/readme.md b/examples/readme.md index 3628c82..3caf6c1 100644 --- a/examples/readme.md +++ b/examples/readme.md @@ -4,11 +4,11 @@ This directory contains the following example files: - `binary_basics.py` − basic operations with Complex objects, - `binary_types.py` - read SQL table as a key-value cache, -- `create_binary.py` − create SQL row with key-value operation, +- `create_binary.py` − create SQL row with key-value operation, - `failover.py` − fail-over connection to Ignite cluster, -- `get_and_put.py` − basic key-value operations, +- `get_and_put.py` − basic key-value operations, - `migrate_binary.py` − work with Complex object schemas, -- `scans.py` − cache scan operation, +- `scans.py` − cache scan operation, - `sql.py` − use Ignite SQL, - `type_hints.py` − type hints. diff --git a/examples/scans.py b/examples/scans.py index d5f2b48..eaafa6e 100644 --- a/examples/scans.py +++ b/examples/scans.py @@ -16,40 +16,37 @@ from pyignite import Client client = Client() -client.connect('127.0.0.1', 10800) +with client.connect('127.0.0.1', 10800): + my_cache = client.create_cache('my cache') + my_cache.put_all({'key_{}'.format(v): v for v in range(20)}) + # { + # 'key_0': 0, + # 'key_1': 1, + # 'key_2': 2, + # ... 20 elements in total... 
+ # 'key_18': 18, + # 'key_19': 19 + # } -my_cache = client.create_cache('my cache') + with my_cache.scan() as cursor: + for k, v in cursor: + print(k, v) + # 'key_17' 17 + # 'key_10' 10 + # 'key_6' 6, + # ... 20 elements in total... + # 'key_16' 16 + # 'key_12' 12 -my_cache.put_all({'key_{}'.format(v): v for v in range(20)}) -# { -# 'key_0': 0, -# 'key_1': 1, -# 'key_2': 2, -# ... 20 elements in total... -# 'key_18': 18, -# 'key_19': 19 -# } + with my_cache.scan() as cursor: + print(dict(cursor)) + # { + # 'key_17': 17, + # 'key_10': 10, + # 'key_6': 6, + # ... 20 elements in total... + # 'key_16': 16, + # 'key_12': 12 + # } -result = my_cache.scan() -for k, v in result: - print(k, v) -# 'key_17' 17 -# 'key_10' 10 -# 'key_6' 6, -# ... 20 elements in total... -# 'key_16' 16 -# 'key_12' 12 - -result = my_cache.scan() -print(dict(result)) -# { -# 'key_17': 17, -# 'key_10': 10, -# 'key_6': 6, -# ... 20 elements in total... -# 'key_16': 16, -# 'key_12': 12 -# } - -my_cache.destroy() -client.close() + my_cache.destroy() diff --git a/examples/sql.py b/examples/sql.py index 0e8c729..d81ff26 100644 --- a/examples/sql.py +++ b/examples/sql.py @@ -193,106 +193,99 @@ # establish connection client = Client() -client.connect('127.0.0.1', 10800) +with client.connect('127.0.0.1', 10800): -# create tables -for query in [ - COUNTRY_CREATE_TABLE_QUERY, - CITY_CREATE_TABLE_QUERY, - LANGUAGE_CREATE_TABLE_QUERY, -]: - client.sql(query) + # create tables + for query in [ + COUNTRY_CREATE_TABLE_QUERY, + CITY_CREATE_TABLE_QUERY, + LANGUAGE_CREATE_TABLE_QUERY, + ]: + client.sql(query) -# create indices -for query in [CITY_CREATE_INDEX, LANGUAGE_CREATE_INDEX]: - client.sql(query) + # create indices + for query in [CITY_CREATE_INDEX, LANGUAGE_CREATE_INDEX]: + client.sql(query) -# load data -for row in COUNTRY_DATA: - client.sql(COUNTRY_INSERT_QUERY, query_args=row) + # load data + for row in COUNTRY_DATA: + client.sql(COUNTRY_INSERT_QUERY, query_args=row) -for row in CITY_DATA: - client.sql(CITY_INSERT_QUERY, query_args=row) + for row in CITY_DATA: + client.sql(CITY_INSERT_QUERY, query_args=row) -for row in LANGUAGE_DATA: - client.sql(LANGUAGE_INSERT_QUERY, query_args=row) + for row in LANGUAGE_DATA: + client.sql(LANGUAGE_INSERT_QUERY, query_args=row) -# 10 most populated cities (with pagination) -MOST_POPULATED_QUERY = ''' -SELECT name, population FROM City ORDER BY population DESC LIMIT 10''' + # 10 most populated cities (with pagination) + MOST_POPULATED_QUERY = ''' + SELECT name, population FROM City ORDER BY population DESC LIMIT 10''' -result = client.sql(MOST_POPULATED_QUERY) -print('Most 10 populated cities:') -for row in result: - print(row) -# Most 10 populated cities: -# ['Mumbai (Bombay)', 10500000] -# ['Shanghai', 9696300] -# ['New York', 8008278] -# ['Peking', 7472000] -# ['Delhi', 7206704] -# ['Chongqing', 6351600] -# ['Tianjin', 5286800] -# ['Calcutta [Kolkata]', 4399819] -# ['Wuhan', 4344600] -# ['Harbin', 4289800] + with client.sql(MOST_POPULATED_QUERY) as cursor: + print('Most 10 populated cities:') + for row in cursor: + print(row) + # Most 10 populated cities: + # ['Mumbai (Bombay)', 10500000] + # ['Shanghai', 9696300] + # ['New York', 8008278] + # ['Peking', 7472000] + # ['Delhi', 7206704] + # ['Chongqing', 6351600] + # ['Tianjin', 5286800] + # ['Calcutta [Kolkata]', 4399819] + # ['Wuhan', 4344600] + # ['Harbin', 4289800] -# 10 most populated cities in 3 countries (with pagination and header row) -MOST_POPULATED_IN_3_COUNTRIES_QUERY = ''' -SELECT country.name as country_name, city.name as 
city_name, MAX(city.population) AS max_pop FROM country - JOIN city ON city.countrycode = country.code - WHERE country.code IN ('USA','IND','CHN') - GROUP BY country.name, city.name ORDER BY max_pop DESC LIMIT 10 -''' + # 10 most populated cities in 3 countries (with pagination and header row) + MOST_POPULATED_IN_3_COUNTRIES_QUERY = ''' + SELECT country.name as country_name, city.name as city_name, MAX(city.population) AS max_pop FROM country + JOIN city ON city.countrycode = country.code + WHERE country.code IN ('USA','IND','CHN') + GROUP BY country.name, city.name ORDER BY max_pop DESC LIMIT 10 + ''' -result = client.sql( - MOST_POPULATED_IN_3_COUNTRIES_QUERY, - include_field_names=True, -) -print('Most 10 populated cities in USA, India and China:') -print(next(result)) -print('----------------------------------------') -for row in result: - print(row) -# Most 10 populated cities in USA, India and China: -# ['COUNTRY_NAME', 'CITY_NAME', 'MAX_POP'] -# ---------------------------------------- -# ['India', 'Mumbai (Bombay)', 10500000] -# ['China', 'Shanghai', 9696300] -# ['United States', 'New York', 8008278] -# ['China', 'Peking', 7472000] -# ['India', 'Delhi', 7206704] -# ['China', 'Chongqing', 6351600] -# ['China', 'Tianjin', 5286800] -# ['India', 'Calcutta [Kolkata]', 4399819] -# ['China', 'Wuhan', 4344600] -# ['China', 'Harbin', 4289800] + with client.sql(MOST_POPULATED_IN_3_COUNTRIES_QUERY, include_field_names=True) as cursor: + print('Most 10 populated cities in USA, India and China:') + print(next(cursor)) + print('----------------------------------------') + for row in cursor: + print(row) + # Most 10 populated cities in USA, India and China: + # ['COUNTRY_NAME', 'CITY_NAME', 'MAX_POP'] + # ---------------------------------------- + # ['India', 'Mumbai (Bombay)', 10500000] + # ['China', 'Shanghai', 9696300] + # ['United States', 'New York', 8008278] + # ['China', 'Peking', 7472000] + # ['India', 'Delhi', 7206704] + # ['China', 'Chongqing', 6351600] + # ['China', 'Tianjin', 5286800] + # ['India', 'Calcutta [Kolkata]', 4399819] + # ['China', 'Wuhan', 4344600] + # ['China', 'Harbin', 4289800] -# show city info -CITY_INFO_QUERY = '''SELECT * FROM City WHERE id = ?''' + # show city info + CITY_INFO_QUERY = '''SELECT * FROM City WHERE id = ?''' -result = client.sql( - CITY_INFO_QUERY, - query_args=[3802], - include_field_names=True, -) -field_names = next(result) -field_data = list(*result) + with client.sql(CITY_INFO_QUERY, query_args=[3802], include_field_names=True) as cursor: + field_names = next(cursor) + field_data = list(*cursor) -print('City info:') -for field_name, field_value in zip(field_names * len(field_data), field_data): - print('{}: {}'.format(field_name, field_value)) -# City info: -# ID: 3802 -# NAME: Detroit -# COUNTRYCODE: USA -# DISTRICT: Michigan -# POPULATION: 951270 + print('City info:') + for field_name, field_value in zip(field_names * len(field_data), field_data): + print('{}: {}'.format(field_name, field_value)) + # City info: + # ID: 3802 + # NAME: Detroit + # COUNTRYCODE: USA + # DISTRICT: Michigan + # POPULATION: 951270 -# clean up -for table_name in [ - CITY_TABLE_NAME, - LANGUAGE_TABLE_NAME, - COUNTRY_TABLE_NAME, -]: - result = client.sql(DROP_TABLE_QUERY.format(table_name)) + # clean up + for table_name in [ + CITY_TABLE_NAME, + LANGUAGE_TABLE_NAME, + COUNTRY_TABLE_NAME, + ]: + result = client.sql(DROP_TABLE_QUERY.format(table_name)) diff --git a/examples/type_hints.py b/examples/type_hints.py index 4cc44c0..8d53bf9 100644 --- a/examples/type_hints.py 
+++ b/examples/type_hints.py @@ -17,35 +17,34 @@ from pyignite.datatypes import CharObject, ShortObject client = Client() -client.connect('127.0.0.1', 10800) +with client.connect('127.0.0.1', 10800): -my_cache = client.get_or_create_cache('my cache') + my_cache = client.get_or_create_cache('my cache') -my_cache.put('my key', 42) -# value ‘42’ takes 9 bytes of memory as a LongObject + my_cache.put('my key', 42) + # value ‘42’ takes 9 bytes of memory as a LongObject -my_cache.put('my key', 42, value_hint=ShortObject) -# value ‘42’ takes only 3 bytes as a ShortObject + my_cache.put('my key', 42, value_hint=ShortObject) + # value ‘42’ takes only 3 bytes as a ShortObject -my_cache.put('a', 1) -# ‘a’ is a key of type String + my_cache.put('a', 1) + # ‘a’ is a key of type String -my_cache.put('a', 2, key_hint=CharObject) -# another key ‘a’ of type CharObject was created + my_cache.put('a', 2, key_hint=CharObject) + # another key ‘a’ of type CharObject was created -value = my_cache.get('a') -print(value) -# 1 + value = my_cache.get('a') + print(value) + # 1 -value = my_cache.get('a', key_hint=CharObject) -print(value) -# 2 + value = my_cache.get('a', key_hint=CharObject) + print(value) + # 2 -# now let us delete both keys at once -my_cache.remove_keys([ - 'a', # a default type key - ('a', CharObject), # a key of type CharObject -]) + # now let us delete both keys at once + my_cache.remove_keys([ + 'a', # a default type key + ('a', CharObject), # a key of type CharObject + ]) -my_cache.destroy() -client.close() + my_cache.destroy() diff --git a/pyignite/aio_cache.py b/pyignite/aio_cache.py index 24d4bce..32f2cb2 100644 --- a/pyignite/aio_cache.py +++ b/pyignite/aio_cache.py @@ -471,7 +471,7 @@ async def get_size(self, peek_modes=None): conn = await self._get_best_node() return await cache_get_size_async(conn, self._cache_id, peek_modes) - def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False): + def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False) -> AioScanCursor: """ Returns all key-value pairs from the cache, similar to `get_all`, but with internal pagination, which is slower, but safer. diff --git a/pyignite/aio_client.py b/pyignite/aio_client.py index 1870878..7a5959d 100644 --- a/pyignite/aio_client.py +++ b/pyignite/aio_client.py @@ -282,7 +282,7 @@ async def _get_affinity(self, conn: 'AioConnection', caches: Iterable[int]) -> D of an intermittent error (most probably “Getting affinity for topology version earlier than affinity is calculated”). - :param conn: connection to Igneite server, + :param conn: connection to Ignite server, :param caches: Ids of caches, :return: OP_CACHE_PARTITIONS operation result value. """ @@ -414,7 +414,7 @@ def sql( lazy: bool = False, include_field_names: bool = False, max_rows: int = -1, timeout: int = 0, cache: Union[int, str, 'AioCache'] = None - ): + ) -> AioSqlFieldsCursor: """ Runs an SQL query and returns its result. @@ -447,9 +447,9 @@ def sql( (all rows), :param timeout: (optional) non-negative timeout value in ms. Zero disables timeout (default), - :param cache (optional) Name or ID of the cache to use to infer schema. + :param cache: (optional) Name or ID of the cache to use to infer schema. If set, 'schema' argument is ignored, - :return: generator with result rows as a lists. If + :return: async sql fields cursor with result rows as a lists. If `include_field_names` was set, the first row will hold field names. 
""" diff --git a/pyignite/cache.py b/pyignite/cache.py index f00f000..3c93637 100644 --- a/pyignite/cache.py +++ b/pyignite/cache.py @@ -577,7 +577,7 @@ def get_size(self, peek_modes=None): self._get_best_node(), self._cache_id, peek_modes ) - def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False): + def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False) -> ScanCursor: """ Returns all key-value pairs from the cache, similar to `get_all`, but with internal pagination, which is slower, but safer. @@ -596,7 +596,7 @@ def select_row( self, query_str: str, page_size: int = 1, query_args: Optional[list] = None, distributed_joins: bool = False, replicated_only: bool = False, local: bool = False, timeout: int = 0 - ): + ) -> SqlCursor: """ Executes a simplified SQL SELECT query over data stored in the cache. The query returns the whole record (key and value). diff --git a/pyignite/client.py b/pyignite/client.py index b7c4046..17e9d80 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -681,7 +681,7 @@ def sql( lazy: bool = False, include_field_names: bool = False, max_rows: int = -1, timeout: int = 0, cache: Union[int, str, Cache] = None - ): + ) -> SqlFieldsCursor: """ Runs an SQL query and returns its result. @@ -693,9 +693,9 @@ def sql( :param schema: (optional) schema for the query. Defaults to `PUBLIC`, :param statement_type: (optional) statement type. Can be: - * StatementType.ALL − any type (default), + * StatementType.ALL − any type (default), * StatementType.SELECT − select, - * StatementType.UPDATE − update. + * StatementType.UPDATE − update. :param distributed_joins: (optional) distributed joins. Defaults to False, @@ -714,9 +714,9 @@ def sql( (all rows), :param timeout: (optional) non-negative timeout value in ms. Zero disables timeout (default), - :param cache (optional) Name or ID of the cache to use to infer schema. + :param cache: (optional) Name or ID of the cache to use to infer schema. If set, 'schema' argument is ignored, - :return: generator with result rows as a lists. If + :return: sql fields cursor with result rows as a lists. If `include_field_names` was set, the first row will hold field names. """ diff --git a/pyignite/cursors.py b/pyignite/cursors.py index c699556..0a8f0b0 100644 --- a/pyignite/cursors.py +++ b/pyignite/cursors.py @@ -32,6 +32,9 @@ class BaseCursorMixin: @property def connection(self): + """ + Ignite cluster connection. + """ return getattr(self, '_conn', None) @connection.setter @@ -40,6 +43,9 @@ def connection(self, value): @property def cursor_id(self): + """ + Cursor id. + """ return getattr(self, '_cursor_id', None) @cursor_id.setter @@ -48,6 +54,9 @@ def cursor_id(self, value): @property def more(self): + """ + Whether cursor has more values. + """ return getattr(self, '_more', None) @more.setter @@ -56,6 +65,9 @@ def more(self, value): @property def cache_id(self): + """ + Cache id. + """ return getattr(self, '_cache_id', None) @cache_id.setter @@ -64,6 +76,9 @@ def cache_id(self, value): @property def client(self): + """ + Apache Ignite client. + """ return getattr(self, '_client', None) @client.setter @@ -72,6 +87,9 @@ def client(self, value): @property def data(self): + """ + Current fetched data. + """ return getattr(self, '_data', None) @data.setter @@ -90,6 +108,9 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.close() def close(self): + """ + Close cursor. 
+ """ if self.connection and self.cursor_id and self.more: resource_close(self.connection, self.cursor_id) @@ -105,6 +126,9 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): await self.close() async def close(self): + """ + Close cursor. + """ if self.connection and self.cursor_id and self.more: await resource_close_async(self.connection, self.cursor_id) @@ -132,7 +156,17 @@ def _process_page_response(self, result): class ScanCursor(AbstractScanCursor, CursorMixin): + """ + Synchronous scan cursor. + """ def __init__(self, client, cache_id, page_size, partitions, local): + """ + :param client: Synchronous Apache Ignite client. + :param cache_id: Cache id. + :param page_size: page size. + :param partitions: number of partitions to query (negative to query entire cache). + :param local: pass True if this query should be executed on local node only. + """ super().__init__(client, cache_id, page_size, partitions, local) self.connection = self.client.random_node @@ -156,7 +190,17 @@ def __next__(self): class AioScanCursor(AbstractScanCursor, AioCursorMixin): + """ + Asynchronous scan query cursor. + """ def __init__(self, client, cache_id, page_size, partitions, local): + """ + :param client: Asynchronous Apache Ignite client. + :param cache_id: Cache id. + :param page_size: page size. + :param partitions: number of partitions to query (negative to query entire cache). + :param local: pass True if this query should be executed on local node only. + """ super().__init__(client, cache_id, page_size, partitions, local) async def __aenter__(self): @@ -191,7 +235,14 @@ async def __anext__(self): class SqlCursor(CursorMixin): + """ + Synchronous SQL query cursor. + """ def __init__(self, client, cache_id, *args, **kwargs): + """ + :param client: Synchronous Apache Ignite client. + :param cache_id: Cache id. + """ self.client = client self.cache_id = cache_id self.connection = self.client.random_node @@ -241,7 +292,14 @@ def _finalize_init(self, result): class SqlFieldsCursor(AbstractSqlFieldsCursor, CursorMixin): + """ + Synchronous SQL fields query cursor. + """ def __init__(self, client, cache_id, *args, **kwargs): + """ + :param client: Synchronous Apache Ignite client. + :param cache_id: Cache id. + """ super().__init__(client, cache_id) self.connection = self.client.random_node self._finalize_init(sql_fields(self.connection, self.cache_id, *args, **kwargs)) @@ -273,7 +331,14 @@ def __next__(self): class AioSqlFieldsCursor(AbstractSqlFieldsCursor, AioCursorMixin): + """ + Asynchronous SQL fields query cursor. + """ def __init__(self, client, cache_id, *args, **kwargs): + """ + :param client: Synchronous Apache Ignite client. + :param cache_id: Cache id. + """ super().__init__(client, cache_id) self._params = (args, kwargs) From 3586db7c30e22aff61d2251fec1ffaa355b4c599 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Mon, 12 Apr 2021 12:50:49 +0300 Subject: [PATCH 29/62] IGNITE-14511 Fix serialization of bytes, improve serialization-deserialization of collections. - Fixes #30. 
--- pyignite/binary.py | 2 +- pyignite/datatypes/base.py | 6 +- pyignite/datatypes/cache_properties.py | 14 +- pyignite/datatypes/complex.py | 595 +++++++++++------------- pyignite/datatypes/internal.py | 199 ++++---- pyignite/datatypes/primitive.py | 14 +- pyignite/datatypes/primitive_arrays.py | 142 +++--- pyignite/datatypes/primitive_objects.py | 21 +- pyignite/datatypes/standard.py | 280 +++++------ pyignite/queries/response.py | 64 +-- tests/common/test_datatypes.py | 36 +- tests/common/test_key_value.py | 11 +- tests/common/test_sql.py | 128 +++++ 13 files changed, 783 insertions(+), 729 deletions(-) diff --git a/pyignite/binary.py b/pyignite/binary.py index 5a5f895..551f1d0 100644 --- a/pyignite/binary.py +++ b/pyignite/binary.py @@ -151,7 +151,7 @@ async def _from_python_async(self, stream, save_to_buf=False): write_footer(self, stream, header, header_class, schema_items, offsets, initial_pos, save_to_buf) def write_header(obj, stream): - header_class = BinaryObject.build_header() + header_class = BinaryObject.get_header_class() header = header_class() header.type_code = int.from_bytes( BinaryObject.type_code, diff --git a/pyignite/datatypes/base.py b/pyignite/datatypes/base.py index fbd798b..87b251c 100644 --- a/pyignite/datatypes/base.py +++ b/pyignite/datatypes/base.py @@ -72,9 +72,9 @@ async def from_python_async(cls, stream, value, **kwargs): cls.from_python(stream, value, **kwargs) @classmethod - def to_python(cls, ctype_object, *args, **kwargs): + def to_python(cls, ctypes_object, *args, **kwargs): raise NotImplementedError @classmethod - async def to_python_async(cls, ctype_object, *args, **kwargs): - return cls.to_python(ctype_object, *args, **kwargs) + async def to_python_async(cls, ctypes_object, *args, **kwargs): + return cls.to_python(ctypes_object, *args, **kwargs) diff --git a/pyignite/datatypes/cache_properties.py b/pyignite/datatypes/cache_properties.py index d924507..9bf34de 100644 --- a/pyignite/datatypes/cache_properties.py +++ b/pyignite/datatypes/cache_properties.py @@ -115,12 +115,12 @@ async def parse_async(cls, stream): return cls.parse(stream) @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - return cls.prop_data_class.to_python(ctype_object.data, *args, **kwargs) + def to_python(cls, ctypes_object, *args, **kwargs): + return cls.prop_data_class.to_python(ctypes_object.data, *args, **kwargs) @classmethod - async def to_python_async(cls, ctype_object, *args, **kwargs): - return cls.to_python(ctype_object, *args, **kwargs) + async def to_python_async(cls, ctypes_object, *args, **kwargs): + return cls.to_python(ctypes_object, *args, **kwargs) @classmethod def from_python(cls, stream, value): @@ -295,6 +295,6 @@ def from_python(cls, stream, value): ) @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - prop_data_class = prop_map(ctype_object.prop_code) - return prop_data_class.to_python(ctype_object.data, *args, **kwargs) + def to_python(cls, ctypes_object, *args, **kwargs): + prop_data_class = prop_map(ctypes_object.prop_code) + return prop_data_class.to_python(ctypes_object.data, *args, **kwargs) diff --git a/pyignite/datatypes/complex.py b/pyignite/datatypes/complex.py index 5cb6160..119c552 100644 --- a/pyignite/datatypes/complex.py +++ b/pyignite/datatypes/complex.py @@ -20,6 +20,7 @@ from pyignite.constants import * from pyignite.exceptions import ParseError +from .base import IgniteDataType from .internal import AnyDataObject, Struct, infer_from_python, infer_from_python_async from .type_codes import * from .type_ids 
import * @@ -41,122 +42,100 @@ class ObjectArrayObject(Nullable): _type_name = NAME_OBJ_ARR _type_id = TYPE_OBJ_ARR + _fields = [ + ('type_code', ctypes.c_byte), + ('type_id', ctypes.c_int), + ('length', ctypes.c_int) + ] type_code = TC_OBJECT_ARRAY - @classmethod - def build_header(cls): - return type( - cls.__name__ + 'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('type_code', ctypes.c_byte), - ('type_id', ctypes.c_int), - ('length', ctypes.c_int), - ], - } - ) - @classmethod def parse_not_null(cls, stream): - header, header_class = cls.__parse_header(stream) + length, fields = cls.__get_length(stream), [] - fields = [] - for i in range(header.length): + for i in range(length): c_type = AnyDataObject.parse(stream) - fields.append(('element_{}'.format(i), c_type)) + fields.append((f'element_{i}', c_type)) - return cls.__build_final_class(header_class, fields) + return cls.__build_final_class(fields) @classmethod async def parse_not_null_async(cls, stream): - header, header_class = cls.__parse_header(stream) - - fields = [] - for i in range(header.length): + length, fields = cls.__get_length(stream), [] + for i in range(length): c_type = await AnyDataObject.parse_async(stream) - fields.append(('element_{}'.format(i), c_type)) + fields.append((f'element_{i}', c_type)) - return cls.__build_final_class(header_class, fields) + return cls.__build_final_class(fields) @classmethod - def __parse_header(cls, stream): - header_class = cls.build_header() - header = stream.read_ctype(header_class) - stream.seek(ctypes.sizeof(header_class), SEEK_CUR) - return header, header_class + def __get_length(cls, stream): + int_sz, b_sz = ctypes.sizeof(ctypes.c_int), ctypes.sizeof(ctypes.c_byte) + length = int.from_bytes( + stream.slice(stream.tell() + b_sz + int_sz, int_sz), + byteorder=PROTOCOL_BYTE_ORDER + ) + stream.seek(2 * int_sz + b_sz, SEEK_CUR) + return length @classmethod - def __build_final_class(cls, header_class, fields): + def __build_final_class(cls, fields): return type( cls.__name__, - (header_class,), + (ctypes.LittleEndianStructure,), { '_pack_': 1, - '_fields_': fields, + '_fields_': cls._fields + fields, } ) @classmethod - def to_python_not_null(cls, ctype_object, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, *args, **kwargs): result = [] - for i in range(ctype_object.length): + for i in range(ctypes_object.length): result.append( AnyDataObject.to_python( - getattr(ctype_object, 'element_{}'.format(i)), + getattr(ctypes_object, f'element_{i}'), *args, **kwargs ) ) - return ctype_object.type_id, result + return ctypes_object.type_id, result @classmethod - async def to_python_not_null_async(cls, ctype_object, *args, **kwargs): + async def to_python_not_null_async(cls, ctypes_object, *args, **kwargs): result = [ await AnyDataObject.to_python_async( - getattr(ctype_object, 'element_{}'.format(i)), *args, **kwargs + getattr(ctypes_object, f'element_{i}'), *args, **kwargs ) - for i in range(ctype_object.length)] - return ctype_object.type_id, result + for i in range(ctypes_object.length)] + return ctypes_object.type_id, result @classmethod def from_python_not_null(cls, stream, value, *args, **kwargs): - type_or_id, value = value - try: - length = len(value) - except TypeError: - value = [value] - length = 1 - - cls.__write_header(stream, type_or_id, length) + value = cls.__write_header(stream, value) for x in value: infer_from_python(stream, x) @classmethod async def from_python_not_null_async(cls, stream, value, *args, **kwargs): - type_or_id, 
value = value + value = cls.__write_header(stream, value) + for x in value: + await infer_from_python_async(stream, x) + + @classmethod + def __write_header(cls, stream, value): + type_id, value = value try: length = len(value) except TypeError: value = [value] length = 1 - cls.__write_header(stream, type_or_id, length) - for x in value: - await infer_from_python_async(stream, x) + stream.write(cls.type_code) + stream.write(type_id.to_bytes(ctypes.sizeof(ctypes.c_int), byteorder=PROTOCOL_BYTE_ORDER, signed=True)) + stream.write(length.to_bytes(ctypes.sizeof(ctypes.c_int), byteorder=PROTOCOL_BYTE_ORDER)) - @classmethod - def __write_header(cls, stream, type_or_id, length): - header_class = cls.build_header() - header = header_class() - header.type_code = int.from_bytes( - cls.type_code, - byteorder=PROTOCOL_BYTE_ORDER - ) - header.length = length - header.type_id = type_or_id - - stream.write(header) + return value class WrappedDataObject(Nullable): @@ -170,32 +149,23 @@ class WrappedDataObject(Nullable): """ type_code = TC_ARRAY_WRAPPED_OBJECTS - @classmethod - def build_header(cls): - return type( - cls.__name__ + 'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('type_code', ctypes.c_byte), - ('length', ctypes.c_int), - ], - } - ) - @classmethod def parse_not_null(cls, stream): - header_class = cls.build_header() - header = stream.read_ctype(header_class) + int_sz, b_sz = ctypes.sizeof(ctypes.c_int), ctypes.sizeof(ctypes.c_byte) + length = int.from_bytes( + stream.slice(stream.tell() + b_sz, int_sz), + byteorder=PROTOCOL_BYTE_ORDER + ) final_class = type( cls.__name__, - (header_class,), + (ctypes.LittleEndianStructure,), { '_pack_': 1, '_fields_': [ - ('payload', ctypes.c_byte * header.length), + ('type_code', ctypes.c_byte), + ('length', ctypes.c_int), + ('payload', ctypes.c_byte * length), ('offset', ctypes.c_int), ], } @@ -205,11 +175,11 @@ def parse_not_null(cls, stream): return final_class @classmethod - def to_python_not_null(cls, ctype_object, *args, **kwargs): - return bytes(ctype_object.payload), ctype_object.offset + def to_python_not_null(cls, ctypes_object, *args, **kwargs): + return bytes(ctypes_object.payload), ctypes_object.offset @classmethod - def from_python(cls, stream, value, *args, **kwargs): + def from_python_not_null(cls, stream, value, *args, **kwargs): raise ParseError('Send unwrapped data.') @@ -251,59 +221,47 @@ class CollectionObject(Nullable): _type_name = NAME_COL _type_id = TYPE_COL + _header_class = None type_code = TC_COLLECTION pythonic = list default = [] - @classmethod - def build_header(cls): - return type( - cls.__name__ + 'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('type_code', ctypes.c_byte), - ('length', ctypes.c_int), - ('type', ctypes.c_byte), - ], - } - ) - @classmethod def parse_not_null(cls, stream): - header, header_class = cls.__parse_header(stream) + fields, length = cls.__parse_header(stream) - fields = [] - for i in range(header.length): + for i in range(length): c_type = AnyDataObject.parse(stream) - fields.append(('element_{}'.format(i), c_type)) + fields.append((f'element_{i}', c_type)) - return cls.__build_final_class(header_class, fields) + return cls.__build_final_class(fields) @classmethod async def parse_not_null_async(cls, stream): - header, header_class = cls.__parse_header(stream) + fields, length = cls.__parse_header(stream) - fields = [] - for i in range(header.length): + for i in range(length): c_type = await AnyDataObject.parse_async(stream) - 
fields.append(('element_{}'.format(i), c_type)) + fields.append((f'element_{i}', c_type)) - return cls.__build_final_class(header_class, fields) + return cls.__build_final_class(fields) @classmethod def __parse_header(cls, stream): - header_class = cls.build_header() - header = stream.read_ctype(header_class) - stream.seek(ctypes.sizeof(header_class), SEEK_CUR) - return header, header_class + int_sz, b_sz = ctypes.sizeof(ctypes.c_int), ctypes.sizeof(ctypes.c_byte) + header_fields = [('type_code', ctypes.c_byte), ('length', ctypes.c_int), ('type', ctypes.c_byte)] + length = int.from_bytes( + stream.slice(stream.tell() + b_sz, int_sz), + byteorder=PROTOCOL_BYTE_ORDER + ) + stream.seek(int_sz + 2 * b_sz, SEEK_CUR) + return header_fields, length @classmethod - def __build_final_class(cls, header_class, fields): + def __build_final_class(cls, fields): return type( cls.__name__, - (header_class,), + (ctypes.LittleEndianStructure,), { '_pack_': 1, '_fields_': fields, @@ -311,134 +269,91 @@ def __build_final_class(cls, header_class, fields): ) @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - length = cls.__get_length(ctype_object) - if length is None: - return None - + def to_python_not_null(cls, ctypes_object, *args, **kwargs): result = [ - AnyDataObject.to_python(getattr(ctype_object, f'element_{i}'), *args, **kwargs) - for i in range(length) + AnyDataObject.to_python(getattr(ctypes_object, f'element_{i}'), *args, **kwargs) + for i in range(ctypes_object.length) ] - return ctype_object.type, result + return ctypes_object.type, result @classmethod - async def to_python_async(cls, ctype_object, *args, **kwargs): - length = cls.__get_length(ctype_object) - if length is None: - return None - + async def to_python_not_null_async(cls, ctypes_object, *args, **kwargs): result_coro = [ - AnyDataObject.to_python_async(getattr(ctype_object, f'element_{i}'), *args, **kwargs) - for i in range(length) + AnyDataObject.to_python_async(getattr(ctypes_object, f'element_{i}'), *args, **kwargs) + for i in range(ctypes_object.length) ] - return ctype_object.type, await asyncio.gather(*result_coro) - - @classmethod - def __get_length(cls, ctype_object): - return getattr(ctype_object, "length", None) + return ctypes_object.type, await asyncio.gather(*result_coro) @classmethod def from_python_not_null(cls, stream, value, *args, **kwargs): - type_or_id, value = value + type_id, value = value try: length = len(value) except TypeError: value = [value] length = 1 - cls.__write_header(stream, type_or_id, length) + cls.__write_header(stream, type_id, length) for x in value: infer_from_python(stream, x) @classmethod async def from_python_not_null_async(cls, stream, value, *args, **kwargs): - type_or_id, value = value + type_id, value = value try: length = len(value) except TypeError: value = [value] length = 1 - cls.__write_header(stream, type_or_id, length) + cls.__write_header(stream, type_id, length) for x in value: await infer_from_python_async(stream, x) @classmethod - def __write_header(cls, stream, type_or_id, length): - header_class = cls.build_header() - header = header_class() - header.type_code = int.from_bytes( - cls.type_code, - byteorder=PROTOCOL_BYTE_ORDER + def __write_header(cls, stream, type_id, length): + stream.write(cls.type_code) + stream.write(length.to_bytes( + ctypes.sizeof(ctypes.c_int), byteorder=PROTOCOL_BYTE_ORDER + )) + stream.write(type_id.to_bytes( + length=ctypes.sizeof(ctypes.c_byte), + byteorder=PROTOCOL_BYTE_ORDER, + signed=True) ) - header.length = length - header.type = 
type_or_id - - stream.write(header) - -class Map(Nullable): - """ - Dictionary type, payload-only. - - Ignite does not track the order of key-value pairs in its caches, hence - the ordinary Python dict type, not the collections.OrderedDict. - """ - _type_name = NAME_MAP - _type_id = TYPE_MAP +class _MapBase: HASH_MAP = 1 LINKED_HASH_MAP = 2 @classmethod - def build_header(cls): - return type( - cls.__name__ + 'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('length', ctypes.c_int), - ], - } - ) + def _parse_header(cls, stream): + raise NotImplementedError @classmethod - def parse_not_null(cls, stream): - header, header_class = cls.__parse_header(stream) - - fields = [] - for i in range(header.length << 1): + def _parse(cls, stream): + fields, length = cls._parse_header(stream) + for i in range(length << 1): c_type = AnyDataObject.parse(stream) - fields.append(('element_{}'.format(i), c_type)) - - return cls.__build_final_class(header_class, fields) + fields.append((f'element_{i}', c_type)) + return cls.__build_final_class(fields) @classmethod - async def parse_not_null_async(cls, stream): - header, header_class = cls.__parse_header(stream) - - fields = [] - for i in range(header.length << 1): + async def _parse_async(cls, stream): + fields, length = cls._parse_header(stream) + for i in range(length << 1): c_type = await AnyDataObject.parse_async(stream) - fields.append(('element_{}'.format(i), c_type)) - - return cls.__build_final_class(header_class, fields) + fields.append((f'element_{i}', c_type)) - @classmethod - def __parse_header(cls, stream): - header_class = cls.build_header() - header = stream.read_ctype(header_class) - stream.seek(ctypes.sizeof(header_class), SEEK_CUR) - return header, header_class + return cls.__build_final_class(fields) @classmethod - def __build_final_class(cls, header_class, fields): + def __build_final_class(cls, fields): return type( cls.__name__, - (header_class,), + (ctypes.LittleEndianStructure,), { '_pack_': 1, '_fields_': fields, @@ -446,76 +361,118 @@ def __build_final_class(cls, header_class, fields): ) @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - map_cls = cls.__get_map_class(ctype_object) + def _to_python(cls, ctypes_object, *args, **kwargs): + map_cls = cls.__get_map_class(ctypes_object) result = map_cls() - for i in range(0, ctype_object.length << 1, 2): + for i in range(0, ctypes_object.length << 1, 2): k = AnyDataObject.to_python( - getattr(ctype_object, 'element_{}'.format(i)), + getattr(ctypes_object, f'element_{i}'), *args, **kwargs ) v = AnyDataObject.to_python( - getattr(ctype_object, 'element_{}'.format(i + 1)), + getattr(ctypes_object, f'element_{i + 1}'), *args, **kwargs ) result[k] = v return result @classmethod - async def to_python_async(cls, ctype_object, *args, **kwargs): - map_cls = cls.__get_map_class(ctype_object) + async def _to_python_async(cls, ctypes_object, *args, **kwargs): + map_cls = cls.__get_map_class(ctypes_object) kv_pairs_coro = [ asyncio.gather( AnyDataObject.to_python_async( - getattr(ctype_object, 'element_{}'.format(i)), + getattr(ctypes_object, f'element_{i}'), *args, **kwargs ), AnyDataObject.to_python_async( - getattr(ctype_object, 'element_{}'.format(i + 1)), + getattr(ctypes_object, f'element_{i + 1}'), *args, **kwargs ) - ) for i in range(0, ctype_object.length << 1, 2) + ) for i in range(0, ctypes_object.length << 1, 2) ] return map_cls(await asyncio.gather(*kv_pairs_coro)) @classmethod - def __get_map_class(cls, ctype_object): - map_type = 
getattr(ctype_object, 'type', cls.HASH_MAP) + def __get_map_class(cls, ctypes_object): + map_type = getattr(ctypes_object, 'type', cls.HASH_MAP) return OrderedDict if map_type == cls.LINKED_HASH_MAP else dict @classmethod - def from_python(cls, stream, value, type_id=None): - cls.__write_header(stream, type_id, len(value)) + def _from_python(cls, stream, value, type_id=None): + cls._write_header(stream, type_id, len(value)) for k, v in value.items(): infer_from_python(stream, k) infer_from_python(stream, v) @classmethod - async def from_python_async(cls, stream, value, type_id=None): - cls.__write_header(stream, type_id, len(value)) + async def _from_python_async(cls, stream, value, type_id): + cls._write_header(stream, type_id, len(value)) for k, v in value.items(): await infer_from_python_async(stream, k) await infer_from_python_async(stream, v) @classmethod - def __write_header(cls, stream, type_id, length): - header_class = cls.build_header() - header = header_class() - header.length = length + def _write_header(cls, stream, type_id, length): + raise NotImplementedError - if hasattr(header, 'type_code'): - header.type_code = int.from_bytes(cls.type_code, byteorder=PROTOCOL_BYTE_ORDER) - if hasattr(header, 'type'): - header.type = type_id +class Map(IgniteDataType, _MapBase): + """ + Dictionary type, payload-only. - stream.write(header) + Ignite does not track the order of key-value pairs in its caches, hence + the ordinary Python dict type, not the collections.OrderedDict. + """ + _type_name = NAME_MAP + _type_id = TYPE_MAP + + @classmethod + def parse(cls, stream): + return cls._parse(stream) + + @classmethod + async def parse_async(cls, stream): + return await cls._parse_async(stream) + + @classmethod + def _parse_header(cls, stream): + int_sz = ctypes.sizeof(ctypes.c_int) + length = int.from_bytes( + stream.slice(stream.tell(), int_sz), + byteorder=PROTOCOL_BYTE_ORDER + ) + stream.seek(int_sz, SEEK_CUR) + return [('length', ctypes.c_int)], length + + @classmethod + def to_python(cls, ctypes_object, *args, **kwargs): + return cls._to_python(ctypes_object, *args, **kwargs) + + @classmethod + async def to_python_async(cls, ctypes_object, *args, **kwargs): + return await cls._to_python_async(ctypes_object, *args, **kwargs) + + @classmethod + def from_python(cls, stream, value, type_id=None): + return cls._from_python(stream, value, type_id) + + @classmethod + async def from_python_async(cls, stream, value, type_id=None): + return await cls._from_python_async(stream, value, type_id) + + @classmethod + def _write_header(cls, stream, type_id, length): + stream.write(length.to_bytes( + length=ctypes.sizeof(ctypes.c_int), + byteorder=PROTOCOL_BYTE_ORDER + )) -class MapObject(Map): +class MapObject(Nullable, _MapBase): """ This is a dictionary type. 
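
The map refactoring above replaces the generated ctypes header classes with direct cursor arithmetic over the stream. For `MapObject` the wire header is one type-code byte, a four-byte little-endian element count and one subtype byte (hash map vs. linked hash map), and `_parse_header` peeks at the count before seeking past all three fields. A minimal standalone sketch of that layout follows; the type-code value, the `BytesIO` stream and the helper names are illustrative stand-ins rather than the actual constants and stream classes used by the patch.

```
import ctypes
from io import BytesIO

TC_MAP = b'\x19'   # illustrative type code, not taken from the patch
HASH_MAP = 1       # subtype flag for plain (unordered) dict semantics

def write_map_header(stream, length, map_type=HASH_MAP):
    # type code (1 byte) + element count (4 bytes, little-endian) + map subtype (1 byte)
    stream.write(TC_MAP)
    stream.write(length.to_bytes(ctypes.sizeof(ctypes.c_int), byteorder='little'))
    stream.write(map_type.to_bytes(1, byteorder='little', signed=True))

def read_map_header(stream):
    # mirrors the idea of _parse_header: skip the type code, read the count, then the subtype
    buf = stream.read(2 + ctypes.sizeof(ctypes.c_int))
    length = int.from_bytes(buf[1:5], byteorder='little')
    return length, buf[5]

buf = BytesIO()
write_map_header(buf, 3)
buf.seek(0)
assert read_map_header(buf) == (3, HASH_MAP)
```
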
@@ -531,61 +488,65 @@ class MapObject(Map): default = {} @classmethod - def build_header(cls): - return type( - cls.__name__ + 'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('type_code', ctypes.c_byte), - ('length', ctypes.c_int), - ('type', ctypes.c_byte), - ], - } - ) + def parse_not_null(cls, stream): + return cls._parse(stream) @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - obj_type = getattr(ctype_object, "type", None) - if obj_type: - return obj_type, super().to_python(ctype_object, *args, **kwargs) - return None + async def parse_not_null_async(cls, stream): + return await cls._parse_async(stream) @classmethod - async def to_python_async(cls, ctype_object, *args, **kwargs): - obj_type = getattr(ctype_object, "type", None) - if obj_type: - return obj_type, await super().to_python_async(ctype_object, *args, **kwargs) - return None + def _parse_header(cls, stream): + int_sz, b_sz = ctypes.sizeof(ctypes.c_int), ctypes.sizeof(ctypes.c_byte) + length = int.from_bytes( + stream.slice(stream.tell() + b_sz, int_sz), + byteorder=PROTOCOL_BYTE_ORDER + ) + stream.seek(int_sz + 2 * b_sz, SEEK_CUR) + fields = [('type_code', ctypes.c_byte), ('length', ctypes.c_int), ('type', ctypes.c_byte)] + return fields, length @classmethod - def __get_obj_type(cls, ctype_object): - return getattr(ctype_object, "type", None) + def to_python_not_null(cls, ctypes_object, *args, **kwargs): + return ctypes_object.type, cls._to_python(ctypes_object, *args, **kwargs) @classmethod - def from_python(cls, stream, value, **kwargs): - type_id, value = cls.__unpack_value(stream, value) - if value: - super().from_python(stream, value, type_id) + async def to_python_not_null_async(cls, ctypes_object, *args, **kwargs): + return ctypes_object.type, await cls._to_python_async(ctypes_object, *args, **kwargs) @classmethod - async def from_python_async(cls, stream, value, **kwargs): - type_id, value = cls.__unpack_value(stream, value) - if value: - await super().from_python_async(stream, value, type_id) + def from_python_not_null(cls, stream, value, **kwargs): + type_id, value = value + if value is None: + Null.from_python(stream) + else: + cls._from_python(stream, value, type_id) @classmethod - def __unpack_value(cls, stream, value): + async def from_python_not_null_async(cls, stream, value, **kwargs): + type_id, value = value if value is None: Null.from_python(stream) - return None, None + else: + await cls._from_python_async(stream, value, type_id) - return value + @classmethod + def _write_header(cls, stream, type_id, length): + stream.write(cls.type_code) + stream.write(length.to_bytes( + length=ctypes.sizeof(ctypes.c_int), + byteorder=PROTOCOL_BYTE_ORDER) + ) + stream.write(type_id.to_bytes( + length=ctypes.sizeof(ctypes.c_byte), + byteorder=PROTOCOL_BYTE_ORDER, + signed=True) + ) class BinaryObject(Nullable): _type_id = TYPE_BINARY_OBJ + _header_class = None type_code = TC_COMPLEX_OBJECT USER_TYPE = 0x0001 @@ -615,24 +576,26 @@ async def hashcode_async(cls, value: object, client: Optional['AioClient']) -> i return value._hashcode @classmethod - def build_header(cls): - return type( - cls.__name__, - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('type_code', ctypes.c_byte), - ('version', ctypes.c_byte), - ('flags', ctypes.c_short), - ('type_id', ctypes.c_int), - ('hash_code', ctypes.c_int), - ('length', ctypes.c_int), - ('schema_id', ctypes.c_int), - ('schema_offset', ctypes.c_int), - ], - } - ) + def get_header_class(cls): + if not 
cls._header_class: + cls._header_class = type( + cls.__name__, + (ctypes.LittleEndianStructure,), + { + '_pack_': 1, + '_fields_': [ + ('type_code', ctypes.c_byte), + ('version', ctypes.c_byte), + ('flags', ctypes.c_short), + ('type_id', ctypes.c_int), + ('hash_code', ctypes.c_int), + ('length', ctypes.c_int), + ('schema_id', ctypes.c_int), + ('schema_offset', ctypes.c_int), + ], + } + ) + return cls._header_class @classmethod def offset_c_type(cls, flags: int): @@ -686,7 +649,7 @@ async def parse_not_null_async(cls, stream): @classmethod def __parse_header(cls, stream): - header_class = cls.build_header() + header_class = cls.get_header_class() header = stream.read_ctype(header_class) stream.seek(ctypes.sizeof(header_class), SEEK_CUR) return header, header_class @@ -717,51 +680,51 @@ def __build_final_class(cls, stream, header, header_class, object_fields, fields return final_class @classmethod - def to_python(cls, ctype_object, client: 'Client' = None, *args, **kwargs): - type_id = cls.__get_type_id(ctype_object, client) - if type_id: - data_class = client.query_binary_type(type_id, ctype_object.schema_id) - - result = data_class() - result.version = ctype_object.version - for field_name, field_type in data_class.schema.items(): - setattr( - result, field_name, field_type.to_python( - getattr(ctype_object.object_fields, field_name), - client, *args, **kwargs - ) - ) - return result + def to_python_not_null(cls, ctypes_object, client: 'Client' = None, *args, **kwargs): + type_id = ctypes_object.type_id + if not client: + raise ParseError(f'Can not query binary type {type_id}') - return None + data_class = client.query_binary_type(type_id, ctypes_object.schema_id) + result = data_class() + result.version = ctypes_object.version - @classmethod - async def to_python_async(cls, ctype_object, client: 'AioClient' = None, *args, **kwargs): - type_id = cls.__get_type_id(ctype_object, client) - if type_id: - data_class = await client.query_binary_type(type_id, ctype_object.schema_id) - - result = data_class() - result.version = ctype_object.version - - field_values = await asyncio.gather( - *[ - field_type.to_python_async( - getattr(ctype_object.object_fields, field_name), client, *args, **kwargs - ) - for field_name, field_type in data_class.schema.items() - ] + for field_name, field_type in data_class.schema.items(): + setattr( + result, field_name, field_type.to_python( + getattr(ctypes_object.object_fields, field_name), + client, *args, **kwargs + ) ) + return result - for i, field_name in enumerate(data_class.schema.keys()): - setattr(result, field_name, field_values[i]) + @classmethod + async def to_python_not_null_async(cls, ctypes_object, client: 'AioClient' = None, *args, **kwargs): + type_id = ctypes_object.type_id + if not client: + raise ParseError(f'Can not query binary type {type_id}') - return result - return None + data_class = await client.query_binary_type(type_id, ctypes_object.schema_id) + result = data_class() + result.version = ctypes_object.version + + field_values = await asyncio.gather( + *[ + field_type.to_python_async( + getattr(ctypes_object.object_fields, field_name), client, *args, **kwargs + ) + for field_name, field_type in data_class.schema.items() + ] + ) + + for i, field_name in enumerate(data_class.schema.keys()): + setattr(result, field_name, field_values[i]) + + return result @classmethod - def __get_type_id(cls, ctype_object, client): - type_id = getattr(ctype_object, "type_id", None) + def __get_type_id(cls, ctypes_object, client): + type_id = 
getattr(ctypes_object, "type_id", None) if type_id: if not client: raise ParseError(f'Can not query binary type {type_id}') diff --git a/pyignite/datatypes/internal.py b/pyignite/datatypes/internal.py index 55ed844..9bd1b76 100644 --- a/pyignite/datatypes/internal.py +++ b/pyignite/datatypes/internal.py @@ -136,15 +136,15 @@ async def parse_async(self, stream, context): return await self.var1.parse_async(stream) return await self.var2.parse_async(stream) - def to_python(self, ctype_object, context, *args, **kwargs): + def to_python(self, ctypes_object, context, *args, **kwargs): if self.predicate2(context): - return self.var1.to_python(ctype_object, *args, **kwargs) - return self.var2.to_python(ctype_object, *args, **kwargs) + return self.var1.to_python(ctypes_object, *args, **kwargs) + return self.var2.to_python(ctypes_object, *args, **kwargs) - async def to_python_async(self, ctype_object, context, *args, **kwargs): + async def to_python_async(self, ctypes_object, context, *args, **kwargs): if self.predicate2(context): - return await self.var1.to_python_async(ctype_object, *args, **kwargs) - return await self.var2.to_python_async(ctype_object, *args, **kwargs) + return await self.var1.to_python_async(ctypes_object, *args, **kwargs) + return await self.var2.to_python_async(ctypes_object, *args, **kwargs) @attr.s @@ -154,67 +154,56 @@ class StructArray: counter_type = attr.ib(default=ctypes.c_int) defaults = attr.ib(type=dict, default={}) - def build_header_class(self): - return type( - self.__class__.__name__ + 'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('length', self.counter_type), - ], - }, - ) - def parse(self, stream): - fields, length = [], self.__parse_length(stream) + fields, length = self.__parse_header(stream) for i in range(length): c_type = Struct(self.following).parse(stream) - fields.append(('element_{}'.format(i), c_type)) + fields.append((f'element_{i}', c_type)) - return self.__build_final_class(fields) + return self.build_c_type(fields) async def parse_async(self, stream): - fields, length = [], self.__parse_length(stream) + fields, length = self.__parse_header(stream) for i in range(length): c_type = await Struct(self.following).parse_async(stream) - fields.append(('element_{}'.format(i), c_type)) + fields.append((f'element_{i}', c_type)) - return self.__build_final_class(fields) + return self.build_c_type(fields) - def __parse_length(self, stream): - counter_type_len = ctypes.sizeof(self.counter_type) + def __parse_header(self, stream): + counter_sz = ctypes.sizeof(self.counter_type) length = int.from_bytes( - stream.slice(offset=counter_type_len), + stream.slice(offset=counter_sz), byteorder=PROTOCOL_BYTE_ORDER ) - stream.seek(counter_type_len, SEEK_CUR) - return length + stream.seek(counter_sz, SEEK_CUR) + return [('length', self.counter_type)], length - def __build_final_class(self, fields): + @staticmethod + def build_c_type(fields): return type( 'StructArray', - (self.build_header_class(),), + (ctypes.LittleEndianStructure,), { '_pack_': 1, '_fields_': fields, }, ) - def to_python(self, ctype_object, *args, **kwargs): - length = getattr(ctype_object, 'length', 0) + def to_python(self, ctypes_object, *args, **kwargs): + length = getattr(ctypes_object, 'length', 0) return [ - Struct(self.following, dict_type=dict).to_python(getattr(ctype_object, 'element_{}'.format(i)), + Struct(self.following, dict_type=dict).to_python(getattr(ctypes_object, f'element_{i}'), *args, **kwargs) for i in range(length) ] - async def 
to_python_async(self, ctype_object, *args, **kwargs): - length = getattr(ctype_object, 'length', 0) + async def to_python_async(self, ctypes_object, *args, **kwargs): + length = getattr(ctypes_object, 'length', 0) result_coro = [ - Struct(self.following, dict_type=dict).to_python_async(getattr(ctype_object, 'element_{}'.format(i)), + Struct(self.following, dict_type=dict).to_python_async(getattr(ctypes_object, f'element_{i}'), *args, **kwargs) for i in range(length) ] @@ -239,10 +228,10 @@ async def from_python_async(self, stream, value): await el_class.from_python_async(stream, v[name]) def __write_header(self, stream, length): - header_class = self.build_header_class() - header = header_class() - header.length = length - stream.write(header) + stream.write( + length.to_bytes(ctypes.sizeof(self.counter_type), + byteorder=PROTOCOL_BYTE_ORDER) + ) @attr.s @@ -262,7 +251,7 @@ def parse(self, stream): if name in ctx: ctx[name] = stream.read_ctype(c_type, direction=READ_BACKWARD) - return self.__build_final_class(fields) + return self.build_c_type(fields) async def parse_async(self, stream): fields, ctx = [], self.__prepare_conditional_ctx() @@ -274,7 +263,7 @@ async def parse_async(self, stream): if name in ctx: ctx[name] = stream.read_ctype(c_type, direction=READ_BACKWARD) - return self.__build_final_class(fields) + return self.build_c_type(fields) def __prepare_conditional_ctx(self): ctx = {} @@ -285,7 +274,7 @@ def __prepare_conditional_ctx(self): return ctx @staticmethod - def __build_final_class(fields): + def build_c_type(fields): return type( 'Struct', (ctypes.LittleEndianStructure,), @@ -295,34 +284,34 @@ def __build_final_class(fields): }, ) - def to_python(self, ctype_object, *args, **kwargs) -> Union[dict, OrderedDict]: + def to_python(self, ctypes_object, *args, **kwargs) -> Union[dict, OrderedDict]: result = self.dict_type() for name, c_type in self.fields: is_cond = isinstance(c_type, Conditional) result[name] = c_type.to_python( - getattr(ctype_object, name), + getattr(ctypes_object, name), result, *args, **kwargs ) if is_cond else c_type.to_python( - getattr(ctype_object, name), + getattr(ctypes_object, name), *args, **kwargs ) return result - async def to_python_async(self, ctype_object, *args, **kwargs) -> Union[dict, OrderedDict]: + async def to_python_async(self, ctypes_object, *args, **kwargs) -> Union[dict, OrderedDict]: result = self.dict_type() for name, c_type in self.fields: is_cond = isinstance(c_type, Conditional) if is_cond: value = await c_type.to_python_async( - getattr(ctype_object, name), + getattr(ctypes_object, name), result, *args, **kwargs ) else: value = await c_type.to_python_async( - getattr(ctype_object, name), + getattr(ctypes_object, name), *args, **kwargs ) result[name] = value @@ -405,18 +394,18 @@ def __data_class_parse(cls, stream): raise ParseError('Unknown type code: `{}`'.format(type_code)) @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - data_class = cls.__data_class_from_ctype(ctype_object) - return data_class.to_python(ctype_object) + def to_python(cls, ctypes_object, *args, **kwargs): + data_class = cls.__data_class_from_ctype(ctypes_object) + return data_class.to_python(ctypes_object) @classmethod - async def to_python_async(cls, ctype_object, *args, **kwargs): - data_class = cls.__data_class_from_ctype(ctype_object) - return await data_class.to_python_async(ctype_object) + async def to_python_async(cls, ctypes_object, *args, **kwargs): + data_class = cls.__data_class_from_ctype(ctypes_object) + return await 
data_class.to_python_async(ctypes_object) @classmethod - def __data_class_from_ctype(cls, ctype_object): - type_code = ctype_object.type_code.to_bytes( + def __data_class_from_ctype(cls, ctypes_object): + type_code = ctypes_object.type_code.to_bytes( ctypes.sizeof(ctypes.c_byte), byteorder=PROTOCOL_BYTE_ORDER ) @@ -440,7 +429,7 @@ def _init_python_mapping(cls): int: LongObject, float: DoubleObject, str: String, - bytes: String, + bytes: ByteArrayObject, bytearray: ByteArrayObject, bool: BoolObject, type(None): Null, @@ -455,7 +444,6 @@ def _init_python_mapping(cls): int: LongArrayObject, float: DoubleArrayObject, str: StringArrayObject, - bytes: StringArrayObject, bool: BoolArrayObject, uuid.UUID: UUIDArrayObject, datetime: DateArrayObject, @@ -558,48 +546,33 @@ class AnyDataArray(AnyDataObject): """ counter_type = attr.ib(default=ctypes.c_int) - def build_header(self): - return type( - self.__class__.__name__ + 'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('length', self.counter_type), - ], - } - ) - def parse(self, stream): - header, header_class = self.__parse_header(stream) - - fields = [] - for i in range(header.length): + fields, length = self.__parse_header(stream) + for i in range(length): c_type = super().parse(stream) - fields.append(('element_{}'.format(i), c_type)) - - return self.__build_final_class(header_class, fields) + fields.append((f'element_{i}', c_type)) + return self.build_c_type(fields) async def parse_async(self, stream): - header, header_class = self.__parse_header(stream) - - fields = [] - for i in range(header.length): + fields, length = self.__parse_header(stream) + for i in range(length): c_type = await super().parse_async(stream) - fields.append(('element_{}'.format(i), c_type)) - - return self.__build_final_class(header_class, fields) + fields.append((f'element_{i}', c_type)) + return self.build_c_type(fields) def __parse_header(self, stream): - header_class = self.build_header() - header = stream.read_ctype(header_class) - stream.seek(ctypes.sizeof(header_class), SEEK_CUR) - return header, header_class + cnt_sz = ctypes.sizeof(self.counter_type) + length = int.from_bytes( + stream.slice(stream.tell(), cnt_sz), + byteorder=PROTOCOL_BYTE_ORDER + ) + stream.seek(cnt_sz, SEEK_CUR) + return [('length', self.counter_type)], length - def __build_final_class(self, header_class, fields): + def build_c_type(self, fields): return type( self.__class__.__name__, - (header_class,), + (ctypes.LittleEndianStructure,), { '_pack_': 1, '_fields_': fields, @@ -607,56 +580,50 @@ def __build_final_class(self, header_class, fields): ) @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - length = cls.__get_length(ctype_object) + def to_python(cls, ctypes_object, *args, **kwargs): + length = getattr(ctypes_object, "length", 0) return [ - super().to_python(getattr(ctype_object, 'element_{}'.format(i)), *args, **kwargs) + super().to_python(getattr(ctypes_object, f'element_{i}'), *args, **kwargs) for i in range(length) ] @classmethod - async def to_python_async(cls, ctype_object, *args, **kwargs): - length = cls.__get_length(ctype_object) + async def to_python_async(cls, ctypes_object, *args, **kwargs): + length = getattr(ctypes_object, "length", 0) values = asyncio.gather( *[ super().to_python( - getattr(ctype_object, 'element_{}'.format(i)), + getattr(ctypes_object, f'element_{i}'), *args, **kwargs ) for i in range(length) ] ) return await values - @staticmethod - def __get_length(ctype_object): - return getattr(ctype_object, 
"length", None) - def from_python(self, stream, value): - try: - length = len(value) - except TypeError: - value = [value] - length = 1 - self.__write_header(stream, length) + value = self.__write_header_and_process_value(stream, value) for x in value: infer_from_python(stream, x) async def from_python_async(self, stream, value): + value = self.__write_header_and_process_value(stream, value) + + for x in value: + await infer_from_python_async(stream, x) + + def __write_header_and_process_value(self, stream, value): try: length = len(value) except TypeError: value = [value] length = 1 - self.__write_header(stream, length) - for x in value: - await infer_from_python_async(stream, x) + stream.write(length.to_bytes( + ctypes.sizeof(self.counter_type), + byteorder=PROTOCOL_BYTE_ORDER + )) - def __write_header(self, stream, length): - header_class = self.build_header() - header = header_class() - header.length = length - stream.write(header) + return value diff --git a/pyignite/datatypes/primitive.py b/pyignite/datatypes/primitive.py index 3bbb196..037f680 100644 --- a/pyignite/datatypes/primitive.py +++ b/pyignite/datatypes/primitive.py @@ -52,8 +52,8 @@ def parse(cls, stream): return cls.c_type @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - return ctype_object + def to_python(cls, ctypes_object, *args, **kwargs): + return ctypes_object class Byte(Primitive): @@ -122,8 +122,8 @@ class Char(Primitive): c_type = ctypes.c_short @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - return ctype_object.value.to_bytes( + def to_python(cls, ctypes_object, *args, **kwargs): + return ctypes_object.value.to_bytes( ctypes.sizeof(cls.c_type), byteorder=PROTOCOL_BYTE_ORDER ).decode(PROTOCOL_CHAR_ENCODING) @@ -147,9 +147,9 @@ class Bool(Primitive): c_type = ctypes.c_byte # Use c_byte because c_bool throws endianness conversion error on BE systems. 
@classmethod - def to_python(cls, ctype_object, *args, **kwargs): - return ctype_object != 0 + def to_python(cls, ctypes_object, *args, **kwargs): + return ctypes_object != 0 @classmethod - def from_python(cls, stream, value): + def from_python(cls, stream, value, **kwargs): stream.write(struct.pack(" int: return ord(value) @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - value = getattr(ctype_object, "value", None) - if value is None: - return None + def to_python_not_null(cls, ctypes_object, *args, **kwargs): + value = ctypes_object.value return value.to_bytes( ctypes.sizeof(cls.c_type), byteorder=PROTOCOL_BYTE_ORDER @@ -224,8 +218,5 @@ def hashcode(cls, value: bool, *args, **kwargs) -> int: return 1231 if value else 1237 @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - value = getattr(ctype_object, "value", None) - if value is None: - return None - return value != 0 + def to_python_not_null(cls, ctypes_object, *args, **kwargs): + return ctypes_object.value != 0 diff --git a/pyignite/datatypes/standard.py b/pyignite/datatypes/standard.py index 4ca6795..5657afb 100644 --- a/pyignite/datatypes/standard.py +++ b/pyignite/datatypes/standard.py @@ -23,6 +23,7 @@ from pyignite.constants import * from pyignite.utils import datetime_hashcode, decimal_hashcode, hashcode +from .base import IgniteDataType from .type_codes import * from .type_ids import * from .type_names import * @@ -100,14 +101,14 @@ def parse_not_null(cls, stream): return data_type @classmethod - def to_python_not_null(cls, ctype_object, *args, **kwargs): - if ctype_object.length > 0: - return ctype_object.data.decode(PROTOCOL_STRING_ENCODING) + def to_python_not_null(cls, ctypes_object, *args, **kwargs): + if ctypes_object.length > 0: + return ctypes_object.data.decode(PROTOCOL_STRING_ENCODING) return '' @classmethod - def from_python_not_null(cls, stream, value): + def from_python_not_null(cls, stream, value, **kwargs): if isinstance(value, str): value = value.encode(PROTOCOL_STRING_ENCODING) length = len(value) @@ -135,7 +136,7 @@ def hashcode(cls, value: decimal.Decimal, *args, **kwargs) -> int: return decimal_hashcode(value) @classmethod - def build_c_header(cls): + def build_c_type(cls, length): return type( cls.__name__, (ctypes.LittleEndianStructure,), @@ -145,48 +146,41 @@ def build_c_header(cls): ('type_code', ctypes.c_byte), ('scale', ctypes.c_int), ('length', ctypes.c_int), - ], + ('data', ctypes.c_ubyte * length) + ] } ) @classmethod def parse_not_null(cls, stream): - header_class = cls.build_c_header() - header = stream.read_ctype(header_class) - - data_type = type( - cls.__name__, - (header_class,), - { - '_pack_': 1, - '_fields_': [ - ('data', ctypes.c_ubyte * header.length), - ], - } + int_sz, b_sz = ctypes.sizeof(ctypes.c_int), ctypes.sizeof(ctypes.c_byte) + length = int.from_bytes( + stream.slice(stream.tell() + int_sz + b_sz, int_sz), + byteorder=PROTOCOL_BYTE_ORDER ) - + data_type = cls.build_c_type(length) stream.seek(ctypes.sizeof(data_type), SEEK_CUR) return data_type @classmethod - def to_python_not_null(cls, ctype_object, *args, **kwargs): - sign = 1 if ctype_object.data[0] & 0x80 else 0 - data = ctype_object.data[1:] - data.insert(0, ctype_object.data[0] & 0x7f) + def to_python_not_null(cls, ctypes_object, *args, **kwargs): + sign = 1 if ctypes_object.data[0] & 0x80 else 0 + data = ctypes_object.data[1:] + data.insert(0, ctypes_object.data[0] & 0x7f) # decode n-byte integer result = sum([ [x for x in reversed(data)][i] * 0x100 ** i for i in range(len(data)) ]) # 
apply scale - result = result / decimal.Decimal('10') ** decimal.Decimal(ctype_object.scale) + result = result / decimal.Decimal('10') ** decimal.Decimal(ctypes_object.scale) if sign: # apply sign result = -result return result @classmethod - def from_python_not_null(cls, stream, value: decimal.Decimal): + def from_python_not_null(cls, stream, value: decimal.Decimal, **kwargs): sign, digits, scale = value.normalize().as_tuple() integer = int(''.join([str(d) for d in digits])) # calculate number of bytes (at least one, and not forget the sign bit) @@ -202,17 +196,7 @@ def from_python_not_null(cls, stream, value: decimal.Decimal): data[0] |= 0x80 else: data[0] &= 0x7f - header_class = cls.build_c_header() - data_class = type( - cls.__name__, - (header_class,), - { - '_pack_': 1, - '_fields_': [ - ('data', ctypes.c_ubyte * length), - ], - } - ) + data_class = cls.build_c_type(length) data_object = data_class() data_object.type_code = int.from_bytes( cls.type_code, @@ -266,7 +250,7 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def from_python_not_null(cls, stream, value: uuid.UUID): + def from_python_not_null(cls, stream, value: uuid.UUID, **kwargs): data_type = cls.build_c_type() data_object = data_type() data_object.type_code = int.from_bytes( @@ -381,7 +365,7 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def from_python_not_null(cls, stream, value: [date, datetime]): + def from_python_not_null(cls, stream, value: [date, datetime], **kwargs): if type(value) is date: value = datetime.combine(value, time()) data_type = cls.build_c_type() @@ -433,7 +417,7 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def from_python_not_null(cls, stream, value: timedelta): + def from_python_not_null(cls, stream, value: timedelta, **kwargs): data_type = cls.build_c_type() data_object = data_type() data_object.type_code = int.from_bytes( @@ -480,7 +464,7 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def from_python_not_null(cls, stream, value: tuple): + def from_python_not_null(cls, stream, value: tuple, **kwargs): data_type = cls.build_c_type() data_object = data_type() data_object.type_code = int.from_bytes( @@ -505,84 +489,89 @@ class BinaryEnumObject(EnumObject): type_code = TC_BINARY_ENUM -class StandardArray(Nullable): - """ - Base class for array of primitives. Payload-only. 
- """ - _type_name = None - _type_id = None +class _StandardArrayBase: standard_type = None - type_code = None @classmethod - def build_header_class(cls): - return type( - cls.__name__ + 'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('length', ctypes.c_int), - ], - } - ) + def _parse_header(cls, stream): + raise NotImplementedError @classmethod - def parse_not_null(cls, stream): - header_class = cls.build_header_class() - header = stream.read_ctype(header_class) - stream.seek(ctypes.sizeof(header_class), SEEK_CUR) + def _parse(cls, stream): + fields, length = cls._parse_header(stream) - fields = [] - for i in range(header.length): + for i in range(length): c_type = cls.standard_type.parse(stream) - fields.append(('element_{}'.format(i), c_type)) + fields.append((f'element_{i}', c_type)) - final_class = type( + return type( cls.__name__, - (header_class,), + (ctypes.LittleEndianStructure,), { '_pack_': 1, '_fields_': fields, } ) - return final_class @classmethod - def to_python(cls, ctype_object, *args, **kwargs): - length = getattr(ctype_object, "length", None) - if length is None: - return None + def _write_header(cls, stream, value, **kwargs): + raise NotImplementedError - result = [] - for i in range(length): - result.append( - cls.standard_type.to_python( - getattr(ctype_object, 'element_{}'.format(i)), - *args, **kwargs - ) - ) - return result + @classmethod + def _from_python(cls, stream, value, **kwargs): + cls._write_header(stream, value, **kwargs) + for x in value: + cls.standard_type.from_python(stream, x) @classmethod - async def to_python_async(cls, ctypes_object, *args, **kwargs): - return cls.to_python(ctypes_object, *args, **kwargs) + def _to_python(cls, ctypes_object, *args, **kwargs): + length = ctypes_object.length + return [ + cls.standard_type.to_python( + getattr(ctypes_object, f'element_{i}'), *args, **kwargs + ) for i in range(length) + ] + + +class StandardArray(IgniteDataType, _StandardArrayBase): + """ + Base class for array of primitives. Payload-only. 
+ """ + _type_name = None + _type_id = None + type_code = None @classmethod - def from_python_not_null(cls, stream, value, **kwargs): - header_class = cls.build_header_class() - header = header_class() - if hasattr(header, 'type_code'): - header.type_code = int.from_bytes( - cls.type_code, + def _parse_header(cls, stream): + int_sz = ctypes.sizeof(ctypes.c_int) + length = int.from_bytes( + stream.slice(stream.tell(), int_sz), + byteorder=PROTOCOL_BYTE_ORDER + ) + stream.seek(int_sz, SEEK_CUR) + + return [('length', ctypes.c_int)], length + + @classmethod + def parse(cls, stream): + return cls._parse(stream) + + @classmethod + def _write_header(cls, stream, value, **kwargs): + stream.write( + len(value).to_bytes( + length=ctypes.sizeof(ctypes.c_int), byteorder=PROTOCOL_BYTE_ORDER ) - length = len(value) - header.length = length + ) - stream.write(header) - for x in value: - cls.standard_type.from_python(stream, x) + @classmethod + def from_python(cls, stream, value, **kwargs): + cls._from_python(stream, value, **kwargs) + + @classmethod + def to_python(cls, ctypes_object, *args, **kwargs): + return cls._to_python(ctypes_object, *args, **kwargs) class StringArray(StandardArray): @@ -633,26 +622,47 @@ class EnumArray(StandardArray): standard_type = EnumObject -class StandardArrayObject(StandardArray): +class StandardArrayObject(Nullable, _StandardArrayBase): _type_name = None _type_id = None + standard_type = None + type_code = None pythonic = list default = [] @classmethod - def build_header_class(cls): - return type( - cls.__name__ + 'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('type_code', ctypes.c_byte), - ('length', ctypes.c_int), - ], - } + def _parse_header(cls, stream): + int_sz, b_sz = ctypes.sizeof(ctypes.c_int), ctypes.sizeof(ctypes.c_byte) + length = int.from_bytes( + stream.slice(stream.tell() + b_sz, int_sz), + byteorder=PROTOCOL_BYTE_ORDER + ) + stream.seek(int_sz + b_sz, SEEK_CUR) + + return [('type_code', ctypes.c_byte), ('length', ctypes.c_int)], length + + @classmethod + def parse_not_null(cls, stream): + return cls._parse(stream) + + @classmethod + def _write_header(cls, stream, value, **kwargs): + stream.write(cls.type_code) + stream.write( + len(value).to_bytes( + length=ctypes.sizeof(ctypes.c_int), + byteorder=PROTOCOL_BYTE_ORDER + ) ) + @classmethod + def from_python_not_null(cls, stream, value, **kwargs): + cls._from_python(stream, value, **kwargs) + + @classmethod + def to_python_not_null(cls, ctypes_object, *args, **kwargs): + return cls._to_python(ctypes_object, *args, **kwargs) + class StringArrayObject(StandardArrayObject): """ List of strings. 
""" @@ -714,45 +724,43 @@ class EnumArrayObject(StandardArrayObject): standard_type = EnumObject type_code = TC_ENUM_ARRAY + OBJECT = -1 + @classmethod - def build_header_class(cls): - return type( - cls.__name__ + 'Header', - (ctypes.LittleEndianStructure,), - { - '_pack_': 1, - '_fields_': [ - ('type_code', ctypes.c_byte), - ('type_id', ctypes.c_int), - ('length', ctypes.c_int), - ], - } + def _parse_header(cls, stream): + int_sz, b_sz = ctypes.sizeof(ctypes.c_int), ctypes.sizeof(ctypes.c_byte) + length = int.from_bytes( + stream.slice(stream.tell() + b_sz + int_sz, int_sz), + byteorder=PROTOCOL_BYTE_ORDER ) + stream.seek(2 * int_sz + b_sz, SEEK_CUR) + return [('type_code', ctypes.c_byte), ('type_id', ctypes.c_int), ('length', ctypes.c_int)], length @classmethod - def from_python_not_null(cls, stream, value, **kwargs): - type_id, value = value - header_class = cls.build_header_class() - header = header_class() - if hasattr(header, 'type_code'): - header.type_code = int.from_bytes( - cls.type_code, + def _write_header(cls, stream, value, type_id=-1): + stream.write(cls.type_code) + stream.write( + type_id.to_bytes( + length=ctypes.sizeof(ctypes.c_int), + byteorder=PROTOCOL_BYTE_ORDER, + signed=True + ) + ) + stream.write( + len(value).to_bytes( + length=ctypes.sizeof(ctypes.c_int), byteorder=PROTOCOL_BYTE_ORDER ) - length = len(value) - header.length = length - header.type_id = type_id + ) - stream.write(header) - for x in value: - cls.standard_type.from_python(stream, x) + @classmethod + def from_python_not_null(cls, stream, value, **kwargs): + type_id, value = value + super().from_python_not_null(stream, value, type_id=type_id) @classmethod - def to_python_not_null(cls, ctype_object, *args, **kwargs): - type_id = getattr(ctype_object, "type_id", None) - if type_id is None: - return None - return type_id, super().to_python(ctype_object, *args, **kwargs) + def to_python_not_null(cls, ctypes_object, *args, **kwargs): + return ctypes_object.type_id, cls._to_python(ctypes_object, *args, **kwargs) class BinaryEnumArrayObject(EnumArrayObject): diff --git a/pyignite/queries/response.py b/pyignite/queries/response.py index f0338e1..c0311ec 100644 --- a/pyignite/queries/response.py +++ b/pyignite/queries/response.py @@ -128,25 +128,25 @@ async def _parse_success_async(self, stream, fields: list): c_type = await ignite_type.parse_async(stream) fields.append((name, c_type)) - def to_python(self, ctype_object, *args, **kwargs): + def to_python(self, ctypes_object, *args, **kwargs): if not self.following: return None result = OrderedDict() for name, c_type in self.following: result[name] = c_type.to_python( - getattr(ctype_object, name), + getattr(ctypes_object, name), *args, **kwargs ) return result - async def to_python_async(self, ctype_object, *args, **kwargs): + async def to_python_async(self, ctypes_object, *args, **kwargs): if not self.following: return None values = await asyncio.gather( - *[c_type.to_python_async(getattr(ctype_object, name), *args, **kwargs) for name, c_type in self.following] + *[c_type.to_python_async(getattr(ctypes_object, name), *args, **kwargs) for name, c_type in self.following] ) return OrderedDict([(name, values[i]) for i, (name, _) in enumerate(self.following)]) @@ -239,13 +239,13 @@ def __body_class_post_process(body_class, fields, data_fields): ('more', ctypes.c_byte), ] - def to_python(self, ctype_object, *args, **kwargs): - if getattr(ctype_object, 'status_code', 0) == 0: - result = self.__to_python_result_header(ctype_object, *args, **kwargs) + def 
to_python(self, ctypes_object, *args, **kwargs): + if getattr(ctypes_object, 'status_code', 0) == 0: + result = self.__to_python_result_header(ctypes_object, *args, **kwargs) - for row_item in ctype_object.data._fields_: + for row_item in ctypes_object.data._fields_: row_name = row_item[0] - row_object = getattr(ctype_object.data, row_name) + row_object = getattr(ctypes_object.data, row_name) row = [] for col_item in row_object._fields_: col_name = col_item[0] @@ -254,14 +254,14 @@ def to_python(self, ctype_object, *args, **kwargs): result['data'].append(row) return result - async def to_python_async(self, ctype_object, *args, **kwargs): - if getattr(ctype_object, 'status_code', 0) == 0: - result = self.__to_python_result_header(ctype_object, *args, **kwargs) + async def to_python_async(self, ctypes_object, *args, **kwargs): + if getattr(ctypes_object, 'status_code', 0) == 0: + result = self.__to_python_result_header(ctypes_object, *args, **kwargs) data_coro = [] - for row_item in ctype_object.data._fields_: + for row_item in ctypes_object.data._fields_: row_name = row_item[0] - row_object = getattr(ctype_object.data, row_name) + row_object = getattr(ctypes_object.data, row_name) row_coro = [] for col_item in row_object._fields_: col_name = col_item[0] @@ -274,18 +274,18 @@ async def to_python_async(self, ctype_object, *args, **kwargs): return result @staticmethod - def __to_python_result_header(ctype_object, *args, **kwargs): + def __to_python_result_header(ctypes_object, *args, **kwargs): result = { - 'more': Bool.to_python(ctype_object.more, *args, **kwargs), + 'more': Bool.to_python(ctypes_object.more, *args, **kwargs), 'data': [], } - if hasattr(ctype_object, 'fields'): - result['fields'] = StringArray.to_python(ctype_object.fields, *args, **kwargs) + if hasattr(ctypes_object, 'fields'): + result['fields'] = StringArray.to_python(ctypes_object.fields, *args, **kwargs) else: - result['field_count'] = Int.to_python(ctype_object.field_count, *args, **kwargs) + result['field_count'] = Int.to_python(ctypes_object.field_count, *args, **kwargs) - if hasattr(ctype_object, 'cursor'): - result['cursor'] = Long.to_python(ctype_object.cursor, *args, **kwargs) + if hasattr(ctypes_object, 'cursor'): + result['cursor'] = Long.to_python(ctypes_object.cursor, *args, **kwargs) return result @@ -328,26 +328,26 @@ def __process_type_exists(stream, fields): return type_exists - def to_python(self, ctype_object, *args, **kwargs): - if getattr(ctype_object, 'status_code', 0) == 0: + def to_python(self, ctypes_object, *args, **kwargs): + if getattr(ctypes_object, 'status_code', 0) == 0: result = { - 'type_exists': Bool.to_python(ctype_object.type_exists) + 'type_exists': Bool.to_python(ctypes_object.type_exists) } - if hasattr(ctype_object, 'body'): - result.update(body_struct.to_python(ctype_object.body)) + if hasattr(ctypes_object, 'body'): + result.update(body_struct.to_python(ctypes_object.body)) - if hasattr(ctype_object, 'enums'): - result['enums'] = enum_struct.to_python(ctype_object.enums) + if hasattr(ctypes_object, 'enums'): + result['enums'] = enum_struct.to_python(ctypes_object.enums) - if hasattr(ctype_object, 'schema'): + if hasattr(ctypes_object, 'schema'): result['schema'] = { x['schema_id']: [ z['schema_field_id'] for z in x['schema_fields'] ] - for x in schema_struct.to_python(ctype_object.schema) + for x in schema_struct.to_python(ctypes_object.schema) } return result - async def to_python_async(self, ctype_object, *args, **kwargs): - return self.to_python(ctype_object, *args, **kwargs) + 
async def to_python_async(self, ctypes_object, *args, **kwargs): + return self.to_python(ctypes_object, *args, **kwargs) diff --git a/tests/common/test_datatypes.py b/tests/common/test_datatypes.py index c1aa19f..6771f94 100644 --- a/tests/common/test_datatypes.py +++ b/tests/common/test_datatypes.py @@ -50,6 +50,7 @@ # arrays of integers ([1, 2, 3, 5], None), + (b'buzz', None), (b'buzz', ByteArrayObject), (bytearray([7, 8, 8, 11]), None), (bytearray([7, 8, 8, 11]), ByteArrayObject), @@ -122,7 +123,7 @@ ((-1, [(6001, 1), (6002, 2), (6003, 3)]), BinaryEnumArrayObject), # object array - ((ObjectArrayObject.OBJECT, [1, 2, decimal.Decimal('3')]), ObjectArrayObject), + ((ObjectArrayObject.OBJECT, [1, 2, decimal.Decimal('3'), bytearray(b'\x10\x20')]), ObjectArrayObject), # collection ((CollectionObject.LINKED_LIST, [1, 2, 3]), None), @@ -153,42 +154,47 @@ async def test_put_get_data_async(async_cache, value, value_hint): bytearray_params = [ - [1, 2, 3, 5], - (7, 8, 13, 18), - (-128, -1, 0, 1, 127, 255), + ([1, 2, 3, 5], ByteArrayObject), + ((7, 8, 13, 18), ByteArrayObject), + ((-128, -1, 0, 1, 127, 255), ByteArrayObject), + (b'\x01\x03\x10', None), + (bytearray(b'\x01\x30'), None) ] @pytest.mark.parametrize( - 'value', + 'value,type_hint', bytearray_params ) -def test_bytearray_from_list_or_tuple(cache, value): +def test_bytearray_from_different_input(cache, value, type_hint): """ ByteArrayObject's pythonic type is `bytearray`, but it should also accept lists or tuples as a content. """ - - cache.put('my_key', value, value_hint=ByteArrayObject) - - assert cache.get('my_key') == bytearray([unsigned(ch, ctypes.c_ubyte) for ch in value]) + cache.put('my_key', value, value_hint=type_hint) + __check_bytearray_from_different_input(cache.get('my_key'), value) @pytest.mark.parametrize( - 'value', + 'value,type_hint', bytearray_params ) @pytest.mark.asyncio -async def test_bytearray_from_list_or_tuple_async(async_cache, value): +async def test_bytearray_from_different_input_async(async_cache, value, type_hint): """ ByteArrayObject's pythonic type is `bytearray`, but it should also accept lists or tuples as a content. 
""" - await async_cache.put('my_key', value, value_hint=ByteArrayObject) + __check_bytearray_from_different_input(await async_cache.get('my_key'), value) + - result = await async_cache.get('my_key') - assert result == bytearray([unsigned(ch, ctypes.c_ubyte) for ch in value]) +def __check_bytearray_from_different_input(result, value): + if isinstance(value, (bytes, bytearray)): + assert isinstance(result, bytes) + assert value == result + else: + assert result == bytearray([unsigned(ch, ctypes.c_ubyte) for ch in value]) uuid_params = [ diff --git a/tests/common/test_key_value.py b/tests/common/test_key_value.py index 6e6df61..b03bec2 100644 --- a/tests/common/test_key_value.py +++ b/tests/common/test_key_value.py @@ -422,10 +422,13 @@ async def test_put_get_collection_async(async_cache, key, hinted_value, value): @pytest.fixture def complex_map(): return {"test" + str(i): ((MapObject.HASH_MAP, - {"key_1": ((1, ["value_1", 1.0]), CollectionObject), - "key_2": ((1, [["value_2_1", "1.0"], ["value_2_2", "0.25"]]), CollectionObject), - "key_3": ((1, [["value_3_1", "1.0"], ["value_3_2", "0.25"]]), CollectionObject), - "key_4": ((1, [["value_4_1", "1.0"], ["value_4_2", "0.25"]]), CollectionObject), + {"key_1": ((CollectionObject.ARR_LIST, ["value_1", 1.0]), CollectionObject), + "key_2": ((CollectionObject.ARR_LIST, [["value_2_1", "1.0"], ["value_2_2", "0.25"]]), + CollectionObject), + "key_3": ((CollectionObject.ARR_LIST, [["value_3_1", "1.0"], ["value_3_2", "0.25"]]), + CollectionObject), + "key_4": ((CollectionObject.ARR_LIST, [["value_4_1", "1.0"], ["value_4_2", "0.25"]]), + CollectionObject), 'key_5': False, "key_6": "value_6"}), MapObject) for i in range(10000)} diff --git a/tests/common/test_sql.py b/tests/common/test_sql.py index 0841b7f..b947fbc 100644 --- a/tests/common/test_sql.py +++ b/tests/common/test_sql.py @@ -325,3 +325,131 @@ async def async_inner(): assert test_value == received return async_inner() if isinstance(cache, AioCache) else inner() + + +VARBIN_CREATE_QUERY = 'CREATE TABLE VarbinTable(id int primary key, varbin VARBINARY)' +VARBIN_DROP_QUERY = 'DROP TABLE VarbinTable' +VARBIN_MERGE_QUERY = 'MERGE INTO VarbinTable(id, varbin) VALUES (?, ?)' +VARBIN_SELECT_QUERY = 'SELECT * FROM VarbinTable' + +VARBIN_TEST_PARAMS = [ + bytearray('Test message', 'UTF-8'), + bytes('Test message', 'UTF-8') +] + + +@pytest.fixture +def varbin_table(client): + client.sql(VARBIN_CREATE_QUERY) + yield None + client.sql(VARBIN_DROP_QUERY) + + +@pytest.mark.parametrize( + 'value', VARBIN_TEST_PARAMS +) +def test_sql_cache_varbinary_handling(client, varbin_table, value): + client.sql(VARBIN_MERGE_QUERY, query_args=(1, value)) + with client.sql(VARBIN_SELECT_QUERY) as cursor: + for row in cursor: + assert isinstance(row[1], bytes) + assert row[1] == value + break + + +@pytest.fixture +async def varbin_table_async(async_client): + await async_client.sql(VARBIN_CREATE_QUERY) + yield None + await async_client.sql(VARBIN_DROP_QUERY) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'value', VARBIN_TEST_PARAMS +) +async def test_sql_cache_varbinary_handling_async(async_client, varbin_table_async, value): + await async_client.sql(VARBIN_MERGE_QUERY, query_args=(1, value)) + async with async_client.sql(VARBIN_SELECT_QUERY) as cursor: + async for row in cursor: + assert isinstance(row[1], bytes) + assert row[1] == value + break + + +@pytest.fixture +def varbin_cache_settings(): + cache_name = 'varbin_cache' + table_name = f'{cache_name}_table'.upper() + + yield { + PROP_NAME: cache_name, + 
PROP_SQL_SCHEMA: 'PUBLIC', + PROP_CACHE_MODE: CacheMode.PARTITIONED, + PROP_QUERY_ENTITIES: [ + { + 'table_name': table_name, + 'key_field_name': 'ID', + 'value_field_name': 'VALUE', + 'key_type_name': 'java.lang.Long', + 'value_type_name': 'byte[]', + 'query_indexes': [], + 'field_name_aliases': [], + 'query_fields': [ + { + 'name': 'ID', + 'type_name': 'java.lang.Long', + 'is_key_field': True, + 'is_notnull_constraint_field': True, + }, + { + 'name': 'VALUE', + 'type_name': 'byte[]', + }, + ], + }, + ], + } + + +VARBIN_CACHE_TABLE_NAME = 'varbin_cache_table'.upper() +VARBIN_CACHE_SELECT_QUERY = f'SELECT * FROM {VARBIN_CACHE_TABLE_NAME}' + + +@pytest.fixture +def varbin_cache(client, varbin_cache_settings): + cache = client.get_or_create_cache(varbin_cache_settings) + yield cache + cache.destroy() + + +@pytest.mark.parametrize( + 'value', VARBIN_TEST_PARAMS +) +def test_cache_varbinary_handling(client, varbin_cache, value): + varbin_cache.put(1, value) + with client.sql(VARBIN_CACHE_SELECT_QUERY) as cursor: + for row in cursor: + assert isinstance(row[1], bytes) + assert row[1] == value + break + + +@pytest.fixture +async def varbin_cache_async(async_client, varbin_cache_settings): + cache = await async_client.get_or_create_cache(varbin_cache_settings) + yield cache + await cache.destroy() + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'value', VARBIN_TEST_PARAMS +) +async def test_cache_varbinary_handling_async(async_client, varbin_cache_async, value): + await varbin_cache_async.put(1, value) + async with async_client.sql(VARBIN_CACHE_SELECT_QUERY) as cursor: + async for row in cursor: + assert isinstance(row[1], bytes) + assert row[1] == value + break From fb400f17b455c6d0228124e86e85da23f56c7846 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Mon, 12 Apr 2021 13:14:45 +0300 Subject: [PATCH 30/62] IGNITE-14518 Add proper license and long description to package info - Fixes #31. --- scripts/apply_pull_request.sh | 239 +++++++++++++++++++++++++++++++++ scripts/git_patch_functions.sh | 129 ++++++++++++++++++ setup.py | 4 +- 3 files changed, 371 insertions(+), 1 deletion(-) create mode 100755 scripts/apply_pull_request.sh create mode 100644 scripts/git_patch_functions.sh diff --git a/scripts/apply_pull_request.sh b/scripts/apply_pull_request.sh new file mode 100755 index 0000000..ba05a82 --- /dev/null +++ b/scripts/apply_pull_request.sh @@ -0,0 +1,239 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# Pull request applier. +# + +# +# Start of Functions. +# + +# +# Prints usage. +# +usage () { + echo 'Usage: scripts/apply-pull-request.sh [-tb|--targetbranch ] [--with-gpg] [-s|--sign-off]' + echo 'The script takes pull-request by given id and merges (with squash) all changes to target branch (master by default).' 
+ echo "Argument 'pull-request-id' is mandatory." + echo "Target branch can be overwritten by using [-tb|--targetbranch ] argument paramethers." +} + +# +# End of Functions. +# + +if [ "${GIT_HOME}" = "" ]; then + GIT_HOME="$(dirname "$(cd "$(dirname "$0")"; "pwd")")"; +fi + +cd "${GIT_HOME}" || { echo "failed to change director ${GIT_HOME}"; exit 1; } + +if [ "${SCRIPTS_HOME}" = "" ]; then + SCRIPTS_HOME="${GIT_HOME}/scripts/" +fi + +. "${SCRIPTS_HOME}"/git_patch_functions.sh # Import patch functions. + +PR_ID=$1 + +# +# Start reading of command line params. +# +if [ "${PR_ID}" = "" ]; then + echo "$0, ERROR:" + echo >&2 "You have to specify 'pull-request-id'." + echo + usage + exit 1 +fi + +if [ "${PR_ID}" = "-h" ]; then + usage + exit 0 +fi + +if [ "${PR_ID}" = "--help" ]; then + usage + exit 0 +fi + + +while [[ $# -ge 2 ]] +do + key="$2" + + case $key in + -tb|--targetbranch) + TARGET_BRANCH="$3" + shift 2 + ;; + + --with-gpg) + WITH_GPG="true" + shift + ;; + + -s|--sign-off) + WITH_SIGN_OFF="true" + shift + ;; + + *) + echo "Unknown parameter: ${key}" + echo + usage + exit 1 + ;; + esac +done +# +# Enf reading of command line params. +# + + +# Script variables. +if [ "${APACHE_GIT}" = "" ]; then + APACHE_GIT="https://gitbox.apache.org/repos/asf/ignite-python-thin-client.git" +fi + +if [ "${GITHUB_MIRROR}" = "" ]; then + GITHUB_MIRROR="git@github.com:apache/ignite-python-thin-client.git" +fi + +if [ "${TARGET_BRANCH}" = "" ]; then + TARGET_BRANCH="master" +fi + +requireCleanWorkTree "${GIT_HOME}" + +CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD) + +if [ "$CURRENT_BRANCH" != "${TARGET_BRANCH}" ]; then + echo "$0, ERROR:" + echo "You have to be on ${TARGET_BRANCH} branch." + + exit 1 +fi + +# Check that target branch is up-to-date. +APACHE_GIT_TARGET_BRANCH="apache-git-target-br-tmp" + +git fetch ${APACHE_GIT} ${TARGET_BRANCH}:${APACHE_GIT_TARGET_BRANCH} &> /dev/null +if test $? != 0; then + echo "$0, ERROR:" + echo >&2 "Couldn't fetch '${TARGET_BRANCH}' branch from ${APACHE_GIT}." + exit 1 +fi + +LOCAL_TARGET_BR_HASH=$(git rev-parse @) +REMOTE_TARGET_BR_HASH=$(git rev-parse ${APACHE_GIT_TARGET_BRANCH}) +BASE_HASH=$(git merge-base @ ${APACHE_GIT_TARGET_BRANCH}) + +git branch -D ${APACHE_GIT_TARGET_BRANCH} &> /dev/null + +if [ "$LOCAL_TARGET_BR_HASH" != "$REMOTE_TARGET_BR_HASH" ]; then + echo "$0, ERROR:" + + if [ "$LOCAL_TARGET_BR_HASH" = "$BASE_HASH" ]; then + echo "Your local ${TARGET_BRANCH} branch is not up-to-date. You need to pull." + elif [ "$REMOTE_TARGET_BR_HASH" = "$BASE_HASH" ]; then + echo "Your local ${TARGET_BRANCH} branch is ahead of ${TARGET_BRANCH} branch at Apache git. You need to push." + else + echo "Your local ${TARGET_BRANCH} and Apache git ${TARGET_BRANCH} branches diverged. You need to pull, merge and pull." + fi + + exit 1 +fi + +echo "Local ${TARGET_BRANCH} is Up-to-date." +echo + +# Checkout pull-request branch. +PR_BRANCH_NAME="pull-${PR_ID}-head" + +git fetch "${GITHUB_MIRROR}" "pull/${PR_ID}/head:${PR_BRANCH_NAME}" &> /dev/null +if test $? != 0; then + echo "$0, ERROR:" + echo >&2 "There was not found pull request by ID = '${PR_ID}'." + exit 1 +fi + +# Get author name number. +git checkout "${PR_BRANCH_NAME}" &> /dev/null +if test $? != 0; then + echo "$0, ERROR:" + echo >&2 "Failed to checkout '${PR_BRANCH_NAME}' branch (the branch not found or already exists)." + exit 1 +fi + +AUTHOR="$(git --no-pager show -s --format="%aN <%aE>" HEAD)" +ORIG_COMMENT="$(git log -1 --pretty=%B)" + +echo "Author of pull-request: '$AUTHOR'." 
+echo + +# Update local target branch. +git checkout ${TARGET_BRANCH} &> /dev/null + +# Take changes. +git merge --squash "${PR_BRANCH_NAME}" &> /dev/null +if test $? != 0; then + git reset --hard &> /dev/null + + echo "$0, ERROR:" + echo >&2 "Could not merge the pull-request to ${TARGET_BRANCH} without conflicts. All local changes have been discarded. You're on ${TARGET_BRANCH} branch." + exit 1 +fi + +echo "Original comment is" +echo "\"${ORIG_COMMENT}\"" +echo "Press [ENTER] if you're agree with the comment or type your comment and press [ENTER]:" +read -r COMMENT +echo + +if [ "${COMMENT}" == "" ]; then + COMMENT=${ORIG_COMMENT} +fi + +COMMENT="${COMMENT} - Fixes #${PR_ID}." + +if [ "${EXCLUDE_SPECIAL_FILE}" = "true" ]; then + git checkout HEAD ignite-pull-request-id +fi + +SIGN_OPTION="" +if [ -n "${WITH_GPG}" ]; then + SIGN_OPTION="-S" +fi + +if [ -n "${WITH_SIGN_OFF}" ]; then + SIGN_OPTION="${SIGN_OPTION} -s" +fi + +git commit --author "${AUTHOR}" -a ${SIGN_OPTION} -m "${COMMENT}" &> /dev/null + +echo "Squash commit for pull request with id='${PR_ID}' has been added. The commit has been added with comment '${COMMENT}'." +echo "Now you can review changes of the last commit at ${TARGET_BRANCH} and push it into ${APACHE_GIT} git after." +echo "If you want to decline changes, you can remove the last commit from your repo by 'git reset --hard HEAD^'." +echo + +# Clean-up. +git branch -D "${PR_BRANCH_NAME}" &> /dev/null + +echo 'Successfully completed.' diff --git a/scripts/git_patch_functions.sh b/scripts/git_patch_functions.sh new file mode 100644 index 0000000..cc3aac3 --- /dev/null +++ b/scripts/git_patch_functions.sh @@ -0,0 +1,129 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# Git patch functions. +# + +# +# Define functions. +# + +# +# Formats patch. Create patch in one commit from user who run script and with default comment. +# +# Params: +# - Git home. +# - Default branch. +# - Patch with patch. +# - Suffix for created patch-file. +# +formatPatch () { + GIT_HOME=$1 + DEFAULT_BRANCH=$2 + PATCHED_BRANCH=$3 + PATCH_SUFFIX=$4 + + if [ "${IGNITE_CURRENT_BRANCH}" = "${IGNITE_DEFAULT_BRANCH}" ] + then + echo "$0, ERROR:" + echo "You are on Default branch. Please, checkout branch with changes." + + exit 1 + fi + + cd "${GIT_HOME}" || { echo "failed to change directory to ${GIT_HOME}"; exit 1; } + + git checkout "${DEFAULT_BRANCH}" + + DEF_BRANCH_REV="$(git rev-parse --short HEAD)" + + git checkout -b tmppatch + + # Merge to make only one commit. + git merge --squash ${PATCHED_BRANCH} + git commit -a -m "# ${PATCHED_BRANCH}" + + PATCH_FILE=${PATCHES_HOME}'/'${DEFAULT_BRANCH}_${DEF_BRANCH_REV}_${PATCHED_BRANCH}${PATCH_SUFFIX} + + git format-patch ${DEFAULT_BRANCH} --stdout > ${PATCH_FILE} + echo "Patch file created." 
+ + git checkout ${PATCHED_BRANCH} + + git branch -D tmppatch # Delete tmp branch. + + echo + echo "Patch created: ${PATCH_FILE}" +} + +# +# Determines Current branch. +# +# Params: +# - Git home. +# Return - Current branch. +# +determineCurrentBranch () { + GIT_HOME=$1 + + cd ${GIT_HOME} || { echo "failed to change directory to $1"; exit 1; } + + CURRENT_BRANCH=`git rev-parse --abbrev-ref HEAD` + + echo "$CURRENT_BRANCH" +} + +# +# Checks that given git repository has clean work tree (there is no uncommited changes). +# Exit with code 1 in error case. +# +# Params: +# - Git home. +# +requireCleanWorkTree () { + cd "$1" || { echo "failed to change directory to $1"; exit 1; } # At git home. + + # Update the index + git update-index -q --ignore-submodules --refresh + err=0 + + # Disallow unstaged changes in the working tree + if ! git diff-files --quiet --ignore-submodules -- + then + echo "$0, ERROR:" + echo >&2 "You have unstaged changes." + git diff-files --name-status -r --ignore-submodules -- >&2 + err=1 + fi + + # Disallow uncommitted changes in the index + if ! git diff-index --cached --quiet HEAD --ignore-submodules -- + then + echo "$0, ERROR:" + echo >&2 "Your index contains uncommitted changes." + git diff-index --cached --name-status -r --ignore-submodules HEAD -- >&2 + err=1 + fi + + if [ $err = 1 ] + then + echo >&2 "Please commit or stash them." + exit 1 + fi +} diff --git a/setup.py b/setup.py index 5db3aed..4c2ab22 100644 --- a/setup.py +++ b/setup.py @@ -86,7 +86,6 @@ def is_a_requirement(line): with open('README.md', 'r', encoding='utf-8') as readme_file: long_description = readme_file.read() -version = '' with open('pyignite/__init__.py', 'r') as fd: version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) @@ -111,6 +110,8 @@ def run_setup(with_binary=True): author='The Apache Software Foundation', author_email='dev@ignite.apache.org', description='Apache Ignite binary client Python API', + long_description=long_description, + long_description_content_type='text/markdown', url='https://github.com/apache/ignite-python-thin-client', packages=setuptools.find_packages(), install_requires=requirements['install'], @@ -119,6 +120,7 @@ def run_setup(with_binary=True): extras_require={ 'docs': requirements['docs'], }, + license="Apache License 2.0", classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 3', From 453ea5d9cf9a8c0ebb6aa92d8268f02d195230d5 Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Wed, 14 Apr 2021 16:09:32 +0300 Subject: [PATCH 31/62] IGNITE-14534 Add script to build wheels on Windows This closes #32 --- README.md | 8 ++++++-- scripts/BuildWheels.ps1 | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 2 deletions(-) create mode 100644 scripts/BuildWheels.ps1 diff --git a/README.md b/README.md index f44276f..7a23147 100644 --- a/README.md +++ b/README.md @@ -45,9 +45,13 @@ There is an optional C extension to speedup some computational intensive tasks. ***NB!* Docker is required.** - Ready wheels for `x86` and `x86-64` for different python versions (3.6, 3.7, 3.8 and 3.9) will be - located in `./distr` directory. +- On Windows MSVC 14.x required, and it should be in path, also python versions 3.6, 3.7, 3.8 and 3.9 both for x86 and + x86-64 should be installed. You can disable some of these versions but you'd need to edit script for that. +- For building `wheels` for Windows, invoke script `.\scripts\BuildWheels.ps1` using PowerShell. 
Just make sure that + your execution policy allows execution of scripts in your environment. + Ready wheels for `x86` and `x86-64` for different python versions (3.6, 3.7, 3.8 and 3.9) will be + located in `distr` directory. ### Updating from older version diff --git a/scripts/BuildWheels.ps1 b/scripts/BuildWheels.ps1 new file mode 100644 index 0000000..cf7424e --- /dev/null +++ b/scripts/BuildWheels.ps1 @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +$PyVers="36","37","38","39" + +[System.Collections.ArrayList]$PyVersFull = $PyVers +foreach ($Ver in $PyVers) +{ + [Void]$PyVersFull.Add("$Ver-32") +} + +foreach ($Ver in $PyVersFull) +{ + & "$env:LOCALAPPDATA\Programs\Python\Python$Ver\python.exe" -m venv epy$Ver + + . ".\epy$Ver\Scripts\Activate.ps1" + pip install -e . + pip install wheel + pip wheel . --no-deps -w distr +} + From 572c8b952cf1c2d90fc874c1e896e5571ec48174 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Wed, 14 Apr 2021 19:05:15 +0300 Subject: [PATCH 32/62] Update version for next release --- pyignite/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyignite/__init__.py b/pyignite/__init__.py index c26c59a..4b77f68 100644 --- a/pyignite/__init__.py +++ b/pyignite/__init__.py @@ -17,4 +17,4 @@ from pyignite.aio_client import AioClient from pyignite.binary import GenericObjectMeta -__version__ = '0.4.0-dev' +__version__ = '0.5.0-dev' From 4b39956754e35fa0bcc993e5cc2263042fb528a8 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Wed, 14 Apr 2021 19:38:38 +0300 Subject: [PATCH 33/62] Add release notes for 0.4.0 --- RELEASE_NOTES.txt | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 RELEASE_NOTES.txt diff --git a/RELEASE_NOTES.txt b/RELEASE_NOTES.txt new file mode 100644 index 0000000..9fee8ea --- /dev/null +++ b/RELEASE_NOTES.txt @@ -0,0 +1,26 @@ +Apache Ignite python thin client +================================ + +0.4.0 +-------------------------------- +* Added partition awareness support +* Added asyncio support +* Added C module to speedup hashcode calculation +* Implement context management for connection method +* Implement cursors and context management for ScanQuery, SqlQuery and SqlFieldsQuery +* Add the ability to activate/deactivate the cluster +* Implement support for big-endianness +* Implement support of password for certificates +* Fix performance issues while working with big bytearrays and binary objects +* Fix serialization/deserialization of cache configuration +* Fix handling of null fields +* Fix SQL API +* Fix UUID serialization/deserialization +* Fix nested complex objects +* Fix incorrect hash code calculation for classes as composite keys +* Fix hashing of complex object +* Fix insert and select VARBINARY data type through SQL +* Fix 
wrong order of the SQL query result +* Fix handling of bytes and bytearrays +* Fix bool arrays handling +* Fix cache.get_size with non-default PeekModes From 8b1e61c87e423058a23112ceade542465d8f710f Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Fri, 16 Apr 2021 11:46:01 +0300 Subject: [PATCH 34/62] IGNITE-14564 Add LICENSE and NOTICE files to all artifacts - Fixes #34. --- MANIFEST.in | 13 ++++++- NOTICE | 5 +++ examples/docker-compose.yml | 36 +++++++++++++++++++ examples/readme.md | 6 ++++ setup.py | 1 + .../affinity/test_affinity_request_routing.py | 2 +- 6 files changed, 61 insertions(+), 2 deletions(-) create mode 100644 NOTICE create mode 100644 examples/docker-compose.yml diff --git a/MANIFEST.in b/MANIFEST.in index 783a2fe..3f62aea 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,2 +1,13 @@ -recursive-include requirements * +graft requirements +graft examples +graft docs +recursive-exclude docs/generated * +graft tests +recursive-exclude tests/config *.xml +recursive-exclude tests/logs * +global-exclude *.py[cod] +global-exclude *__pycache__* +include tox.ini include README.md +include LICENSE +include NOTICE diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000..feeebfd --- /dev/null +++ b/NOTICE @@ -0,0 +1,5 @@ +Apache Ignite binary client Python API +Copyright 2021 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/examples/docker-compose.yml b/examples/docker-compose.yml new file mode 100644 index 0000000..76c91b3 --- /dev/null +++ b/examples/docker-compose.yml @@ -0,0 +1,36 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +services: + ignite_0: + image: apacheignite/ignite:latest + ports: + - 10800:10800 + restart: always + network_mode: host + + ignite_1: + image: apacheignite/ignite:latest + ports: + - 10800:10801 + restart: always + network_mode: host + + ignite_2: + image: apacheignite/ignite:latest + ports: + - 10800:10802 + restart: always + network_mode: host diff --git a/examples/readme.md b/examples/readme.md index 3caf6c1..8fd4848 100644 --- a/examples/readme.md +++ b/examples/readme.md @@ -15,3 +15,9 @@ This directory contains the following example files: For the explanation of the examples please refer to the [Examples of usage](https://apache-ignite-binary-protocol-client.readthedocs.io/en/latest/examples.html) section of the `pyignite` documentation. 
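All of the examples boil down to the same basic pattern: connect to a node, obtain a cache and work with it. The
sketch below is only an illustration of that pattern; the cache name and the `127.0.0.1:10800` address are
assumptions (the default thin client port, matching the first node of the docker-compose setup described below),
not part of the shipped examples.

```python
from pyignite import Client

client = Client()
# Assumes a local Ignite node listening on the default thin client port 10800,
# e.g. one started with the docker-compose setup described below.
with client.connect('127.0.0.1', 10800):
    cache = client.get_or_create_cache('example_cache')  # hypothetical cache name
    cache.put('key', 'value')
    print(cache.get('key'))  # -> 'value'
    cache.destroy()
```

The asynchronous examples follow the same shape, using `AioClient` and `await`.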
+ +You can start Apache Ignite locally for running examples using `docker` and `docker-compose` +```bash +cd ./examples +docker-compose up +``` diff --git a/setup.py b/setup.py index 4c2ab22..7a3cb70 100644 --- a/setup.py +++ b/setup.py @@ -121,6 +121,7 @@ def run_setup(with_binary=True): 'docs': requirements['docs'], }, license="Apache License 2.0", + license_files=('LICENSE', 'NOTICE'), classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 3', diff --git a/tests/affinity/test_affinity_request_routing.py b/tests/affinity/test_affinity_request_routing.py index 9c94aa4..90c71b2 100644 --- a/tests/affinity/test_affinity_request_routing.py +++ b/tests/affinity/test_affinity_request_routing.py @@ -413,7 +413,7 @@ async def test_new_registered_cache_affinity_async(async_client): async with create_caches_async(async_client) as caches: key = 12 test_cache = random.choice(caches) - test_cache.put(key, key) + await test_cache.put(key, key) await wait_for_affinity_distribution_async(test_cache, key, 3) caches.append(await async_client.create_cache('new_cache')) From 9f72781795e6fa1f427e559426ab931a49772716 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Fri, 23 Apr 2021 10:45:03 +0300 Subject: [PATCH 35/62] IGNITE-14595 Implement ExpiryPolicy support - Fixes #35. --- README.md | 4 +- docs/async_examples.rst | 31 +- docs/datatypes/cache_props.rst | 152 ++--- docs/examples.rst | 32 +- docs/source/pyignite.aio_cluster.rst | 22 + docs/source/pyignite.cluster.rst | 22 + .../pyignite.datatypes.cluster_state.rst | 21 + .../pyignite.datatypes.expiry_policy.rst | 21 + docs/source/pyignite.datatypes.rst | 3 +- docs/source/pyignite.rst | 3 +- examples/expiry_policy.py | 113 ++++ examples/failover.py | 45 +- pyignite/aio_cache.py | 57 +- pyignite/aio_client.py | 17 +- pyignite/aio_cluster.py | 20 +- pyignite/api/cache_config.py | 39 +- pyignite/api/key_value.py | 544 ++++++++---------- pyignite/api/sql.py | 84 ++- pyignite/cache.py | 107 ++-- pyignite/client.py | 21 +- pyignite/cluster.py | 20 +- pyignite/connection/protocol_context.py | 3 + pyignite/cursors.py | 58 +- pyignite/datatypes/__init__.py | 2 + pyignite/datatypes/cache_config.py | 74 +-- pyignite/datatypes/cache_properties.py | 9 +- pyignite/datatypes/cluster_state.py | 4 + pyignite/datatypes/expiry_policy.py | 110 ++++ pyignite/datatypes/prop_codes.py | 1 + pyignite/queries/query.py | 33 +- tests/affinity/conftest.py | 7 +- tests/affinity/test_affinity.py | 4 +- tests/common/conftest.py | 11 + tests/common/test_binary.py | 4 +- tests/common/test_cache_config.py | 13 +- tests/common/test_expiry_policy.py | 171 ++++++ tests/conftest.py | 1 + tests/custom/test_cluster.py | 11 +- 38 files changed, 1235 insertions(+), 659 deletions(-) create mode 100644 docs/source/pyignite.aio_cluster.rst create mode 100644 docs/source/pyignite.cluster.rst create mode 100644 docs/source/pyignite.datatypes.cluster_state.rst create mode 100644 docs/source/pyignite.datatypes.expiry_policy.rst create mode 100644 examples/expiry_policy.py create mode 100644 pyignite/datatypes/expiry_policy.py create mode 100644 tests/common/test_expiry_policy.py diff --git a/README.md b/README.md index 7a23147..8e009de 100644 --- a/README.md +++ b/README.md @@ -3,9 +3,9 @@ Apache Ignite thin (binary protocol) client, written in Python 3. ## Prerequisites -- Python 3.4 or above (3.6, 3.7 and 3.8 are tested), +- Python 3.6 or above (3.6, 3.7, 3.8 and 3.9 are tested), - Access to Apache Ignite node, local or remote. 
The current thin client - version was tested on Apache Ignite 2.7.0 (binary client protocol 1.2.0). + version was tested on Apache Ignite 2.10 (binary client protocol 1.7.0). ## Installation diff --git a/docs/async_examples.rst b/docs/async_examples.rst index 363599a..4bc21ae 100644 --- a/docs/async_examples.rst +++ b/docs/async_examples.rst @@ -48,14 +48,39 @@ that yields the resulting rows. .. literalinclude:: ../examples/async_key_value.py :language: python - :dedent: 4 + :dedent: 8 :lines: 39-50 +ExpiryPolicy +============ +File: `expiry_policy.py`_. -File: `async_sql.py`_. +You can enable expiry policy (TTL) by two approaches. + +Firstly, expiry policy can be set for entire cache by setting :py:attr:`~pyignite.datatypes.prop_codes.PROP_EXPIRY_POLICY` +in cache settings dictionary on creation. + +.. literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 12 + :lines: 72-75 + +.. literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 12 + :lines: 81-89 + +Secondly, expiry policy can be set for all cache operations, which are done under decorator. To create it use +:py:meth:`~pyignite.cache.BaseCache.with_expire_policy` + +.. literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 12 + :lines: 96-105 SQL --- +File: `async_sql.py`_. First let us establish a connection. @@ -146,6 +171,6 @@ Finally, delete the tables used in this example with the following queries: - +.. _expiry_policy.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/expiry_policy.py .. _async_key_value.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_key_value.py .. _async_sql.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_sql.py \ No newline at end of file diff --git a/docs/datatypes/cache_props.rst b/docs/datatypes/cache_props.rst index 3cabbe6..380ccf2 100644 --- a/docs/datatypes/cache_props.rst +++ b/docs/datatypes/cache_props.rst @@ -26,78 +26,80 @@ Please refer to the `Apache Ignite Data Grid`_ documentation on cache synchronization, rebalance, affinity and other cache configuration-related matters. -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| Property | Ordinal | Property | Description | -| name | value | type | | -+=======================================+==========+==========+=======================================================+ -| Read/write cache properties, used to configure cache via :py:meth:`~pyignite.client.Client.create_cache` or | -| :py:meth:`~pyignite.client.Client.get_or_create_cache` of :py:class:`~pyignite.client.Client` | -| (:py:meth:`~pyignite.aio_client.AioClient.create_cache` or | -| :py:meth:`~pyignite.aio_client.AioClient.get_or_create_cache` of :py:class:`~pyignite.aio_client.AioClient`). | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_NAME | 0 | str | Cache name. This is the only *required* property. 
| -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_CACHE_MODE | 1 | int | Cache mode: LOCAL=0, REPLICATED=1, PARTITIONED=2 | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_CACHE_ATOMICITY_MODE | 2 | int | Cache atomicity mode: TRANSACTIONAL=0, ATOMIC=1 | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_BACKUPS_NUMBER | 3 | int | Number of backups | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_WRITE_SYNCHRONIZATION_MODE | 4 | int | Write synchronization mode: FULL_SYNC=0, | -| | | | FULL_ASYNC=1, PRIMARY_SYNC=2 | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_COPY_ON_READ | 5 | bool | Copy-on-read | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_READ_FROM_BACKUP | 6 | bool | Read from backup | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_DATA_REGION_NAME | 100 | str | Data region name | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_IS_ONHEAP_CACHE_ENABLED | 101 | bool | Is OnHeap cache enabled? | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_QUERY_ENTITIES | 200 | list | A list of query entities (see `Query entity`_) | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_QUERY_PARALLELISM | 201 | int | Query parallelism | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_QUERY_DETAIL_METRIC_SIZE | 202 | int | Query detail metric size | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_SQL_SCHEMA | 203 | str | SQL schema | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_SQL_INDEX_INLINE_MAX_SIZE | 204 | int | SQL index inline maximum size | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_SQL_ESCAPE_ALL | 205 | bool | Turns on SQL escapes | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_MAX_QUERY_ITERATORS | 206 | int | Maximum number of query iterators | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_MODE | 300 | int | Rebalance mode: SYNC=0, ASYNC=1, NONE=2 | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_DELAY | 301 | int | Rebalance delay (ms) | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_TIMEOUT | 302 | int | Rebalance timeout (ms) | 
-+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_BATCH_SIZE | 303 | int | Rebalance batch size | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_BATCHES_PREFETCH_COUNT | 304 | int | Rebalance batches prefetch count | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_ORDER | 305 | int | Rebalance order | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_REBALANCE_THROTTLE | 306 | int | Rebalance throttle (ms) | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_GROUP_NAME | 400 | str | Group name | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_CACHE_KEY_CONFIGURATION | 401 | list | Cache key configuration (see `Cache key`_) | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_DEFAULT_LOCK_TIMEOUT | 402 | int | Default lock timeout (ms) | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_MAX_CONCURRENT_ASYNC_OPERATIONS | 403 | int | Maximum number of concurrent asynchronous operations | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_PARTITION_LOSS_POLICY | 404 | int | Partition loss policy: READ_ONLY_SAFE=0, | -| | | | READ_ONLY_ALL=1, READ_WRITE_SAFE=2, READ_WRITE_ALL=3, | -| | | | IGNORE=4 | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_EAGER_TTL | 405 | bool | Eager TTL | -+---------------------------------------+----------+----------+-------------------------------------------------------+ -| PROP_STATISTICS_ENABLED | 406 | bool | Statistics enabled | -+---------------------------------------+----------+----------+-------------------------------------------------------+ ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| Property | Ordinal | Property | Description | +| name | value | type | | ++=======================================+==========+============================================================+=======================================================+ +| Read/write cache properties, used to configure cache via :py:meth:`~pyignite.client.Client.create_cache` or | +| :py:meth:`~pyignite.client.Client.get_or_create_cache` of :py:class:`~pyignite.client.Client` | +| (:py:meth:`~pyignite.aio_client.AioClient.create_cache` or | +| :py:meth:`~pyignite.aio_client.AioClient.get_or_create_cache` of :py:class:`~pyignite.aio_client.AioClient`). | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_NAME | 0 | str | Cache name. This is the only *required* property. 
| ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_CACHE_MODE | 1 | int | Cache mode: LOCAL=0, REPLICATED=1, PARTITIONED=2 | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_CACHE_ATOMICITY_MODE | 2 | int | Cache atomicity mode: TRANSACTIONAL=0, ATOMIC=1 | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_BACKUPS_NUMBER | 3 | int | Number of backups | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_WRITE_SYNCHRONIZATION_MODE | 4 | int | Write synchronization mode: FULL_SYNC=0, | +| | | | FULL_ASYNC=1, PRIMARY_SYNC=2 | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_COPY_ON_READ | 5 | bool | Copy-on-read | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_READ_FROM_BACKUP | 6 | bool | Read from backup | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_DATA_REGION_NAME | 100 | str | Data region name | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_IS_ONHEAP_CACHE_ENABLED | 101 | bool | Is OnHeap cache enabled? 
| ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_QUERY_ENTITIES | 200 | list | A list of query entities (see `Query entity`_) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_QUERY_PARALLELISM | 201 | int | Query parallelism | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_QUERY_DETAIL_METRIC_SIZE | 202 | int | Query detail metric size | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_SQL_SCHEMA | 203 | str | SQL schema | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_SQL_INDEX_INLINE_MAX_SIZE | 204 | int | SQL index inline maximum size | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_SQL_ESCAPE_ALL | 205 | bool | Turns on SQL escapes | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_MAX_QUERY_ITERATORS | 206 | int | Maximum number of query iterators | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_MODE | 300 | int | Rebalance mode: SYNC=0, ASYNC=1, NONE=2 | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_DELAY | 301 | int | Rebalance delay (ms) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_TIMEOUT | 302 | int | Rebalance timeout (ms) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_BATCH_SIZE | 303 | int | Rebalance batch size | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_BATCHES_PREFETCH_COUNT | 304 | int | Rebalance batches prefetch count | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_ORDER | 305 | int | Rebalance order | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_REBALANCE_THROTTLE | 306 | int | Rebalance throttle (ms) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| 
PROP_GROUP_NAME | 400 | str | Group name | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_CACHE_KEY_CONFIGURATION | 401 | list | Cache key configuration (see `Cache key`_) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_DEFAULT_LOCK_TIMEOUT | 402 | int | Default lock timeout (ms) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_MAX_CONCURRENT_ASYNC_OPERATIONS | 403 | int | Maximum number of concurrent asynchronous operations | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_PARTITION_LOSS_POLICY | 404 | int | Partition loss policy: READ_ONLY_SAFE=0, | +| | | | READ_ONLY_ALL=1, READ_WRITE_SAFE=2, READ_WRITE_ALL=3, | +| | | | IGNORE=4 | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_EAGER_TTL | 405 | bool | Eager TTL | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_STATISTICS_ENABLED | 406 | bool | Statistics enabled | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ +| PROP_EXPIRY_POLICY | 407 | :py:class:`~pyignite.datatypes.expiry_policy.ExpiryPolicy` | Set expiry policy (see `Expiry policy`_) | ++---------------------------------------+----------+------------------------------------------------------------+-------------------------------------------------------+ Query entity ------------ @@ -159,3 +161,9 @@ A dict of the following format: - `affinity_key_field_name`: name of the affinity key field. .. _Apache Ignite Data Grid: https://apacheignite.readme.io/docs/data-grid + +Expiry policy +------------- + +Set expiry policy to cache (see :py:class:`~pyignite.datatypes.expiry_policy.ExpiryPolicy`). If set to `None`, +expiry policy will not be set. diff --git a/docs/examples.rst b/docs/examples.rst index 0379330..07ec65c 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -85,6 +85,33 @@ As a rule of thumb: Refer the :ref:`data_types` section for the full list of parser/constructor classes you can use as type hints. +ExpiryPolicy +============ +File: `expiry_policy.py`_. + +You can enable expiry policy (TTL) by two approaches. + +Firstly, expiry policy can be set for entire cache by setting :py:attr:`~pyignite.datatypes.prop_codes.PROP_EXPIRY_POLICY` +in cache settings dictionary on creation. + +.. literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 12 + :lines: 31-34 + +.. literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 12 + :lines: 40-46 + +Secondly, expiry policy can be set for all cache operations, which are done under decorator. To create it use +:py:meth:`~pyignite.cache.BaseCache.with_expire_policy` + +.. 
literalinclude:: ../examples/expiry_policy.py + :language: python + :dedent: 12 + :lines: 53-60 + Scan ==== File: `scans.py`_. @@ -558,13 +585,13 @@ Gather 3 Ignite nodes on `localhost` into one cluster and run: .. literalinclude:: ../examples/failover.py :language: python - :lines: 16-53 + :lines: 16-52 Then try shutting down and restarting nodes, and see what happens. .. literalinclude:: ../examples/failover.py :language: python - :lines: 55-67 + :lines: 54-66 Client reconnection do not require an explicit user action, like calling a special method or resetting a parameter. @@ -683,6 +710,7 @@ with the following message: .. _type_hints.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/type_hints.py .. _failover.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/failover.py .. _scans.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/scans.py +.. _expiry_policy.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/expiry_policy.py .. _sql.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/sql.py .. _async_sql.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_sql.py .. _binary_basics.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/binary_basics.py diff --git a/docs/source/pyignite.aio_cluster.rst b/docs/source/pyignite.aio_cluster.rst new file mode 100644 index 0000000..ee2fa1b --- /dev/null +++ b/docs/source/pyignite.aio_cluster.rst @@ -0,0 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.aio_cluster module +=========================== + +.. automodule:: pyignite.aio_cluster + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/pyignite.cluster.rst b/docs/source/pyignite.cluster.rst new file mode 100644 index 0000000..cacdfb7 --- /dev/null +++ b/docs/source/pyignite.cluster.rst @@ -0,0 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.cluster module +======================= + +.. 
automodule:: pyignite.cluster + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/pyignite.datatypes.cluster_state.rst b/docs/source/pyignite.datatypes.cluster_state.rst new file mode 100644 index 0000000..a1d7663 --- /dev/null +++ b/docs/source/pyignite.datatypes.cluster_state.rst @@ -0,0 +1,21 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.datatypes.cluster_state module +======================================= + +.. automodule:: pyignite.datatypes.cluster_state + :members: + :show-inheritance: diff --git a/docs/source/pyignite.datatypes.expiry_policy.rst b/docs/source/pyignite.datatypes.expiry_policy.rst new file mode 100644 index 0000000..87d651e --- /dev/null +++ b/docs/source/pyignite.datatypes.expiry_policy.rst @@ -0,0 +1,21 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.datatypes.expiry_policy module +======================================= + +.. 
automodule:: pyignite.datatypes.expiry_policy + :members: + :show-inheritance: diff --git a/docs/source/pyignite.datatypes.rst b/docs/source/pyignite.datatypes.rst index 269d500..70f7714 100644 --- a/docs/source/pyignite.datatypes.rst +++ b/docs/source/pyignite.datatypes.rst @@ -31,6 +31,8 @@ Submodules pyignite.datatypes.cache_config pyignite.datatypes.cache_properties pyignite.datatypes.complex + pyignite.datatypes.cluster_state + pyignite.datatypes.expiry_policy pyignite.datatypes.internal pyignite.datatypes.key_value pyignite.datatypes.null_object @@ -39,4 +41,3 @@ Submodules pyignite.datatypes.primitive_objects pyignite.datatypes.sql pyignite.datatypes.standard - diff --git a/docs/source/pyignite.rst b/docs/source/pyignite.rst index 85e31a8..c2a36fe 100644 --- a/docs/source/pyignite.rst +++ b/docs/source/pyignite.rst @@ -39,7 +39,8 @@ Submodules pyignite.aio_cache pyignite.client pyignite.aio_client - pyignite.constants + pyignite.cluster + pyignite.aio_cluster pyignite.cursors pyignite.exceptions diff --git a/examples/expiry_policy.py b/examples/expiry_policy.py new file mode 100644 index 0000000..2002da1 --- /dev/null +++ b/examples/expiry_policy.py @@ -0,0 +1,113 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import asyncio +import time + +from pyignite import Client, AioClient +from pyignite.datatypes import ExpiryPolicy +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_EXPIRY_POLICY +from pyignite.exceptions import NotSupportedByClusterError + + +def main(): + print("Running sync ExpiryPolicy example.") + + client = Client() + with client.connect('127.0.0.1', 10800): + print("Create cache with expiry policy.") + try: + ttl_cache = client.create_cache({ + PROP_NAME: 'test', + PROP_EXPIRY_POLICY: ExpiryPolicy(create=1.0) + }) + except NotSupportedByClusterError: + print("'ExpiryPolicy' API is not supported by cluster. 
Finishing...") + return + + try: + ttl_cache.put(1, 1) + time.sleep(0.5) + print(f"key = {1}, value = {ttl_cache.get(1)}") + # key = 1, value = 1 + time.sleep(1.2) + print(f"key = {1}, value = {ttl_cache.get(1)}") + # key = 1, value = None + finally: + ttl_cache.destroy() + + print("Create simple Cache and set TTL through `with_expire_policy`") + simple_cache = client.create_cache('test') + try: + ttl_cache = simple_cache.with_expire_policy(access=1.0) + ttl_cache.put(1, 1) + time.sleep(0.5) + print(f"key = {1}, value = {ttl_cache.get(1)}") + # key = 1, value = 1 + time.sleep(1.7) + print(f"key = {1}, value = {ttl_cache.get(1)}") + # key = 1, value = None + finally: + simple_cache.destroy() + + +async def async_main(): + print("Running async ExpiryPolicy example.") + + client = AioClient() + async with client.connect('127.0.0.1', 10800): + print("Create cache with expiry policy.") + try: + ttl_cache = await client.create_cache({ + PROP_NAME: 'test', + PROP_EXPIRY_POLICY: ExpiryPolicy(create=1.0) + }) + except NotSupportedByClusterError: + print("'ExpiryPolicy' API is not supported by cluster. Finishing...") + return + + try: + await ttl_cache.put(1, 1) + await asyncio.sleep(0.5) + value = await ttl_cache.get(1) + print(f"key = {1}, value = {value}") + # key = 1, value = 1 + await asyncio.sleep(1.2) + value = await ttl_cache.get(1) + print(f"key = {1}, value = {value}") + # key = 1, value = None + finally: + await ttl_cache.destroy() + + print("Create simple Cache and set TTL through `with_expire_policy`") + simple_cache = await client.create_cache('test') + try: + ttl_cache = simple_cache.with_expire_policy(access=1.0) + await ttl_cache.put(1, 1) + await asyncio.sleep(0.5) + value = await ttl_cache.get(1) + print(f"key = {1}, value = {value}") + # key = 1, value = 1 + await asyncio.sleep(1.7) + value = await ttl_cache.get(1) + print(f"key = {1}, value = {value}") + # key = 1, value = None + finally: + await simple_cache.destroy() + +if __name__ == '__main__': + main() + + loop = asyncio.get_event_loop() + loop.run_until_complete(async_main()) diff --git a/examples/failover.py b/examples/failover.py index 21ab547..3a5ee42 100644 --- a/examples/failover.py +++ b/examples/failover.py @@ -26,31 +26,30 @@ ] -def main(): - client = Client(timeout=4.0) - with client.connect(nodes): - print('Connected') +client = Client(timeout=4.0) +with client.connect(nodes): + print('Connected') - my_cache = client.get_or_create_cache({ - PROP_NAME: 'my_cache', - PROP_CACHE_MODE: CacheMode.PARTITIONED, - PROP_BACKUPS_NUMBER: 2, - }) - my_cache.put('test_key', 0) - test_value = 0 + my_cache = client.get_or_create_cache({ + PROP_NAME: 'my_cache', + PROP_CACHE_MODE: CacheMode.PARTITIONED, + PROP_BACKUPS_NUMBER: 2, + }) + my_cache.put('test_key', 0) + test_value = 0 - # abstract main loop - while True: - try: - # do the work - test_value = my_cache.get('test_key') or 0 - my_cache.put('test_key', test_value + 1) - except (OSError, SocketError) as e: - # recover from error (repeat last command, check data - # consistency or just continue − depends on the task) - print(f'Error: {e}') - print(f'Last value: {test_value}') - print('Reconnecting') + # abstract main loop + while True: + try: + # do the work + test_value = my_cache.get('test_key') or 0 + my_cache.put('test_key', test_value + 1) + except (OSError, SocketError) as e: + # recover from error (repeat last command, check data + # consistency or just continue − depends on the task) + print(f'Error: {e}') + print(f'Last value: {test_value}') + print('Reconnecting') 
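# NOTE: the comment block below shows sample console output for the loop above;
# the exact messages depend on which node goes down and when.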
# Connected # Error: Connection broken. diff --git a/pyignite/aio_cache.py b/pyignite/aio_cache.py index 32f2cb2..f088844 100644 --- a/pyignite/aio_cache.py +++ b/pyignite/aio_cache.py @@ -15,6 +15,7 @@ import asyncio from typing import Any, Iterable, Optional, Union +from .datatypes import ExpiryPolicy from .datatypes.internal import AnyDataObject from .exceptions import CacheCreationError, CacheError, ParameterError from .utils import status_to_exception @@ -80,17 +81,17 @@ class AioCache(BaseCache): :py:meth:`~pyignite.aio_client.AioClient.get_cache` methods instead. See :ref:`this example ` on how to do it. """ - def __init__(self, client: 'AioClient', name: str): + def __init__(self, client: 'AioClient', name: str, expiry_policy: ExpiryPolicy = None): """ Initialize async cache object. For internal use. :param client: Async Ignite client, :param name: Cache name. """ - super().__init__(client, name) + super().__init__(client, name, expiry_policy) async def _get_best_node(self, key=None, key_hint=None): - return await self.client.get_best_node(self._cache_id, key, key_hint) + return await self.client.get_best_node(self, key, key_hint) async def settings(self) -> Optional[dict]: """ @@ -103,7 +104,7 @@ async def settings(self) -> Optional[dict]: """ if self._settings is None: conn = await self._get_best_node() - config_result = await cache_get_configuration_async(conn, self._cache_id) + config_result = await cache_get_configuration_async(conn, self.cache_info) if config_result.status == 0: self._settings = config_result.value @@ -118,7 +119,7 @@ async def destroy(self): Destroys cache with a given name. """ conn = await self._get_best_node() - return await cache_destroy_async(conn, self._cache_id) + return await cache_destroy_async(conn, self.cache_id) @status_to_exception(CacheError) async def get(self, key, key_hint: object = None) -> Any: @@ -134,7 +135,7 @@ async def get(self, key, key_hint: object = None) -> Any: key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - result = await cache_get_async(conn, self._cache_id, key, key_hint=key_hint) + result = await cache_get_async(conn, self.cache_info, key, key_hint=key_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -155,7 +156,7 @@ async def put(self, key, value, key_hint: object = None, value_hint: object = No key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - return await cache_put_async(conn, self._cache_id, key, value, key_hint=key_hint, value_hint=value_hint) + return await cache_put_async(conn, self.cache_info, key, value, key_hint=key_hint, value_hint=value_hint) @status_to_exception(CacheError) async def get_all(self, keys: list) -> list: @@ -166,7 +167,7 @@ async def get_all(self, keys: list) -> list: :return: a dict of key-value pairs. 
""" conn = await self._get_best_node() - result = await cache_get_all_async(conn, self._cache_id, keys) + result = await cache_get_all_async(conn, self.cache_info, keys) if result.value: keys = list(result.value.keys()) values = await asyncio.gather(*[self.client.unwrap_binary(value) for value in result.value.values()]) @@ -186,7 +187,7 @@ async def put_all(self, pairs: dict): Python type or a tuple of (item, hint), """ conn = await self._get_best_node() - return await cache_put_all_async(conn, self._cache_id, pairs) + return await cache_put_all_async(conn, self.cache_info, pairs) @status_to_exception(CacheError) async def replace(self, key, value, key_hint: object = None, value_hint: object = None): @@ -204,7 +205,7 @@ async def replace(self, key, value, key_hint: object = None, value_hint: object key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - result = await cache_replace_async(conn, self._cache_id, key, value, key_hint=key_hint, value_hint=value_hint) + result = await cache_replace_async(conn, self.cache_info, key, value, key_hint=key_hint, value_hint=value_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -218,9 +219,9 @@ async def clear(self, keys: Optional[list] = None): """ conn = await self._get_best_node() if keys: - return await cache_clear_keys_async(conn, self._cache_id, keys) + return await cache_clear_keys_async(conn, self.cache_info, keys) else: - return await cache_clear_async(conn, self._cache_id) + return await cache_clear_async(conn, self.cache_info) @status_to_exception(CacheError) async def clear_key(self, key, key_hint: object = None): @@ -235,7 +236,7 @@ async def clear_key(self, key, key_hint: object = None): key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - return await cache_clear_key_async(conn, self._cache_id, key, key_hint=key_hint) + return await cache_clear_key_async(conn, self.cache_info, key, key_hint=key_hint) @status_to_exception(CacheError) async def clear_keys(self, keys: Iterable): @@ -245,7 +246,7 @@ async def clear_keys(self, keys: Iterable): :param keys: a list of keys or (key, type hint) tuples """ conn = await self._get_best_node() - return await cache_clear_keys_async(conn, self._cache_id, keys) + return await cache_clear_keys_async(conn, self.cache_info, keys) @status_to_exception(CacheError) async def contains_key(self, key, key_hint=None) -> bool: @@ -261,7 +262,7 @@ async def contains_key(self, key, key_hint=None) -> bool: key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - return await cache_contains_key_async(conn, self._cache_id, key, key_hint=key_hint) + return await cache_contains_key_async(conn, self.cache_info, key, key_hint=key_hint) @status_to_exception(CacheError) async def contains_keys(self, keys: Iterable) -> bool: @@ -272,7 +273,7 @@ async def contains_keys(self, keys: Iterable) -> bool: :return: boolean `True` when all keys are present, `False` otherwise. 
""" conn = await self._get_best_node() - return await cache_contains_keys_async(conn, self._cache_id, keys) + return await cache_contains_keys_async(conn, self.cache_info, keys) @status_to_exception(CacheError) async def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: @@ -292,7 +293,7 @@ async def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - result = await cache_get_and_put_async(conn, self._cache_id, key, value, key_hint, value_hint) + result = await cache_get_and_put_async(conn, self.cache_info, key, value, key_hint, value_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -315,7 +316,7 @@ async def get_and_put_if_absent(self, key, value, key_hint=None, value_hint=None key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - result = await cache_get_and_put_if_absent_async(conn, self._cache_id, key, value, key_hint, value_hint) + result = await cache_get_and_put_if_absent_async(conn, self.cache_info, key, value, key_hint, value_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -336,7 +337,7 @@ async def put_if_absent(self, key, value, key_hint=None, value_hint=None): key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - return await cache_put_if_absent_async(conn, self._cache_id, key, value, key_hint, value_hint) + return await cache_put_if_absent_async(conn, self.cache_info, key, value, key_hint, value_hint) @status_to_exception(CacheError) async def get_and_remove(self, key, key_hint=None) -> Any: @@ -352,7 +353,7 @@ async def get_and_remove(self, key, key_hint=None) -> Any: key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - result = await cache_get_and_remove_async(conn, self._cache_id, key, key_hint) + result = await cache_get_and_remove_async(conn, self.cache_info, key, key_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -375,7 +376,7 @@ async def get_and_replace(self, key, value, key_hint=None, value_hint=None) -> A key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - result = await cache_get_and_replace_async(conn, self._cache_id, key, value, key_hint, value_hint) + result = await cache_get_and_replace_async(conn, self.cache_info, key, value, key_hint, value_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -392,7 +393,7 @@ async def remove_key(self, key, key_hint=None): key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - return await cache_remove_key_async(conn, self._cache_id, key, key_hint) + return await cache_remove_key_async(conn, self.cache_info, key, key_hint) @status_to_exception(CacheError) async def remove_keys(self, keys: list): @@ -403,7 +404,7 @@ async def remove_keys(self, keys: list): :param keys: list of keys or tuples of (key, key_hint) to remove. """ conn = await self._get_best_node() - return await cache_remove_keys_async(conn, self._cache_id, keys) + return await cache_remove_keys_async(conn, self.cache_info, keys) @status_to_exception(CacheError) async def remove_all(self): @@ -411,7 +412,7 @@ async def remove_all(self): Removes all cache entries, notifying listeners and cache writers. 
""" conn = await self._get_best_node() - return await cache_remove_all_async(conn, self._cache_id) + return await cache_remove_all_async(conn, self.cache_info) @status_to_exception(CacheError) async def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): @@ -430,7 +431,7 @@ async def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - return await cache_remove_if_equals_async(conn, self._cache_id, key, sample, key_hint, sample_hint) + return await cache_remove_if_equals_async(conn, self.cache_info, key, sample, key_hint, sample_hint) @status_to_exception(CacheError) async def replace_if_equals(self, key, sample, value, key_hint=None, sample_hint=None, value_hint=None) -> Any: @@ -453,7 +454,7 @@ async def replace_if_equals(self, key, sample, value, key_hint=None, sample_hint key_hint = AnyDataObject.map_python_type(key) conn = await self._get_best_node(key, key_hint) - result = await cache_replace_if_equals_async(conn, self._cache_id, key, sample, value, key_hint, sample_hint, + result = await cache_replace_if_equals_async(conn, self.cache_info, key, sample, value, key_hint, sample_hint, value_hint) result.value = await self.client.unwrap_binary(result.value) return result @@ -469,7 +470,7 @@ async def get_size(self, peek_modes=None): :return: integer number of cache entries. """ conn = await self._get_best_node() - return await cache_get_size_async(conn, self._cache_id, peek_modes) + return await cache_get_size_async(conn, self.cache_info, peek_modes) def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False) -> AioScanCursor: """ @@ -484,4 +485,4 @@ def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False) -> on local node only. Defaults to False, :return: async scan query cursor """ - return AioScanCursor(self.client, self._cache_id, page_size, partitions, local) + return AioScanCursor(self.client, self.cache_info, page_size, partitions, local) diff --git a/pyignite/aio_client.py b/pyignite/aio_client.py index 7a5959d..b0498f7 100644 --- a/pyignite/aio_client.py +++ b/pyignite/aio_client.py @@ -29,6 +29,7 @@ from .constants import AFFINITY_RETRIES, AFFINITY_DELAY from .datatypes import BinaryObject from .exceptions import BinaryTypeError, CacheError, ReconnectError, connection_errors +from .queries.query import CacheInfo from .stream import AioBinaryStream, READ_BACKWARD from .utils import cache_id, entity_id, status_to_exception, is_wrapped @@ -452,20 +453,24 @@ def sql( :return: async sql fields cursor with result rows as a lists. If `include_field_names` was set, the first row will hold field names. """ + if isinstance(cache, (int, str)): + c_info = CacheInfo(cache_id=cache_id(cache), protocol_context=self.protocol_context) + elif isinstance(cache, AioCache): + c_info = cache.cache_info + else: + c_info = None - c_id = cache.cache_id if isinstance(cache, AioCache) else cache_id(cache) - - if c_id != 0: + if c_info: schema = None - return AioSqlFieldsCursor(self, c_id, query_str, page_size, query_args, schema, statement_type, + return AioSqlFieldsCursor(self, c_info, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, timeout) def get_cluster(self) -> 'AioCluster': """ - Gets client cluster facade. + Get client cluster facade. - :return: AioClient cluster facade. 
+ :return: :py:class:`~pyignite.aio_cluster.AioCluster` instance. """ return AioCluster(self) diff --git a/pyignite/aio_cluster.py b/pyignite/aio_cluster.py index 6d76125..afbc41b 100644 --- a/pyignite/aio_cluster.py +++ b/pyignite/aio_cluster.py @@ -18,6 +18,7 @@ whole cluster asynchronously. """ from pyignite.api.cluster import cluster_get_state_async, cluster_set_state_async +from pyignite.datatypes import ClusterState from pyignite.exceptions import ClusterError from pyignite.utils import status_to_exception @@ -30,27 +31,34 @@ class AioCluster: """ def __init__(self, client: 'AioClient'): + """ + :param client: :py:class:`~pyignite.aio_client.AioClient` instance. + """ self._client = client @status_to_exception(ClusterError) - async def get_state(self): + async def get_state(self) -> 'ClusterState': """ Gets current cluster state. - :return: Current cluster state. This is one of ClusterState.INACTIVE, - ClusterState.ACTIVE or ClusterState.ACTIVE_READ_ONLY. + :return: Current cluster state. This is one of + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.INACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE_READ_ONLY`. """ return await cluster_get_state_async(await self._client.random_node()) @status_to_exception(ClusterError) - async def set_state(self, state): + async def set_state(self, state: 'ClusterState'): """ Changes current cluster state to the given. Note: Deactivation clears in-memory caches (without persistence) including the system caches. - :param state: New cluster state. This is one of ClusterState.INACTIVE, - ClusterState.ACTIVE or ClusterState.ACTIVE_READ_ONLY. + :param state: New cluster state. This is one of + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.INACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE_READ_ONLY`. """ return await cluster_set_state_async(await self._client.random_node(), state) diff --git a/pyignite/api/cache_config.py b/pyignite/api/cache_config.py index 0adb549..7f2869b 100644 --- a/pyignite/api/cache_config.py +++ b/pyignite/api/cache_config.py @@ -26,9 +26,9 @@ from typing import Union from pyignite.connection import Connection, AioConnection -from pyignite.datatypes.cache_config import cache_config_struct +from pyignite.datatypes.cache_config import get_cache_config_struct from pyignite.datatypes.cache_properties import prop_map -from pyignite.datatypes import Int, Byte, prop_codes, Short, String, StringArray +from pyignite.datatypes import Int, prop_codes, Short, String, StringArray from pyignite.queries import Query, ConfigQuery, query_perform from pyignite.queries.op_codes import ( OP_CACHE_GET_CONFIGURATION, OP_CACHE_CREATE_WITH_NAME, OP_CACHE_GET_OR_CREATE_WITH_NAME, OP_CACHE_DESTROY, @@ -37,6 +37,9 @@ from pyignite.utils import cache_id from .result import APIResult +from ..datatypes.prop_codes import PROP_EXPIRY_POLICY +from ..exceptions import NotSupportedByClusterError +from ..queries.query import CacheInfo def compact_cache_config(cache_config: dict) -> dict: @@ -57,29 +60,27 @@ def compact_cache_config(cache_config: dict) -> dict: return result -def cache_get_configuration(connection: 'Connection', cache: Union[str, int], - flags: int = 0, query_id=None) -> 'APIResult': +def cache_get_configuration(connection: 'Connection', cache_info: CacheInfo, query_id=None) -> 'APIResult': """ Gets configuration for the given cache. 
:param connection: connection to Ignite server, - :param cache: name or ID of the cache, - :param flags: Ignite documentation is unclear on this subject, + :param cache_info: cache meta info, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Result value is OrderedDict with the cache configuration parameters. """ - return __cache_get_configuration(connection, cache, flags, query_id) + return __cache_get_configuration(connection, cache_info, query_id) -async def cache_get_configuration_async(connection: 'AioConnection', cache: Union[str, int], - flags: int = 0, query_id=None) -> 'APIResult': +async def cache_get_configuration_async( + connection: 'AioConnection', cache_info: CacheInfo, query_id=None) -> 'APIResult': """ Async version of cache_get_configuration. """ - return await __cache_get_configuration(connection, cache, flags, query_id) + return await __cache_get_configuration(connection, cache_info, query_id) def __post_process_cache_config(result): @@ -88,22 +89,20 @@ def __post_process_cache_config(result): return result -def __cache_get_configuration(connection, cache, flags, query_id): +def __cache_get_configuration(connection, cache_info, query_id): query_struct = Query( OP_CACHE_GET_CONFIGURATION, [ - ('hash_code', Int), - ('flags', Byte), + ('cache_info', CacheInfo) ], query_id=query_id, ) return query_perform(query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flags': flags + 'cache_info': cache_info }, response_config=[ - ('cache_config', cache_config_struct) + ('cache_config', get_cache_config_struct(connection.protocol_context)) ], post_process_fun=__post_process_cache_config ) @@ -184,9 +183,9 @@ async def cache_destroy_async(connection: 'AioConnection', cache: Union[str, int def __cache_destroy(connection, cache, query_id): - query_struct = Query(OP_CACHE_DESTROY, [('hash_code', Int)], query_id=query_id) + query_struct = Query(OP_CACHE_DESTROY, [('cache_id', Int)], query_id=query_id) - return query_perform(query_struct, connection, query_params={'hash_code': cache_id(cache)}) + return query_perform(query_struct, connection, query_params={'cache_id': cache_id(cache)}) def cache_get_names(connection: 'Connection', query_id=None) -> 'APIResult': @@ -278,8 +277,12 @@ async def cache_get_or_create_with_config_async(connection: 'AioConnection', cac def __cache_create_with_config(op_code, connection, cache_props, query_id): prop_types, prop_values = {}, {} + is_expiry_policy_supported = connection.protocol_context.is_expiry_policy_supported() for i, prop_item in enumerate(cache_props.items()): prop_code, prop_value = prop_item + if prop_code == PROP_EXPIRY_POLICY and not is_expiry_policy_supported: + raise NotSupportedByClusterError("'ExpiryPolicy' API is not supported by the cluster") + prop_name = 'property_{}'.format(i) prop_types[prop_name] = prop_map(prop_code) prop_values[prop_name] = prop_value diff --git a/pyignite/api/key_value.py b/pyignite/api/key_value.py index 9fb13bb..5038051 100644 --- a/pyignite/api/key_value.py +++ b/pyignite/api/key_value.py @@ -23,54 +23,51 @@ OP_CACHE_CLEAR_KEYS, OP_CACHE_REMOVE_KEY, OP_CACHE_REMOVE_IF_EQUALS, OP_CACHE_REMOVE_KEYS, OP_CACHE_REMOVE_ALL, OP_CACHE_GET_SIZE, OP_CACHE_LOCAL_PEEK ) -from pyignite.datatypes import Map, Bool, Byte, Int, Long, AnyDataArray, AnyDataObject, ByteArray +from pyignite.datatypes import Map, Bool, Long, AnyDataArray, AnyDataObject, ByteArray from 
pyignite.datatypes.base import IgniteDataType from pyignite.queries import Query, query_perform -from pyignite.utils import cache_id from .result import APIResult +from ..queries.query import CacheInfo -def cache_put(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, binary: bool = False, +def cache_put(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache (overwriting existing value if any). :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status if a value is written, non-zero status and an error description otherwise. """ - return __cache_put(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return __cache_put(connection, cache_info, key, value, key_hint, value_hint, query_id) -async def cache_put_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, binary: bool = False, +async def cache_put_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_put """ - return await __cache_put(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return await __cache_put(connection, cache_info, key, value, key_hint, value_hint, query_id) -def __cache_put(connection, cache, key, value, key_hint, value_hint, binary, query_id): +def __cache_put(connection, cache_info, key, value, key_hint, value_hint, query_id): query_struct = Query( OP_CACHE_PUT, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ], @@ -79,50 +76,45 @@ def __cache_put(connection, cache, key, value, key_hint, value_hint, binary, que return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, 'value': value } ) -def cache_get(connection: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +def cache_get(connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + query_id: Optional[int] = None) -> 'APIResult': """ Retrieves a value from cache by key. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. 
Can be of any supported type, :param key_hint: (optional) Ignite data type, for which the given key should be converted, - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status and a value retrieved on success, non-zero status and an error description on failure. """ - return __cache_get(connection, cache, key, key_hint, binary, query_id) + return __cache_get(connection, cache_info, key, key_hint, query_id) -async def cache_get_async(connection: 'AioConnection', cache: Union[str, int], key: Any, - key_hint: 'IgniteDataType' = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_get_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, + key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_get """ - return await __cache_get(connection, cache, key, key_hint, binary, query_id) + return await __cache_get(connection, cache_info, key, key_hint, query_id) -def __cache_get(connection, cache, key, key_hint, binary, query_id): +def __cache_get(connection, cache_info, key, key_hint, query_id): query_struct = Query( OP_CACHE_GET, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ], query_id=query_id, @@ -130,8 +122,7 @@ def __cache_get(connection, cache, key, key_hint, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, }, response_config=[ @@ -141,16 +132,14 @@ def __cache_get(connection, cache, key, key_hint, binary, query_id): ) -def cache_get_all(connection: 'Connection', cache: Union[str, int], keys: Iterable, binary: bool = False, +def cache_get_all(connection: 'Connection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ Retrieves multiple key-value pairs from cache. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param keys: list of keys or tuples of (key, key_hint), - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -158,23 +147,22 @@ def cache_get_all(connection: 'Connection', cache: Union[str, int], keys: Iterab retrieved key-value pairs, non-zero status and an error description on failure. """ - return __cache_get_all(connection, cache, keys, binary, query_id) + return __cache_get_all(connection, cache_info, keys, query_id) -async def cache_get_all_async(connection: 'AioConnection', cache: Union[str, int], keys: Iterable, binary: bool = False, +async def cache_get_all_async(connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_get_all. 
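With the `(hash_code, flag)` pair and the per-call `binary` argument gone, every key-value helper now takes a `CacheInfo`. A minimal usage sketch, assuming an already established `Connection` (`conn`), a `CacheInfo` (`c_info`) built as above, and the usual `status`/`value` attributes of `APIResult`:

```
from pyignite.api.key_value import cache_get, cache_get_all, cache_put


def roundtrip(conn, c_info):
    # write one entry; a zero status means success
    res = cache_put(conn, c_info, 'key1', 'value1')
    assert res.status == 0

    # read it back; the stored value comes back in res.value
    res = cache_get(conn, c_info, 'key1')
    print(res.value)                        # -> 'value1'

    # bulk read: only the pairs that actually exist are returned
    res = cache_get_all(conn, c_info, ['key1', 'missing'])
    print(res.value)                        # -> {'key1': 'value1'}
```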
""" - return await __cache_get_all(connection, cache, keys, binary, query_id) + return await __cache_get_all(connection, cache_info, keys, query_id) -def __cache_get_all(connection, cache, keys, binary, query_id): +def __cache_get_all(connection, cache_info, keys, query_id): query_struct = Query( OP_CACHE_GET_ALL, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('keys', AnyDataArray()), ], query_id=query_id, @@ -182,8 +170,7 @@ def __cache_get_all(connection, cache, keys, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'keys': keys, }, response_config=[ @@ -193,42 +180,39 @@ def __cache_get_all(connection, cache, keys, binary, query_id): ) -def cache_put_all(connection: 'Connection', cache: Union[str, int], pairs: dict, binary: bool = False, +def cache_put_all(connection: 'Connection', cache_info: CacheInfo, pairs: dict, query_id: Optional[int] = None) -> 'APIResult': """ Puts multiple key-value pairs to cache (overwriting existing associations if any). :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param pairs: dictionary type parameters, contains key-value pairs to save. Each key or value can be an item of representable Python type or a tuple of (item, hint), - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status if key-value pairs are written, non-zero status and an error description otherwise. """ - return __cache_put_all(connection, cache, pairs, binary, query_id) + return __cache_put_all(connection, cache_info, pairs, query_id) -async def cache_put_all_async(connection: 'AioConnection', cache: Union[str, int], pairs: dict, binary: bool = False, +async def cache_put_all_async(connection: 'AioConnection', cache_info: CacheInfo, pairs: dict, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_put_all. """ - return await __cache_put_all(connection, cache, pairs, binary, query_id) + return await __cache_put_all(connection, cache_info, pairs, query_id) -def __cache_put_all(connection, cache, pairs, binary, query_id): +def __cache_put_all(connection, cache_info, pairs, query_id): query_struct = Query( OP_CACHE_PUT_ALL, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('data', Map), ], query_id=query_id, @@ -236,25 +220,22 @@ def __cache_put_all(connection, cache, pairs, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'data': pairs, }, ) -def cache_contains_key(connection: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +def cache_contains_key(connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + query_id: Optional[int] = None) -> 'APIResult': """ Returns a value indicating whether given key is present in cache. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. 
Can be of any supported type, :param key_hint: (optional) Ignite data type, for which the given key should be converted, - :param binary: pass True to keep the value in binary form. False - by default, :param query_id: a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -262,24 +243,22 @@ def cache_contains_key(connection: 'Connection', cache: Union[str, int], key: An retrieved on success: `True` when key is present, `False` otherwise, non-zero status and an error description on failure. """ - return __cache_contains_key(connection, cache, key, key_hint, binary, query_id) + return __cache_contains_key(connection, cache_info, key, key_hint, query_id) -async def cache_contains_key_async(connection: 'AioConnection', cache: Union[str, int], key: Any, - key_hint: 'IgniteDataType' = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_contains_key_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, + key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_contains_key. """ - return await __cache_contains_key(connection, cache, key, key_hint, binary, query_id) + return await __cache_contains_key(connection, cache_info, key, key_hint, query_id) -def __cache_contains_key(connection, cache, key, key_hint, binary, query_id): +def __cache_contains_key(connection, cache_info, key, key_hint, query_id): query_struct = Query( OP_CACHE_CONTAINS_KEY, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ], query_id=query_id, @@ -287,8 +266,7 @@ def __cache_contains_key(connection, cache, key, key_hint, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, }, response_config=[ @@ -298,16 +276,14 @@ def __cache_contains_key(connection, cache, key, key_hint, binary, query_id): ) -def cache_contains_keys(connection: 'Connection', cache: Union[str, int], keys: Iterable, binary: bool = False, +def cache_contains_keys(connection: 'Connection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ Returns a value indicating whether all given keys are present in cache. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param keys: a list of keys or (key, type hint) tuples, - :param binary: pass True to keep the value in binary form. False - by default, :param query_id: a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -315,23 +291,22 @@ def cache_contains_keys(connection: 'Connection', cache: Union[str, int], keys: retrieved on success: `True` when all keys are present, `False` otherwise, non-zero status and an error description on failure. """ - return __cache_contains_keys(connection, cache, keys, binary, query_id) + return __cache_contains_keys(connection, cache_info, keys, query_id) -async def cache_contains_keys_async(connection: 'AioConnection', cache: Union[str, int], keys: Iterable, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +async def cache_contains_keys_async(connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable, + query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_contains_keys. 
""" - return await __cache_contains_keys(connection, cache, keys, binary, query_id) + return await __cache_contains_keys(connection, cache_info, keys, query_id) -def __cache_contains_keys(connection, cache, keys, binary, query_id): +def __cache_contains_keys(connection, cache_info, keys, query_id): query_struct = Query( OP_CACHE_CONTAINS_KEYS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('keys', AnyDataArray()), ], query_id=query_id, @@ -339,8 +314,7 @@ def __cache_contains_keys(connection, cache, keys, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'keys': keys, }, response_config=[ @@ -350,23 +324,21 @@ def __cache_contains_keys(connection, cache, keys, binary, query_id): ) -def cache_get_and_put(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, binary: bool = False, +def cache_get_and_put(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ - Puts a value with a given key to cache, and returns the previous value + Puts a value with a given key to cache_info, and returns the previous value for that key, or null value if there was not such key. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: pass True to keep the value in binary form. False - by default, :param query_id: a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -374,24 +346,23 @@ def cache_get_and_put(connection: 'Connection', cache: Union[str, int], key: Any or None if a value is written, non-zero status and an error description in case of error. """ - return __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint, query_id) -async def cache_get_and_put_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, +async def cache_get_and_put_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_get_and_put. 
""" - return await __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return await __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint, query_id) -def __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, binary, query_id): +def __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint, query_id): query_struct = Query( OP_CACHE_GET_AND_PUT, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ], @@ -400,8 +371,7 @@ def __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, bin return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, 'value': value, }, @@ -412,8 +382,8 @@ def __cache_get_and_put(connection, cache, key, value, key_hint, value_hint, bin ) -def cache_get_and_replace(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, binary: bool = False, +def cache_get_and_replace(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache, returning previous value @@ -421,38 +391,35 @@ def cache_get_and_replace(connection: 'Connection', cache: Union[str, int], key: for that key. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: pass True to keep the value in binary form. False - by default, :param query_id: a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status and an old value or None on success, non-zero status and an error description otherwise. """ - return __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, binary, query_id) + return __cache_get_and_replace(connection, cache_info, key, key_hint, value, value_hint, query_id) -async def cache_get_and_replace_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, +async def cache_get_and_replace_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_get_and_replace. 
""" - return await __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, binary, query_id) + return await __cache_get_and_replace(connection, cache_info, key, key_hint, value, value_hint, query_id) -def __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, binary, query_id): +def __cache_get_and_replace(connection, cache_info, key, key_hint, value, value_hint, query_id): query_struct = Query( OP_CACHE_GET_AND_REPLACE, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ], @@ -461,8 +428,7 @@ def __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, 'value': value, }, @@ -473,38 +439,35 @@ def __cache_get_and_replace(connection, cache, key, key_hint, value, value_hint, ) -def cache_get_and_remove(connection: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +def cache_get_and_remove(connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + query_id: Optional[int] = None) -> 'APIResult': """ Removes the cache entry with specified key, returning the value. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param key_hint: (optional) Ignite data type, for which the given key should be converted, - :param binary: pass True to keep the value in binary form. False - by default, :param query_id: a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status and an old value or None, non-zero status and an error description otherwise. 
""" - return __cache_get_and_remove(connection, cache, key, key_hint, binary, query_id) + return __cache_get_and_remove(connection, cache_info, key, key_hint, query_id) -async def cache_get_and_remove_async(connection: 'AioConnection', cache: Union[str, int], key: Any, - key_hint: 'IgniteDataType' = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': - return await __cache_get_and_remove(connection, cache, key, key_hint, binary, query_id) +async def cache_get_and_remove_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + query_id: Optional[int] = None) -> 'APIResult': + return await __cache_get_and_remove(connection, cache_info, key, key_hint, query_id) -def __cache_get_and_remove(connection, cache, key, key_hint, binary, query_id): +def __cache_get_and_remove(connection, cache_info, key, key_hint, query_id): query_struct = Query( OP_CACHE_GET_AND_REMOVE, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ], query_id=query_id, @@ -512,8 +475,7 @@ def __cache_get_and_remove(connection, cache, key, key_hint, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, }, response_config=[ @@ -523,47 +485,44 @@ def __cache_get_and_remove(connection, cache, key, key_hint, binary, query_id): ) -def cache_put_if_absent(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, +def cache_put_if_absent(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key does not already exist. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: (optional) pass True to keep the value in binary form. False - by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return __cache_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id) -async def cache_put_if_absent_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, +async def cache_put_if_absent_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_put_if_absent. 
""" - return await __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return await __cache_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id) -def __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id): +def __cache_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id): query_struct = Query( OP_CACHE_PUT_IF_ABSENT, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ], @@ -572,8 +531,7 @@ def __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, b return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, 'value': value, }, @@ -584,47 +542,44 @@ def __cache_put_if_absent(connection, cache, key, value, key_hint, value_hint, b ) -def cache_get_and_put_if_absent(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, +def cache_get_and_put_if_absent(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key does not already exist. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: (optional) pass True to keep the value in binary form. False - by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status and an old value or None on success, non-zero status and an error description otherwise. """ - return __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id) -async def cache_get_and_put_if_absent_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, +async def cache_get_and_put_if_absent_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_get_and_put_if_absent. 
""" - return await __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return await __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id) -def __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value_hint, binary, query_id): +def __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id): query_struct = Query( OP_CACHE_GET_AND_PUT_IF_ABSENT, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ], @@ -633,8 +588,7 @@ def __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, 'value': value, }, @@ -645,22 +599,20 @@ def __cache_get_and_put_if_absent(connection, cache, key, value, key_hint, value ) -def cache_replace(connection: 'Connection', cache: Union[str, int], key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, binary: bool = False, +def cache_replace(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key already exist. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param value: value for the key, :param key_hint: (optional) Ignite data type, for which the given key should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param binary: pass True to keep the value in binary form. False - by default, :param query_id: a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -668,24 +620,23 @@ def cache_replace(connection: 'Connection', cache: Union[str, int], key: Any, va success code, or non-zero status and an error description if something has gone wrong. """ - return __cache_replace(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return __cache_replace(connection, cache_info, key, value, key_hint, value_hint, query_id) -async def cache_replace_async(connection: 'AioConnection', cache: Union[str, int], key: Any, value: Any, +async def cache_replace_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_replace. 
""" - return await __cache_replace(connection, cache, key, value, key_hint, value_hint, binary, query_id) + return await __cache_replace(connection, cache_info, key, value, key_hint, value_hint, query_id) -def __cache_replace(connection, cache, key, value, key_hint, value_hint, binary, query_id): +def __cache_replace(connection, cache_info, key, value, key_hint, value_hint, query_id): query_struct = Query( OP_CACHE_REPLACE, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ], @@ -694,8 +645,7 @@ def __cache_replace(connection, cache, key, value, key_hint, value_hint, binary, return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, 'value': value, }, @@ -706,16 +656,15 @@ def __cache_replace(connection, cache, key, value, key_hint, value_hint, binary, ) -def cache_replace_if_equals(connection: 'Connection', cache: Union[str, int], key: Any, sample: Any, value: Any, +def cache_replace_if_equals(connection: 'Connection', cache_info: CacheInfo, key: Any, sample: Any, value: Any, key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, - value_hint: 'IgniteDataType' = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': + value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key already exists and value equals provided sample. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry, :param sample: a sample to compare the stored value with, :param value: new value for the given key, @@ -725,8 +674,6 @@ def cache_replace_if_equals(connection: 'Connection', cache: Union[str, int], ke the given sample should be converted :param value_hint: (optional) Ignite data type, for which the given value should be converted, - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -734,28 +681,26 @@ def cache_replace_if_equals(connection: 'Connection', cache: Union[str, int], ke success code, or non-zero status and an error description if something has gone wrong. """ - return __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, sample_hint, value_hint, binary, - query_id) + return __cache_replace_if_equals(connection, cache_info, key, sample, value, key_hint, + sample_hint, value_hint, query_id) async def cache_replace_if_equals_async( - connection: 'AioConnection', cache: Union[str, int], key: Any, sample: Any, value: Any, + connection: 'AioConnection', cache_info: CacheInfo, key: Any, sample: Any, value: Any, key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_replace_if_equals. 
""" - return await __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, sample_hint, value_hint, - binary, query_id) + return await __cache_replace_if_equals(connection, cache_info, key, sample, value, key_hint, + sample_hint, value_hint, query_id) -def __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, sample_hint, value_hint, binary, - query_id): +def __cache_replace_if_equals(connection, cache_info, key, sample, value, key_hint, sample_hint, value_hint, query_id): query_struct = Query( OP_CACHE_REPLACE_IF_EQUALS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('sample', sample_hint or AnyDataObject), ('value', value_hint or AnyDataObject), @@ -765,8 +710,7 @@ def __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, s return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, 'sample': sample, 'value': value, @@ -778,86 +722,77 @@ def __cache_replace_if_equals(connection, cache, key, sample, value, key_hint, s ) -def cache_clear(connection: 'Connection', cache: Union[str, int], binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +def cache_clear(connection: 'Connection', cache_info: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': """ Clears the cache without notifying listeners or cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, - :param binary: (optional) pass True to keep the value in binary form. - False by default, + :param cache_info: cache meta info, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_clear(connection, cache, binary, query_id) + return __cache_clear(connection, cache_info, query_id) -async def cache_clear_async(connection: 'AioConnection', cache: Union[str, int], binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_clear_async( + connection: 'AioConnection', cache_info: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_clear. """ - return await __cache_clear(connection, cache, binary, query_id) + return await __cache_clear(connection, cache_info, query_id) -def __cache_clear(connection, cache, binary, query_id): +def __cache_clear(connection, cache_info, query_id): query_struct = Query( OP_CACHE_CLEAR, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ], query_id=query_id, ) return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, }, ) -def cache_clear_key(connection: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +def cache_clear_key(connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + query_id: Optional[int] = None) -> 'APIResult': """ Clears the cache key without notifying listeners or cache writers. 
:param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry, :param key_hint: (optional) Ignite data type, for which the given key should be converted, - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_clear_key(connection, cache, key, key_hint, binary, query_id) + return __cache_clear_key(connection, cache_info, key, key_hint, query_id) -async def cache_clear_key_async(connection: 'AioConnection', cache: Union[str, int], key: Any, - key_hint: 'IgniteDataType' = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_clear_key_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, + key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_clear_key. """ - return await __cache_clear_key(connection, cache, key, key_hint, binary, query_id) + return await __cache_clear_key(connection, cache_info, key, key_hint, query_id) -def __cache_clear_key(connection, cache, key, key_hint, binary, query_id): +def __cache_clear_key(connection, cache_info, key, key_hint, query_id): query_struct = Query( OP_CACHE_CLEAR_KEY, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ], query_id=query_id, @@ -865,46 +800,43 @@ def __cache_clear_key(connection, cache, key, key_hint, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, }, ) -def cache_clear_keys(connection: 'Connection', cache: Union[str, int], keys: Iterable, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +def cache_clear_keys( + connection: 'Connection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ Clears the cache keys without notifying listeners or cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param keys: list of keys or tuples of (key, key_hint), - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_clear_keys(connection, cache, keys, binary, query_id) + return __cache_clear_keys(connection, cache_info, keys, query_id) -async def cache_clear_keys_async(connection: 'AioConnection', cache: Union[str, int], keys: Iterable, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +async def cache_clear_keys_async( + connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None +) -> 'APIResult': """ Async version of cache_clear_keys. 
""" - return await __cache_clear_keys(connection, cache, keys, binary, query_id) + return await __cache_clear_keys(connection, cache_info, keys, query_id) -def __cache_clear_keys(connection, cache, keys, binary, query_id): +def __cache_clear_keys(connection, cache_info, keys, query_id): query_struct = Query( OP_CACHE_CLEAR_KEYS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('keys', AnyDataArray()), ], query_id=query_id, @@ -912,25 +844,22 @@ def __cache_clear_keys(connection, cache, keys, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'keys': keys, }, ) -def cache_remove_key(connection: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +def cache_remove_key(connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + query_id: Optional[int] = None) -> 'APIResult': """ Clears the cache key without notifying listeners or cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry, :param key_hint: (optional) Ignite data type, for which the given key should be converted, - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -938,24 +867,22 @@ def cache_remove_key(connection: 'Connection', cache: Union[str, int], key: Any, success code, or non-zero status and an error description if something has gone wrong. """ - return __cache_remove_key(connection, cache, key, key_hint, binary, query_id) + return __cache_remove_key(connection, cache_info, key, key_hint, query_id) -async def cache_remove_key_async(connection: 'AioConnection', cache: Union[str, int], key: Any, - key_hint: 'IgniteDataType' = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_remove_key_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, + key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_remove_key. 
""" - return await __cache_remove_key(connection, cache, key, key_hint, binary, query_id) + return await __cache_remove_key(connection, cache_info, key, key_hint, query_id) -def __cache_remove_key(connection, cache, key, key_hint, binary, query_id): +def __cache_remove_key(connection, cache_info, key, key_hint, query_id): query_struct = Query( OP_CACHE_REMOVE_KEY, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ], query_id=query_id, @@ -963,8 +890,7 @@ def __cache_remove_key(connection, cache, key, key_hint, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, }, response_config=[ @@ -974,23 +900,21 @@ def __cache_remove_key(connection, cache, key, key_hint, binary, query_id): ) -def cache_remove_if_equals(connection: 'Connection', cache: Union[str, int], key: Any, sample: Any, +def cache_remove_if_equals(connection: 'Connection', cache_info: CacheInfo, key: Any, sample: Any, key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + query_id: Optional[int] = None) -> 'APIResult': """ Removes an entry with a given key if provided value is equal to actual value, notifying listeners and cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: key for the cache entry, :param sample: a sample to compare the stored value with, :param key_hint: (optional) Ignite data type, for which the given key should be converted, :param sample_hint: (optional) Ignite data type, for whic the given sample should be converted - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -998,24 +922,23 @@ def cache_remove_if_equals(connection: 'Connection', cache: Union[str, int], key success code, or non-zero status and an error description if something has gone wrong. """ - return __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hint, binary, query_id) + return __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, sample_hint, query_id) async def cache_remove_if_equals_async( - connection: 'AioConnection', cache: Union[str, int], key: Any, sample: Any, key_hint: 'IgniteDataType' = None, - sample_hint: 'IgniteDataType' = None, binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': + connection: 'AioConnection', cache_info: CacheInfo, key: Any, sample: Any, key_hint: 'IgniteDataType' = None, + sample_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_remove_if_equals. 
""" - return await __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hint, binary, query_id) + return await __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, sample_hint, query_id) -def __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hint, binary, query_id): +def __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, sample_hint, query_id): query_struct = Query( OP_CACHE_REMOVE_IF_EQUALS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('sample', sample_hint or AnyDataObject), ], @@ -1024,8 +947,7 @@ def __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hi return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, 'sample': sample, }, @@ -1036,39 +958,37 @@ def __cache_remove_if_equals(connection, cache, key, sample, key_hint, sample_hi ) -def cache_remove_keys(connection: 'Connection', cache: Union[str, int], keys: Iterable, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +def cache_remove_keys( + connection: 'Connection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': """ Removes entries with given keys, notifying listeners and cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param keys: list of keys or tuples of (key, key_hint), - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_remove_keys(connection, cache, keys, binary, query_id) + return __cache_remove_keys(connection, cache_info, keys, query_id) -async def cache_remove_keys_async(connection: 'AioConnection', cache: Union[str, int], keys: Iterable, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +async def cache_remove_keys_async( + connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None +) -> 'APIResult': """ Async version of cache_remove_keys. """ - return await __cache_remove_keys(connection, cache, keys, binary, query_id) + return await __cache_remove_keys(connection, cache_info, keys, query_id) -def __cache_remove_keys(connection, cache, keys, binary, query_id): +def __cache_remove_keys(connection, cache_info, keys, query_id): query_struct = Query( OP_CACHE_REMOVE_KEYS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('keys', AnyDataArray()), ], query_id=query_id, @@ -1076,69 +996,61 @@ def __cache_remove_keys(connection, cache, keys, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'keys': keys, }, ) -def cache_remove_all(connection: 'Connection', cache: Union[str, int], binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +def cache_remove_all(connection: 'Connection', cache_info: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': """ - Removes all entries from cache, notifying listeners and cache writers. 
+ Removes all entries from cache_info, notifying listeners and cache writers. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, - :param binary: (optional) pass True to keep the value in binary form. - False by default, + :param cache_info: cache meta info, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_remove_all(connection, cache, binary, query_id) + return __cache_remove_all(connection, cache_info, query_id) -async def cache_remove_all_async(connection: 'AioConnection', cache: Union[str, int], binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_remove_all_async( + connection: 'AioConnection', cache_info: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': """ Async version of cache_remove_all. """ - return await __cache_remove_all(connection, cache, binary, query_id) + return await __cache_remove_all(connection, cache_info, query_id) -def __cache_remove_all(connection, cache, binary, query_id): +def __cache_remove_all(connection, cache_info, query_id): query_struct = Query( OP_CACHE_REMOVE_ALL, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ], query_id=query_id, ) return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, }, ) -def cache_get_size(connection: 'Connection', cache: Union[str, int], peek_modes: Union[int, list, tuple] = None, - binary: bool = False, query_id: Optional[int] = None) -> 'APIResult': +def cache_get_size(connection: 'Connection', cache_info: CacheInfo, peek_modes: Union[int, list, tuple] = None, + query_id: Optional[int] = None) -> 'APIResult': """ Gets the number of entries in cache. :param connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param peek_modes: (optional) limit count to near cache partition (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache (PeekModes.BACKUP). Defaults to pimary cache partitions (PeekModes.PRIMARY), - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -1146,16 +1058,17 @@ def cache_get_size(connection: 'Connection', cache: Union[str, int], peek_modes: cache entries on success, non-zero status and an error description otherwise. 
""" - return __cache_get_size(connection, cache, peek_modes, binary, query_id) + return __cache_get_size(connection, cache_info, peek_modes, query_id) -async def cache_get_size_async(connection: 'AioConnection', cache: Union[str, int], - peek_modes: Union[int, list, tuple] = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': - return await __cache_get_size(connection, cache, peek_modes, binary, query_id) +async def cache_get_size_async( + connection: 'AioConnection', cache_info: CacheInfo, peek_modes: Union[int, list, tuple] = None, + query_id: Optional[int] = None +) -> 'APIResult': + return await __cache_get_size(connection, cache_info, peek_modes, query_id) -def __cache_get_size(connection, cache, peek_modes, binary, query_id): +def __cache_get_size(connection, cache_info, peek_modes, query_id): if peek_modes is None: peek_modes = [] elif not isinstance(peek_modes, (list, tuple)): @@ -1164,8 +1077,7 @@ def __cache_get_size(connection, cache, peek_modes, binary, query_id): query_struct = Query( OP_CACHE_GET_SIZE, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('peek_modes', ByteArray), ], query_id=query_id, @@ -1173,8 +1085,7 @@ def __cache_get_size(connection, cache, peek_modes, binary, query_id): return query_perform( query_struct, connection, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'peek_modes': peek_modes, }, response_config=[ @@ -1184,9 +1095,8 @@ def __cache_get_size(connection, cache, peek_modes, binary, query_id): ) -def cache_local_peek(conn: 'Connection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, - peek_modes: Union[int, list, tuple] = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': +def cache_local_peek(conn: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + peek_modes: Union[int, list, tuple] = None, query_id: Optional[int] = None) -> 'APIResult': """ Peeks at in-memory cached value using default optional peek mode. @@ -1194,35 +1104,33 @@ def cache_local_peek(conn: 'Connection', cache: Union[str, int], key: Any, key_h node. :param conn: connection: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info, :param key: entry key, :param key_hint: (optional) Ignite data type, for which the given key should be converted, :param peek_modes: (optional) limit count to near cache partition (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache (PeekModes.BACKUP). Defaults to primary cache partitions (PeekModes.PRIMARY), - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, :return: API result data object. Contains zero status and a peeked value (null if not found). 
""" - return __cache_local_peek(conn, cache, key, key_hint, peek_modes, binary, query_id) + return __cache_local_peek(conn, cache_info, key, key_hint, peek_modes, query_id) async def cache_local_peek_async( - conn: 'AioConnection', cache: Union[str, int], key: Any, key_hint: 'IgniteDataType' = None, - peek_modes: Union[int, list, tuple] = None, binary: bool = False, - query_id: Optional[int] = None) -> 'APIResult': + conn: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + peek_modes: Union[int, list, tuple] = None, query_id: Optional[int] = None +) -> 'APIResult': """ Async version of cache_local_peek. """ - return await __cache_local_peek(conn, cache, key, key_hint, peek_modes, binary, query_id) + return await __cache_local_peek(conn, cache_info, key, key_hint, peek_modes, query_id) -def __cache_local_peek(conn, cache, key, key_hint, peek_modes, binary, query_id): +def __cache_local_peek(conn, cache_info, key, key_hint, peek_modes, query_id): if peek_modes is None: peek_modes = [] elif not isinstance(peek_modes, (list, tuple)): @@ -1231,8 +1139,7 @@ def __cache_local_peek(conn, cache, key, key_hint, peek_modes, binary, query_id) query_struct = Query( OP_CACHE_LOCAL_PEEK, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('peek_modes', ByteArray), ], @@ -1241,8 +1148,7 @@ def __cache_local_peek(conn, cache, key, key_hint, peek_modes, binary, query_id) return query_perform( query_struct, conn, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'key': key, 'peek_modes': peek_modes, }, diff --git a/pyignite/api/sql.py b/pyignite/api/sql.py index b10cc7d..267bc5b 100644 --- a/pyignite/api/sql.py +++ b/pyignite/api/sql.py @@ -13,35 +13,32 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Union - from pyignite.connection import AioConnection, Connection -from pyignite.datatypes import AnyDataArray, AnyDataObject, Bool, Byte, Int, Long, Map, Null, String, StructArray +from pyignite.datatypes import AnyDataArray, AnyDataObject, Bool, Int, Long, Map, Null, String, StructArray from pyignite.datatypes.sql import StatementType from pyignite.queries import Query, query_perform from pyignite.queries.op_codes import ( OP_QUERY_SCAN, OP_QUERY_SCAN_CURSOR_GET_PAGE, OP_QUERY_SQL, OP_QUERY_SQL_CURSOR_GET_PAGE, OP_QUERY_SQL_FIELDS, OP_QUERY_SQL_FIELDS_CURSOR_GET_PAGE, OP_RESOURCE_CLOSE ) -from pyignite.utils import cache_id, deprecated +from pyignite.utils import deprecated from .result import APIResult +from ..queries.query import CacheInfo from ..queries.response import SQLResponse -def scan(conn: 'Connection', cache: Union[str, int], page_size: int, partitions: int = -1, local: bool = False, - binary: bool = False, query_id: int = None) -> APIResult: +def scan(conn: 'Connection', cache_info: CacheInfo, page_size: int, partitions: int = -1, local: bool = False, + query_id: int = None) -> APIResult: """ Performs scan query. :param conn: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: cache meta info. :param page_size: cursor page size, :param partitions: (optional) number of partitions to query (negative to query entire cache), :param local: (optional) pass True if this query should be executed on local node only. Defaults to False, - :param binary: (optional) pass True to keep the value in binary form. 
- False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -56,15 +53,15 @@ def scan(conn: 'Connection', cache: Union[str, int], page_size: int, partitions: * `more`: bool, True if more data is available for subsequent ‘scan_cursor_get_page’ calls. """ - return __scan(conn, cache, page_size, partitions, local, binary, query_id) + return __scan(conn, cache_info, page_size, partitions, local, query_id) -async def scan_async(conn: 'AioConnection', cache: Union[str, int], page_size: int, partitions: int = -1, - local: bool = False, binary: bool = False, query_id: int = None) -> APIResult: +async def scan_async(conn: 'AioConnection', cache_info: CacheInfo, page_size: int, partitions: int = -1, + local: bool = False, query_id: int = None) -> APIResult: """ Async version of scan. """ - return await __scan(conn, cache, page_size, partitions, local, binary, query_id) + return await __scan(conn, cache_info, page_size, partitions, local, query_id) def __query_result_post_process(result): @@ -73,12 +70,11 @@ def __query_result_post_process(result): return result -def __scan(conn, cache, page_size, partitions, local, binary, query_id): +def __scan(conn, cache_info, page_size, partitions, local, query_id): query_struct = Query( OP_QUERY_SCAN, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('filter', Null), ('page_size', Int), ('partitions', Int), @@ -89,8 +85,7 @@ def __scan(conn, cache, page_size, partitions, local, binary, query_id): return query_perform( query_struct, conn, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'filter': None, 'page_size': page_size, 'partitions': partitions, @@ -156,18 +151,17 @@ def __scan_cursor_get_page(conn, cursor, query_id): @deprecated(version='1.2.0', reason="This API is deprecated and will be removed in the following major release. " "Use sql_fields instead") def sql( - conn: 'Connection', cache: Union[str, int], + conn: 'Connection', cache_info: CacheInfo, table_name: str, query_str: str, page_size: int, query_args=None, distributed_joins: bool = False, replicated_only: bool = False, - local: bool = False, timeout: int = 0, binary: bool = False, - query_id: int = None + local: bool = False, timeout: int = 0, query_id: int = None ) -> APIResult: """ Executes an SQL query over data stored in the cluster. The query returns the whole record (key and value). :param conn: connection to Ignite server, - :param cache: name or ID of the cache, + :param cache_info: Cache meta info, :param table_name: name of a type or SQL table, :param query_str: SQL query string, :param page_size: cursor page size, @@ -179,8 +173,6 @@ def sql( on local node only. Defaults to False, :param timeout: (optional) non-negative timeout value in ms. Zero disables timeout (default), - :param binary: (optional) pass True to keep the value in binary form. - False by default, :param query_id: (optional) a value generated by client and returned as-is in response.query_id. 
When the parameter is omitted, a random value is generated, @@ -202,8 +194,7 @@ def sql( query_struct = Query( OP_QUERY_SQL, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('table_name', String), ('query_str', String), ('query_args', AnyDataArray()), @@ -218,8 +209,7 @@ def sql( result = query_struct.perform( conn, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'table_name': table_name, 'query_str': query_str, 'query_args': query_args, @@ -287,19 +277,19 @@ def sql_cursor_get_page( def sql_fields( - conn: 'Connection', cache: Union[str, int], + conn: 'Connection', cache_info: CacheInfo, query_str: str, page_size: int, query_args=None, schema: str = None, statement_type: int = StatementType.ANY, distributed_joins: bool = False, local: bool = False, replicated_only: bool = False, enforce_join_order: bool = False, collocated: bool = False, lazy: bool = False, include_field_names: bool = False, max_rows: int = -1, - timeout: int = 0, binary: bool = False, query_id: int = None + timeout: int = 0, query_id: int = None ) -> APIResult: """ Performs SQL fields query. :param conn: connection to Ignite server, - :param cache: name or ID of the cache. If zero, then schema is used. + :param cache_info: cache meta info. :param query_str: SQL query string, :param page_size: cursor page size, :param query_args: (optional) query arguments. List of values or @@ -307,9 +297,9 @@ def sql_fields( :param schema: schema for the query. :param statement_type: (optional) statement type. Can be: - * StatementType.ALL − any type (default), + * StatementType.ALL − any type (default), * StatementType.SELECT − select, - * StatementType.UPDATE − update. + * StatementType.UPDATE − update. :param distributed_joins: (optional) distributed joins. :param local: (optional) pass True if this query should be executed @@ -323,7 +313,6 @@ def sql_fields( :param max_rows: (optional) query-wide maximum of rows. :param timeout: (optional) non-negative timeout value in ms. Zero disables timeout. - :param binary: (optional) pass True to keep the value in binary form. :param query_id: (optional) a value generated by client and returned as-is in response.query_id. When the parameter is omitted, a random value is generated, @@ -338,39 +327,39 @@ def sql_fields( * `more`: bool, True if more data is available for subsequent ‘sql_fields_cursor_get_page’ calls. """ - return __sql_fields(conn, cache, query_str, page_size, query_args, schema, statement_type, distributed_joins, + return __sql_fields(conn, cache_info, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, - timeout, binary, query_id) + timeout, query_id) async def sql_fields_async( - conn: 'AioConnection', cache: Union[str, int], + conn: 'AioConnection', cache_info: CacheInfo, query_str: str, page_size: int, query_args=None, schema: str = None, statement_type: int = StatementType.ANY, distributed_joins: bool = False, local: bool = False, replicated_only: bool = False, enforce_join_order: bool = False, collocated: bool = False, lazy: bool = False, include_field_names: bool = False, max_rows: int = -1, - timeout: int = 0, binary: bool = False, query_id: int = None + timeout: int = 0, query_id: int = None ) -> APIResult: """ Async version of sql_fields. 
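Because the old hint "If zero, then schema is used" was dropped from the docstring above, a short sketch of the schema-only case may help: passing a `CacheInfo` with `cache_id=0` keeps the old behaviour of resolving tables through the schema. The assumption that `pyignite.api` re-exports `sql_fields` and the use of the default `PUBLIC` schema are illustrative, not part of the patch.

```python
from pyignite import Client
from pyignite.api import sql_fields
from pyignite.queries.query import CacheInfo

client = Client()
with client.connect('127.0.0.1', 10800):
    # cache_id=0 tells the server to resolve tables through the schema instead.
    c_info = CacheInfo(cache_id=0, protocol_context=client.protocol_context)
    result = sql_fields(client.random_node, c_info, 'SELECT 1', 1,
                        schema='PUBLIC', include_field_names=True)
    print(result.value['data'])
```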
""" - return await __sql_fields(conn, cache, query_str, page_size, query_args, schema, statement_type, distributed_joins, - local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, - max_rows, timeout, binary, query_id) + return await __sql_fields(conn, cache_info, query_str, page_size, query_args, schema, statement_type, + distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, + include_field_names, max_rows, timeout, query_id) -def __sql_fields(conn, cache, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, - replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, timeout, - binary, query_id): +def __sql_fields( + conn, cache_info, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, + replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, timeout, query_id +): if query_args is None: query_args = [] query_struct = Query( OP_QUERY_SQL_FIELDS, [ - ('hash_code', Int), - ('flag', Byte), + ('cache_info', CacheInfo), ('schema', String), ('page_size', Int), ('max_rows', Int), @@ -393,8 +382,7 @@ def __sql_fields(conn, cache, query_str, page_size, query_args, schema, statemen return query_perform( query_struct, conn, query_params={ - 'hash_code': cache_id(cache), - 'flag': 1 if binary else 0, + 'cache_info': cache_info, 'schema': schema, 'page_size': page_size, 'max_rows': max_rows, diff --git a/pyignite/cache.py b/pyignite/cache.py index 3c93637..a2444a4 100644 --- a/pyignite/cache.py +++ b/pyignite/cache.py @@ -15,9 +15,10 @@ from typing import Any, Iterable, Optional, Tuple, Union -from .datatypes import prop_codes +from .datatypes import prop_codes, ExpiryPolicy from .datatypes.internal import AnyDataObject -from .exceptions import CacheCreationError, CacheError, ParameterError, SQLError +from .exceptions import CacheCreationError, CacheError, ParameterError, SQLError, NotSupportedByClusterError +from .queries.query import CacheInfo from .utils import cache_id, status_to_exception from .api.cache_config import ( cache_create, cache_create_with_config, cache_get_or_create, cache_get_or_create_with_config, cache_destroy, @@ -93,12 +94,14 @@ def __parse_settings(settings: Union[str, dict]) -> Tuple[Optional[str], Optiona class BaseCache: - def __init__(self, client: 'BaseClient', name: str): + def __init__(self, client: 'BaseClient', name: str, expiry_policy: ExpiryPolicy = None): self._client = client self._name = name self._settings = None - self._cache_id = cache_id(self._name) - self._client.register_cache(self._cache_id) + self._cache_info = CacheInfo(cache_id=cache_id(self._name), + protocol_context=client.protocol_context, + expiry_policy=expiry_policy) + self._client.register_cache(self.cache_info.cache_id) @property def name(self) -> str: @@ -114,6 +117,13 @@ def client(self) -> 'BaseClient': """ return self._client + @property + def cache_info(self) -> CacheInfo: + """ + Cache meta info. + """ + return self._cache_info + @property def cache_id(self) -> int: """ @@ -121,7 +131,30 @@ def cache_id(self) -> int: :return: integer value of the cache ID. 
""" - return self._cache_id + return self._cache_info.cache_id + + def with_expire_policy( + self, expiry_policy: Optional[ExpiryPolicy] = None, + create: Union[int, float] = ExpiryPolicy.UNCHANGED, + update: Union[int, float] = ExpiryPolicy.UNCHANGED, + access: Union[int, float] = ExpiryPolicy.UNCHANGED + ): + """ + :param expiry_policy: optional :class:`~pyignite.datatypes.expiry_policy.ExpiryPolicy` + object. If it is set, other params will be ignored. + :param create: create TTL in seconds (float) or milliseconds (int), + :param update: Create TTL in seconds (float) or milliseconds (int), + :param access: Create TTL in seconds (float) or milliseconds (int). + :return: cache decorator with expiry policy set. + """ + if not self.client.protocol_context.is_expiry_policy_supported(): + raise NotSupportedByClusterError("'ExpiryPolicy' API is not supported by the cluster") + + cache_cls = type(self) + if not expiry_policy: + expiry_policy = ExpiryPolicy(create=create, update=update, access=access) + + return cache_cls(self.client, self.name, expiry_policy) class Cache(BaseCache): @@ -134,17 +167,17 @@ class Cache(BaseCache): :ref:`this example ` on how to do it. """ - def __init__(self, client: 'Client', name: str): + def __init__(self, client: 'Client', name: str, expiry_policy: ExpiryPolicy = None): """ Initialize cache object. For internal use. :param client: Ignite client, :param name: Cache name. """ - super().__init__(client, name) + super().__init__(client, name, expiry_policy) def _get_best_node(self, key=None, key_hint=None): - return self.client.get_best_node(self._cache_id, key, key_hint) + return self.client.get_best_node(self, key, key_hint) @property def settings(self) -> Optional[dict]: @@ -159,7 +192,7 @@ def settings(self) -> Optional[dict]: if self._settings is None: config_result = cache_get_configuration( self._get_best_node(), - self._cache_id + self.cache_info ) if config_result.status == 0: self._settings = config_result.value @@ -173,7 +206,7 @@ def destroy(self): """ Destroys cache with a given name. """ - return cache_destroy(self._get_best_node(), self._cache_id) + return cache_destroy(self._get_best_node(), self.cache_id) @status_to_exception(CacheError) def get(self, key, key_hint: object = None) -> Any: @@ -190,7 +223,7 @@ def get(self, key, key_hint: object = None) -> Any: result = cache_get( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, key_hint=key_hint ) @@ -198,9 +231,7 @@ def get(self, key, key_hint: object = None) -> Any: return result @status_to_exception(CacheError) - def put( - self, key, value, key_hint: object = None, value_hint: object = None - ): + def put(self, key, value, key_hint: object = None, value_hint: object = None): """ Puts a value with a given key to cache (overwriting existing value if any). @@ -217,7 +248,7 @@ def put( return cache_put( self._get_best_node(key, key_hint), - self._cache_id, key, value, + self.cache_info, key, value, key_hint=key_hint, value_hint=value_hint ) @@ -229,7 +260,7 @@ def get_all(self, keys: list) -> list: :param keys: list of keys or tuples of (key, key_hint), :return: a dict of key-value pairs. """ - result = cache_get_all(self._get_best_node(), self._cache_id, keys) + result = cache_get_all(self._get_best_node(), self.cache_info, keys) if result.value: for key, value in result.value.items(): result.value[key] = self.client.unwrap_binary(value) @@ -245,7 +276,7 @@ def put_all(self, pairs: dict): to save. 
Each key or value can be an item of representable Python type or a tuple of (item, hint), """ - return cache_put_all(self._get_best_node(), self._cache_id, pairs) + return cache_put_all(self._get_best_node(), self.cache_info, pairs) @status_to_exception(CacheError) def replace( @@ -266,7 +297,7 @@ def replace( result = cache_replace( self._get_best_node(key, key_hint), - self._cache_id, key, value, + self.cache_info, key, value, key_hint=key_hint, value_hint=value_hint ) result.value = self.client.unwrap_binary(result.value) @@ -282,9 +313,9 @@ def clear(self, keys: Optional[list] = None): """ conn = self._get_best_node() if keys: - return cache_clear_keys(conn, self._cache_id, keys) + return cache_clear_keys(conn, self.cache_info, keys) else: - return cache_clear(conn, self._cache_id) + return cache_clear(conn, self.cache_info) @status_to_exception(CacheError) def clear_key(self, key, key_hint: object = None): @@ -300,7 +331,7 @@ def clear_key(self, key, key_hint: object = None): return cache_clear_key( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, key_hint=key_hint ) @@ -313,7 +344,7 @@ def clear_keys(self, keys: Iterable): :param keys: a list of keys or (key, type hint) tuples """ - return cache_clear_keys(self._get_best_node(), self._cache_id, keys) + return cache_clear_keys(self._get_best_node(), self.cache_info, keys) @status_to_exception(CacheError) def contains_key(self, key, key_hint=None) -> bool: @@ -330,7 +361,7 @@ def contains_key(self, key, key_hint=None) -> bool: return cache_contains_key( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, key_hint=key_hint ) @@ -343,7 +374,7 @@ def contains_keys(self, keys: Iterable) -> bool: :param keys: a list of keys or (key, type hint) tuples, :return: boolean `True` when all keys are present, `False` otherwise. 
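The `with_expire_policy` helper added to `BaseCache` earlier in this hunk returns a view of the same cache with an `ExpiryPolicy` attached (and raises `NotSupportedByClusterError` on servers older than protocol 1.6.0). A usage sketch, assuming a running node; the cache name is illustrative and, per the docstrings above, float TTLs are seconds while int TTLs are milliseconds:

```python
import time

from pyignite import Client

client = Client()
with client.connect('127.0.0.1', 10800):
    cache = client.get_or_create_cache('ttl_example')

    # Entries created through this view expire roughly one second after creation.
    ttl_cache = cache.with_expire_policy(create=1.0)
    ttl_cache.put(1, 'short-lived')

    time.sleep(1.5)
    print(cache.contains_key(1))  # expected: False once the TTL has elapsed
```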
""" - return cache_contains_keys(self._get_best_node(), self._cache_id, keys) + return cache_contains_keys(self._get_best_node(), self.cache_info, keys) @status_to_exception(CacheError) def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: @@ -364,7 +395,7 @@ def get_and_put(self, key, value, key_hint=None, value_hint=None) -> Any: result = cache_get_and_put( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, value, key_hint, value_hint ) @@ -392,7 +423,7 @@ def get_and_put_if_absent( result = cache_get_and_put_if_absent( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, value, key_hint, value_hint ) @@ -417,7 +448,7 @@ def put_if_absent(self, key, value, key_hint=None, value_hint=None): return cache_put_if_absent( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, value, key_hint, value_hint ) @@ -437,7 +468,7 @@ def get_and_remove(self, key, key_hint=None) -> Any: result = cache_get_and_remove( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, key_hint ) @@ -466,7 +497,7 @@ def get_and_replace( result = cache_get_and_replace( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, value, key_hint, value_hint ) @@ -486,7 +517,7 @@ def remove_key(self, key, key_hint=None): key_hint = AnyDataObject.map_python_type(key) return cache_remove_key( - self._get_best_node(key, key_hint), self._cache_id, key, key_hint + self._get_best_node(key, key_hint), self.cache_info, key, key_hint ) @status_to_exception(CacheError) @@ -498,7 +529,7 @@ def remove_keys(self, keys: list): :param keys: list of keys or tuples of (key, key_hint) to remove. """ return cache_remove_keys( - self._get_best_node(), self._cache_id, keys + self._get_best_node(), self.cache_info, keys ) @status_to_exception(CacheError) @@ -506,7 +537,7 @@ def remove_all(self): """ Removes all cache entries, notifying listeners and cache writers. """ - return cache_remove_all(self._get_best_node(), self._cache_id) + return cache_remove_all(self._get_best_node(), self.cache_info) @status_to_exception(CacheError) def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): @@ -526,7 +557,7 @@ def remove_if_equals(self, key, sample, key_hint=None, sample_hint=None): return cache_remove_if_equals( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, sample, key_hint, sample_hint ) @@ -556,7 +587,7 @@ def replace_if_equals( result = cache_replace_if_equals( self._get_best_node(key, key_hint), - self._cache_id, + self.cache_info, key, sample, value, key_hint, sample_hint, value_hint ) @@ -574,7 +605,7 @@ def get_size(self, peek_modes=None): :return: integer number of cache entries. """ return cache_get_size( - self._get_best_node(), self._cache_id, peek_modes + self._get_best_node(), self.cache_info, peek_modes ) def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False) -> ScanCursor: @@ -590,7 +621,7 @@ def scan(self, page_size: int = 1, partitions: int = -1, local: bool = False) -> on local node only. Defaults to False, :return: Scan query cursor. 
""" - return ScanCursor(self.client, self._cache_id, page_size, partitions, local) + return ScanCursor(self.client, self.cache_info, page_size, partitions, local) def select_row( self, query_str: str, page_size: int = 1, @@ -621,5 +652,5 @@ def select_row( if not type_name: raise SQLError('Value type is unknown') - return SqlCursor(self.client, self._cache_id, type_name, query_str, page_size, query_args, + return SqlCursor(self.client, self.cache_info, type_name, query_str, page_size, query_args, distributed_joins, replicated_only, local, timeout) diff --git a/pyignite/client.py b/pyignite/client.py index 17e9d80..099b44d 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -58,6 +58,7 @@ from .datatypes.base import IgniteDataType from .datatypes.internal import tc_map from .exceptions import BinaryTypeError, CacheError, ReconnectError, connection_errors +from .queries.query import CacheInfo from .stream import BinaryStream, READ_BACKWARD from .utils import ( cache_id, capitalize, entity_id, schema_id, process_delimiter, status_to_exception, is_iterable, is_wrapped, @@ -719,20 +720,24 @@ def sql( :return: sql fields cursor with result rows as a lists. If `include_field_names` was set, the first row will hold field names. """ + if isinstance(cache, (int, str)): + c_info = CacheInfo(cache_id=cache_id(cache), protocol_context=self.protocol_context) + elif isinstance(cache, Cache): + c_info = cache.cache_info + else: + c_info = None - c_id = cache.cache_id if isinstance(cache, Cache) else cache_id(cache) - - if c_id != 0: + if c_info: schema = None - return SqlFieldsCursor(self, c_id, query_str, page_size, query_args, schema, statement_type, distributed_joins, - local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, - max_rows, timeout) + return SqlFieldsCursor(self, c_info, query_str, page_size, query_args, schema, statement_type, + distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, + include_field_names, max_rows, timeout) def get_cluster(self) -> 'Cluster': """ - Gets client cluster facade. + Get client cluster facade. - :return: Client cluster facade. + :return: :py:class:`~pyignite.cluster.Cluster` instance. """ return Cluster(self) diff --git a/pyignite/cluster.py b/pyignite/cluster.py index f10afe4..d953b5c 100644 --- a/pyignite/cluster.py +++ b/pyignite/cluster.py @@ -20,6 +20,7 @@ from pyignite.api.cluster import cluster_get_state, cluster_set_state from pyignite.exceptions import ClusterError from pyignite.utils import status_to_exception +from pyignite.datatypes import ClusterState class Cluster: @@ -30,27 +31,34 @@ class Cluster: """ def __init__(self, client: 'Client'): + """ + :param client: :py:class:`~pyignite.client.Client` instance. + """ self._client = client @status_to_exception(ClusterError) - def get_state(self): + def get_state(self) -> 'ClusterState': """ Gets current cluster state. - :return: Current cluster state. This is one of ClusterState.INACTIVE, - ClusterState.ACTIVE or ClusterState.ACTIVE_READ_ONLY. + :return: Current cluster state. This is one of + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.INACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE_READ_ONLY`. """ return cluster_get_state(self._client.random_node) @status_to_exception(ClusterError) - def set_state(self, state): + def set_state(self, state: 'ClusterState'): """ Changes current cluster state to the given. 
Note: Deactivation clears in-memory caches (without persistence) including the system caches. - :param state: New cluster state. This is one of ClusterState.INACTIVE, - ClusterState.ACTIVE or ClusterState.ACTIVE_READ_ONLY. + :param state: New cluster state. This is one of + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.INACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE`, + :py:attr:`~pyignite.datatypes.cluster_state.ClusterState.ACTIVE_READ_ONLY`. """ return cluster_set_state(self._client.random_node, state) diff --git a/pyignite/connection/protocol_context.py b/pyignite/connection/protocol_context.py index 54f5240..be23e56 100644 --- a/pyignite/connection/protocol_context.py +++ b/pyignite/connection/protocol_context.py @@ -98,3 +98,6 @@ def is_cluster_api_supported(self) -> bool: Check whether cluster API supported by the current protocol. """ return self.features and BitmaskFeature.CLUSTER_API in self.features + + def is_expiry_policy_supported(self) -> bool: + return self.version >= (1, 6, 0) diff --git a/pyignite/cursors.py b/pyignite/cursors.py index 0a8f0b0..a690d94 100644 --- a/pyignite/cursors.py +++ b/pyignite/cursors.py @@ -64,15 +64,15 @@ def more(self, value): setattr(self, '_more', value) @property - def cache_id(self): + def cache_info(self): """ Cache id. """ - return getattr(self, '_cache_id', None) + return getattr(self, '_cache_info', None) - @cache_id.setter - def cache_id(self, value): - setattr(self, '_cache_id', value) + @cache_info.setter + def cache_info(self, value): + setattr(self, '_cache_info', value) @property def client(self): @@ -134,9 +134,9 @@ async def close(self): class AbstractScanCursor: - def __init__(self, client, cache_id, page_size, partitions, local): + def __init__(self, client, cache_info, page_size, partitions, local): self.client = client - self.cache_id = cache_id + self.cache_info = cache_info self._page_size = page_size self._partitions = partitions self._local = local @@ -159,18 +159,18 @@ class ScanCursor(AbstractScanCursor, CursorMixin): """ Synchronous scan cursor. """ - def __init__(self, client, cache_id, page_size, partitions, local): + def __init__(self, client, cache_info, page_size, partitions, local): """ :param client: Synchronous Apache Ignite client. - :param cache_id: Cache id. + :param cache_info: Cache meta info. :param page_size: page size. :param partitions: number of partitions to query (negative to query entire cache). :param local: pass True if this query should be executed on local node only. """ - super().__init__(client, cache_id, page_size, partitions, local) + super().__init__(client, cache_info, page_size, partitions, local) self.connection = self.client.random_node - result = scan(self.connection, self.cache_id, self._page_size, self._partitions, self._local) + result = scan(self.connection, self.cache_info, self._page_size, self._partitions, self._local) self._finalize_init(result) def __next__(self): @@ -193,20 +193,20 @@ class AioScanCursor(AbstractScanCursor, AioCursorMixin): """ Asynchronous scan query cursor. """ - def __init__(self, client, cache_id, page_size, partitions, local): + def __init__(self, client, cache_info, page_size, partitions, local): """ :param client: Asynchronous Apache Ignite client. - :param cache_id: Cache id. + :param cache_info: Cache meta info. :param page_size: page size. :param partitions: number of partitions to query (negative to query entire cache). :param local: pass True if this query should be executed on local node only. 
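`is_expiry_policy_supported` above simply gates the feature on protocol version 1.6.0 or newer, so callers can probe it before opting into TTLs. A defensive-usage sketch (cache name illustrative):

```python
from pyignite import Client

client = Client()
with client.connect('127.0.0.1', 10800):
    cache = client.get_or_create_cache('maybe_ttl')

    # ExpiryPolicy needs protocol 1.6.0+; fall back to the plain cache otherwise.
    if client.protocol_context.is_expiry_policy_supported():
        cache = cache.with_expire_policy(create=5.0)

    cache.put('key', 'value')
```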
""" - super().__init__(client, cache_id, page_size, partitions, local) + super().__init__(client, cache_info, page_size, partitions, local) async def __aenter__(self): if not self.connection: self.connection = await self.client.random_node() - result = await scan_async(self.connection, self.cache_id, self._page_size, self._partitions, self._local) + result = await scan_async(self.connection, self.cache_info, self._page_size, self._partitions, self._local) self._finalize_init(result) return self @@ -238,15 +238,15 @@ class SqlCursor(CursorMixin): """ Synchronous SQL query cursor. """ - def __init__(self, client, cache_id, *args, **kwargs): + def __init__(self, client, cache_info, *args, **kwargs): """ :param client: Synchronous Apache Ignite client. - :param cache_id: Cache id. + :param cache_info: Cache meta info. """ self.client = client - self.cache_id = cache_id + self.cache_info = cache_info self.connection = self.client.random_node - result = sql(self.connection, self.cache_id, *args, **kwargs) + result = sql(self.connection, self.cache_info, *args, **kwargs) if result.status != 0: raise SQLError(result.message) @@ -274,9 +274,9 @@ def __next__(self): class AbstractSqlFieldsCursor: - def __init__(self, client, cache_id): + def __init__(self, client, cache_info): self.client = client - self.cache_id = cache_id + self.cache_info = cache_info def _finalize_init(self, result): if result.status != 0: @@ -295,14 +295,14 @@ class SqlFieldsCursor(AbstractSqlFieldsCursor, CursorMixin): """ Synchronous SQL fields query cursor. """ - def __init__(self, client, cache_id, *args, **kwargs): + def __init__(self, client, cache_info, *args, **kwargs): """ :param client: Synchronous Apache Ignite client. - :param cache_id: Cache id. + :param cache_info: Cache meta info. """ - super().__init__(client, cache_id) + super().__init__(client, cache_info) self.connection = self.client.random_node - self._finalize_init(sql_fields(self.connection, self.cache_id, *args, **kwargs)) + self._finalize_init(sql_fields(self.connection, self.cache_info, *args, **kwargs)) def __next__(self): if not self.data: @@ -334,12 +334,12 @@ class AioSqlFieldsCursor(AbstractSqlFieldsCursor, AioCursorMixin): """ Asynchronous SQL fields query cursor. """ - def __init__(self, client, cache_id, *args, **kwargs): + def __init__(self, client, cache_info, *args, **kwargs): """ :param client: Synchronous Apache Ignite client. - :param cache_id: Cache id. + :param cache_info: Cache meta info. 
""" - super().__init__(client, cache_id) + super().__init__(client, cache_info) self._params = (args, kwargs) async def __aenter__(self): @@ -381,4 +381,4 @@ async def _initialize(self, *args, **kwargs): return self.connection = await self.client.random_node() - self._finalize_init(await sql_fields_async(self.connection, self.cache_id, *args, **kwargs)) + self._finalize_init(await sql_fields_async(self.connection, self.cache_info, *args, **kwargs)) diff --git a/pyignite/datatypes/__init__.py b/pyignite/datatypes/__init__.py index 5024f79..4f78dce 100644 --- a/pyignite/datatypes/__init__.py +++ b/pyignite/datatypes/__init__.py @@ -25,3 +25,5 @@ from .primitive_arrays import * from .primitive_objects import * from .standard import * +from .cluster_state import ClusterState +from .expiry_policy import ExpiryPolicy diff --git a/pyignite/datatypes/cache_config.py b/pyignite/datatypes/cache_config.py index 04ff607..a2b4322 100644 --- a/pyignite/datatypes/cache_config.py +++ b/pyignite/datatypes/cache_config.py @@ -12,14 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +from . import ExpiryPolicy from .standard import String from .internal import AnyDataObject, Struct, StructArray from .primitive import * __all__ = [ - 'cache_config_struct', 'CacheMode', 'PartitionLossPolicy', + 'get_cache_config_struct', 'CacheMode', 'PartitionLossPolicy', 'RebalanceMode', 'WriteSynchronizationMode', 'IndexType', ] @@ -118,36 +118,40 @@ class CacheAtomicityMode(Int): ]) -cache_config_struct = Struct([ - ('length', Int), - ('cache_atomicity_mode', CacheAtomicityMode), - ('backups_number', Int), - ('cache_mode', CacheMode), - ('copy_on_read', Bool), - ('data_region_name', String), - ('eager_ttl', Bool), - ('statistics_enabled', Bool), - ('group_name', String), - ('default_lock_timeout', Long), - ('max_concurrent_async_operations', Int), - ('max_query_iterators', Int), - ('name', String), - ('is_onheap_cache_enabled', Bool), - ('partition_loss_policy', PartitionLossPolicy), - ('query_detail_metric_size', Int), - ('query_parallelism', Int), - ('read_from_backup', Bool), - ('rebalance_batch_size', Int), - ('rebalance_batches_prefetch_count', Long), - ('rebalance_delay', Long), - ('rebalance_mode', RebalanceMode), - ('rebalance_order', Int), - ('rebalance_throttle', Long), - ('rebalance_timeout', Long), - ('sql_escape_all', Bool), - ('sql_index_inline_max_size', Int), - ('sql_schema', String), - ('write_synchronization_mode', WriteSynchronizationMode), - ('cache_key_configuration', CacheKeyConfiguration), - ('query_entities', QueryEntities), -]) +def get_cache_config_struct(protocol_context): + fields = [ + ('length', Int), + ('cache_atomicity_mode', CacheAtomicityMode), + ('backups_number', Int), + ('cache_mode', CacheMode), + ('copy_on_read', Bool), + ('data_region_name', String), + ('eager_ttl', Bool), + ('statistics_enabled', Bool), + ('group_name', String), + ('default_lock_timeout', Long), + ('max_concurrent_async_operations', Int), + ('max_query_iterators', Int), + ('name', String), + ('is_onheap_cache_enabled', Bool), + ('partition_loss_policy', PartitionLossPolicy), + ('query_detail_metric_size', Int), + ('query_parallelism', Int), + ('read_from_backup', Bool), + ('rebalance_batch_size', Int), + ('rebalance_batches_prefetch_count', Long), + ('rebalance_delay', Long), + ('rebalance_mode', RebalanceMode), + ('rebalance_order', Int), + ('rebalance_throttle', Long), + 
('rebalance_timeout', Long), + ('sql_escape_all', Bool), + ('sql_index_inline_max_size', Int), + ('sql_schema', String), + ('write_synchronization_mode', WriteSynchronizationMode), + ('cache_key_configuration', CacheKeyConfiguration), + ('query_entities', QueryEntities), + ] + if protocol_context.is_expiry_policy_supported(): + fields.append(('expiry_policy', ExpiryPolicy)) + return Struct(fields=fields) diff --git a/pyignite/datatypes/cache_properties.py b/pyignite/datatypes/cache_properties.py index 9bf34de..a1766f3 100644 --- a/pyignite/datatypes/cache_properties.py +++ b/pyignite/datatypes/cache_properties.py @@ -15,6 +15,7 @@ import ctypes +from . import ExpiryPolicy from .prop_codes import * from .cache_config import ( CacheMode, CacheAtomicityMode, PartitionLossPolicy, RebalanceMode, @@ -34,7 +35,7 @@ 'PropRebalanceOrder', 'PropRebalanceThrottle', 'PropGroupName', 'PropCacheKeyConfiguration', 'PropDefaultLockTimeout', 'PropMaxConcurrentAsyncOperation', 'PropPartitionLossPolicy', - 'PropEagerTTL', 'PropStatisticsEnabled', 'prop_map', 'AnyProperty', + 'PropEagerTTL', 'PropStatisticsEnabled', 'PropExpiryPolicy', 'prop_map', 'AnyProperty', ] @@ -70,6 +71,7 @@ def prop_map(code: int): PROP_PARTITION_LOSS_POLICY: PropPartitionLossPolicy, PROP_EAGER_TTL: PropEagerTTL, PROP_STATISTICS_ENABLED: PropStatisticsEnabled, + PROP_EXPIRY_POLICY: PropExpiryPolicy, }[code] @@ -285,6 +287,11 @@ class PropStatisticsEnabled(PropBase): prop_data_class = Bool +class PropExpiryPolicy(PropBase): + prop_code = PROP_EXPIRY_POLICY + prop_data_class = ExpiryPolicy + + class AnyProperty(PropBase): @classmethod diff --git a/pyignite/datatypes/cluster_state.py b/pyignite/datatypes/cluster_state.py index 863a1d2..def5591 100644 --- a/pyignite/datatypes/cluster_state.py +++ b/pyignite/datatypes/cluster_state.py @@ -17,6 +17,10 @@ class ClusterState(IntEnum): + """ + Cluster states. + """ + #: Cluster deactivated. Cache operations aren't allowed. INACTIVE = 0 diff --git a/pyignite/datatypes/expiry_policy.py b/pyignite/datatypes/expiry_policy.py new file mode 100644 index 0000000..3572754 --- /dev/null +++ b/pyignite/datatypes/expiry_policy.py @@ -0,0 +1,110 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import ctypes +from io import SEEK_CUR +from typing import Union + +import attr + +from pyignite.constants import PROTOCOL_BYTE_ORDER + + +def _positive(_, attrib, value): + if value < 0 and value not in [ExpiryPolicy.UNCHANGED, ExpiryPolicy.ETERNAL]: + raise ValueError(f"'{attrib.name}' value must not be negative") + + +def _write_duration(stream, value): + if isinstance(value, float): + value = int(value * 1000) + + stream.write(value.to_bytes(8, byteorder=PROTOCOL_BYTE_ORDER, signed=True)) + + +@attr.s +class ExpiryPolicy: + """ + Set expiry policy for the cache. 
+ """ + #: Set TTL unchanged. + UNCHANGED = -2 + + #: Set TTL eternal. + ETERNAL = -1 + + #: Set TTL for create in seconds(float) or millis(int) + create = attr.ib(kw_only=True, default=UNCHANGED, + validator=[attr.validators.instance_of((int, float)), _positive]) + + #: Set TTL for update in seconds(float) or millis(int) + update = attr.ib(kw_only=True, default=UNCHANGED, type=Union[int, float], + validator=[attr.validators.instance_of((int, float)), _positive]) + + #: Set TTL for access in seconds(float) or millis(int) + access = attr.ib(kw_only=True, default=UNCHANGED, type=Union[int, float], + validator=[attr.validators.instance_of((int, float)), _positive]) + + class _CType(ctypes.LittleEndianStructure): + _pack_ = 1 + _fields_ = [ + ('not_null', ctypes.c_byte), + ('create', ctypes.c_longlong), + ('update', ctypes.c_longlong), + ('access', ctypes.c_longlong) + ] + + @classmethod + def parse(cls, stream): + init = stream.tell() + not_null = int.from_bytes(stream.slice(init, 1), byteorder=PROTOCOL_BYTE_ORDER) + if not_null: + stream.seek(ctypes.sizeof(ExpiryPolicy._CType), SEEK_CUR) + return ExpiryPolicy._CType + stream.seek(ctypes.sizeof(ctypes.c_byte), SEEK_CUR) + return ctypes.c_byte + + @classmethod + async def parse_async(cls, stream): + return cls.parse(stream) + + @classmethod + def to_python(cls, ctypes_object): + if ctypes_object == 0: + return None + + return ExpiryPolicy(create=ctypes_object.create, update=ctypes_object.update, access=ctypes_object.access) + + @classmethod + async def to_python_async(cls, ctypes_object): + return cls.to_python(ctypes_object) + + @classmethod + def from_python(cls, stream, value): + if not value: + stream.write(b'\x00') + return + + stream.write(b'\x01') + cls.write_policy(stream, value) + + @classmethod + async def from_python_async(cls, stream, value): + return cls.from_python(stream, value) + + @classmethod + def write_policy(cls, stream, value): + _write_duration(stream, value.create) + _write_duration(stream, value.update) + _write_duration(stream, value.access) diff --git a/pyignite/datatypes/prop_codes.py b/pyignite/datatypes/prop_codes.py index 72ffce1..9709313 100644 --- a/pyignite/datatypes/prop_codes.py +++ b/pyignite/datatypes/prop_codes.py @@ -47,3 +47,4 @@ PROP_PARTITION_LOSS_POLICY = 404 PROP_EAGER_TTL = 405 PROP_STATISTICS_ENABLED = 406 +PROP_EXPIRY_POLICY = 407 diff --git a/pyignite/queries/query.py b/pyignite/queries/query.py index 8dac64f..d971eef 100644 --- a/pyignite/queries/query.py +++ b/pyignite/queries/query.py @@ -21,7 +21,10 @@ from pyignite.api.result import APIResult from pyignite.connection import Connection, AioConnection -from pyignite.constants import MIN_LONG, MAX_LONG, RHF_TOPOLOGY_CHANGED +from pyignite.connection.protocol_context import ProtocolContext +from pyignite.constants import MIN_LONG, MAX_LONG, RHF_TOPOLOGY_CHANGED, PROTOCOL_BYTE_ORDER +from pyignite.datatypes import ExpiryPolicy +from pyignite.exceptions import NotSupportedByClusterError from pyignite.queries.response import Response from pyignite.stream import AioBinaryStream, BinaryStream, READ_BACKWARD @@ -44,6 +47,34 @@ def _internal(): return _internal() +@attr.s +class CacheInfo: + cache_id = attr.ib(kw_only=True, type=int) + expiry_policy = attr.ib(kw_only=True, type=ExpiryPolicy, default=None) + protocol_context = attr.ib(kw_only=True, type=ProtocolContext) + + @classmethod + async def from_python_async(cls, stream, value): + return cls.from_python(stream, value) + + @classmethod + def from_python(cls, stream, value): + cache_id = 
value.cache_id if value else 0 + expiry_policy = value.expiry_policy if value else None + flags = 0 + + stream.write(cache_id.to_bytes(4, byteorder=PROTOCOL_BYTE_ORDER, signed=True)) + + if expiry_policy: + if not value.protocol_context.is_expiry_policy_supported(): + raise NotSupportedByClusterError("'ExpiryPolicy' API is not supported by the cluster") + flags |= 0x04 + + stream.write(flags.to_bytes(1, byteorder=PROTOCOL_BYTE_ORDER)) + if expiry_policy: + ExpiryPolicy.write_policy(stream, expiry_policy) + + @attr.s class Query: op_code = attr.ib(type=int) diff --git a/tests/affinity/conftest.py b/tests/affinity/conftest.py index e23e0e6..da645c1 100644 --- a/tests/affinity/conftest.py +++ b/tests/affinity/conftest.py @@ -66,7 +66,6 @@ async def async_client(connection_param): @pytest.fixture(scope='module', autouse=True) def skip_if_no_affinity(request, server1): client = Client(partition_aware=True) - client.connect('127.0.0.1', 10801) - - if not client.partition_awareness_supported_by_protocol: - pytest.skip(f'skipped {request.node.name}, partition awareness is not supported.') + with client.connect('127.0.0.1', 10801): + if not client.partition_awareness_supported_by_protocol: + pytest.skip(f'skipped {request.node.name}, partition awareness is not supported.') diff --git a/tests/affinity/test_affinity.py b/tests/affinity/test_affinity.py index 64b9cc5..3097991 100644 --- a/tests/affinity/test_affinity.py +++ b/tests/affinity/test_affinity.py @@ -312,7 +312,7 @@ def inner(): best_node = client.get_best_node(cache, key, key_hint=key_hint) for node in filter(lambda n: n.alive, client._nodes): - result = cache_local_peek(node, cache.cache_id, key, key_hint=key_hint) + result = cache_local_peek(node, cache.cache_info, key, key_hint=key_hint) check_peek_value(node, best_node, result) @@ -321,7 +321,7 @@ async def inner_async(): best_node = await client.get_best_node(cache, key, key_hint=key_hint) for node in filter(lambda n: n.alive, client._nodes): - result = await cache_local_peek_async(node, cache.cache_id, key, key_hint=key_hint) + result = await cache_local_peek_async(node, cache.cache_info, key, key_hint=key_hint) check_peek_value(node, best_node, result) diff --git a/tests/common/conftest.py b/tests/common/conftest.py index 243d822..0f28f7e 100644 --- a/tests/common/conftest.py +++ b/tests/common/conftest.py @@ -70,3 +70,14 @@ def cache(client): yield cache finally: cache.destroy() + + +@pytest.fixture(autouse=True) +def expiry_policy_supported(request, server1): + client = Client() + with client.connect('127.0.0.1', 10801): + result = client.protocol_context.is_expiry_policy_supported() + if not result and request.node.get_closest_marker('skip_if_no_expiry_policy'): + pytest.skip(f'skipped {request.node.name}, ExpiryPolicy APIis not supported.') + + return result diff --git a/tests/common/test_binary.py b/tests/common/test_binary.py index 1d7192f..c94c4d5 100644 --- a/tests/common/test_binary.py +++ b/tests/common/test_binary.py @@ -74,7 +74,7 @@ def table_cache_read(client): cache = client.get_cache(table_cache_name) yield cache - cache.destroy() + client.sql(drop_query) @pytest.fixture @@ -87,7 +87,7 @@ async def table_cache_read_async(async_client): cache = await async_client.get_cache(table_cache_name) yield cache - await cache.destroy() + await async_client.sql(drop_query) def test_sql_read_as_binary(table_cache_read): diff --git a/tests/common/test_cache_config.py b/tests/common/test_cache_config.py index e68eef5..e5ed33c 100644 --- a/tests/common/test_cache_config.py +++ 
b/tests/common/test_cache_config.py @@ -28,7 +28,7 @@ PROP_PARTITION_LOSS_POLICY, PROP_EAGER_TTL, PROP_STATISTICS_ENABLED, PROP_REBALANCE_MODE, PROP_REBALANCE_DELAY, PROP_REBALANCE_TIMEOUT, PROP_REBALANCE_BATCH_SIZE, PROP_REBALANCE_BATCHES_PREFETCH_COUNT, PROP_REBALANCE_ORDER, PROP_REBALANCE_THROTTLE, PROP_QUERY_ENTITIES, PROP_QUERY_PARALLELISM, PROP_QUERY_DETAIL_METRIC_SIZE, - PROP_SQL_SCHEMA, PROP_SQL_INDEX_INLINE_MAX_SIZE, PROP_SQL_ESCAPE_ALL, PROP_MAX_QUERY_ITERATORS + PROP_SQL_SCHEMA, PROP_SQL_INDEX_INLINE_MAX_SIZE, PROP_SQL_ESCAPE_ALL, PROP_MAX_QUERY_ITERATORS, PROP_EXPIRY_POLICY ) from pyignite.exceptions import CacheError @@ -36,8 +36,8 @@ @pytest.fixture -def test_cache_settings(): - return { +def test_cache_settings(expiry_policy_supported): + settings = { PROP_NAME: cache_name, PROP_CACHE_MODE: CacheMode.PARTITIONED, PROP_CACHE_ATOMICITY_MODE: CacheAtomicityMode.TRANSACTIONAL, @@ -96,6 +96,13 @@ def test_cache_settings(): PROP_STATISTICS_ENABLED: True } + if expiry_policy_supported: + settings[PROP_EXPIRY_POLICY] = None + elif 'PROP_EXPIRY_POLICY' in ALL_PROPS: + del ALL_PROPS['PROP_EXPIRY_POLICY'] + + return settings + @pytest.fixture def cache(client): diff --git a/tests/common/test_expiry_policy.py b/tests/common/test_expiry_policy.py new file mode 100644 index 0000000..cc852c7 --- /dev/null +++ b/tests/common/test_expiry_policy.py @@ -0,0 +1,171 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import asyncio +import time + +import pytest + +from pyignite.datatypes import ExpiryPolicy +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_EXPIRY_POLICY + + +@pytest.mark.skip_if_no_expiry_policy +def test_expiry_policy(cache): + ttl, num_retries = 0.6, 10 + cache_eternal = cache.with_expire_policy(create=ExpiryPolicy.ETERNAL) + cache_created = cache.with_expire_policy(create=0.6) + cache_updated = cache.with_expire_policy(update=0.6) + cache_accessed = cache.with_expire_policy(access=0.6) + + for _ in range(num_retries): + cache.clear() + + start = time.time() + + cache_eternal.put(0, 0) + cache_created.put(1, 1) + cache_updated.put(2, 2) + cache_accessed.put(3, 3) + + time.sleep(ttl * 2 / 3) + + result = [cache.contains_key(k) for k in range(4)] + + if time.time() - start >= ttl: + continue + + assert all(result) + + start = time.time() + + cache_created.put(1, 2) # Check that update doesn't matter for created policy + cache_created.get(1) # Check that access doesn't matter for created policy + cache_updated.put(2, 3) # Check that update policy works. + cache_accessed.get(3) # Check that access policy works. 
+ + time.sleep(ttl * 2 / 3) + + result = [cache.contains_key(k) for k in range(4)] + + if time.time() - start >= ttl: + continue + + assert result == [True, False, True, True] + + time.sleep(ttl * 2 / 3) + + cache_updated.get(2) # Check that access doesn't matter for updated policy. + + time.sleep(ttl * 2 / 3) + + result = [cache.contains_key(k) for k in range(0, 4)] + assert result == [True, False, False, False] + + +@pytest.mark.asyncio +@pytest.mark.skip_if_no_expiry_policy +async def test_expiry_policy_async(async_cache): + ttl, num_retries = 0.6, 10 + cache_eternal = async_cache.with_expire_policy(create=ExpiryPolicy.ETERNAL) + cache_created = async_cache.with_expire_policy(create=0.6) + cache_updated = async_cache.with_expire_policy(update=0.6) + cache_accessed = async_cache.with_expire_policy(access=0.6) + + for _ in range(num_retries): + await async_cache.clear() + + start = time.time() + + await asyncio.gather( + cache_eternal.put(0, 0), + cache_created.put(1, 1), + cache_updated.put(2, 2), + cache_accessed.put(3, 3) + ) + + await asyncio.sleep(ttl * 2 / 3) + + result = await asyncio.gather(*[async_cache.contains_key(k) for k in range(4)]) + + if time.time() - start >= ttl: + continue + + assert all(result) + + start = time.time() + + await asyncio.gather( + cache_created.put(1, 2), # Check that update doesn't matter for created policy + cache_created.get(1), # Check that access doesn't matter for created policy + cache_updated.put(2, 3), # Check that update policy works. + cache_accessed.get(3) # Check that access policy works. + ) + + await asyncio.sleep(ttl * 2 / 3) + + result = await asyncio.gather(*[async_cache.contains_key(k) for k in range(4)]) + + if time.time() - start >= ttl: + continue + + assert result == [True, False, True, True] + + await asyncio.sleep(ttl * 2 / 3) + + cache_updated.get(2) # Check that access doesn't matter for updated policy. 
+ + await asyncio.sleep(ttl * 2 / 3) + + result = await asyncio.gather(*[async_cache.contains_key(k) for k in range(4)]) + assert result == [True, False, False, False] + +create_cache_with_expiry_params = ( + 'expiry_policy', + [ + None, + ExpiryPolicy(), + ExpiryPolicy(create=ExpiryPolicy.ETERNAL), + ExpiryPolicy(create=2000, update=4000, access=6000) + ] +) + + +@pytest.mark.parametrize(*create_cache_with_expiry_params) +@pytest.mark.skip_if_no_expiry_policy +def test_create_cache_with_expiry_policy(client, expiry_policy): + cache = client.create_cache({ + PROP_NAME: 'expiry_cache', + PROP_EXPIRY_POLICY: expiry_policy + }) + try: + settings = cache.settings + assert settings[PROP_EXPIRY_POLICY] == expiry_policy + finally: + cache.destroy() + + +@pytest.mark.parametrize(*create_cache_with_expiry_params) +@pytest.mark.skip_if_no_expiry_policy +@pytest.mark.asyncio +async def test_create_cache_with_expiry_policy_async(async_client, expiry_policy): + cache = await async_client.create_cache({ + PROP_NAME: 'expiry_cache', + PROP_EXPIRY_POLICY: expiry_policy + }) + try: + settings = await cache.settings() + assert settings[PROP_EXPIRY_POLICY] == expiry_policy + finally: + await cache.destroy() diff --git a/tests/conftest.py b/tests/conftest.py index 65134fd..1c65356 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -64,6 +64,7 @@ def pytest_addoption(parser): def pytest_configure(config): marker_docs = [ "skip_if_no_cext: mark test to run only if c extension is available", + "skip_if_no_expiry_policy: mark test to run only if expiry policy is supported by server", "examples: mark test to run only if --examples are set" ] diff --git a/tests/custom/test_cluster.py b/tests/custom/test_cluster.py index e82e238..f1ffcfd 100644 --- a/tests/custom/test_cluster.py +++ b/tests/custom/test_cluster.py @@ -19,7 +19,7 @@ from pyignite.exceptions import CacheError from tests.util import clear_ignite_work_dir, start_ignite_gen -from pyignite.datatypes.cluster_state import ClusterState +from pyignite.datatypes import ClusterState @pytest.fixture(params=['with-persistence', 'without-persistence']) @@ -44,6 +44,15 @@ def server2(with_persistence, cleanup): yield from start_ignite_gen(idx=2, use_persistence=with_persistence) +@pytest.fixture(autouse=True) +def cluster_api_supported(request, server1): + client = Client() + client.connect('127.0.0.1', 10801) + + if not client.protocol_context.is_cluster_api_supported(): + pytest.skip(f'skipped {request.node.name}, ExpiryPolicy APIis not supported.') + + def test_cluster_set_active(with_persistence): key = 42 val = 42 From 746dd1315a0a86d8f1c501d80f3a91dfafd0f648 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Thu, 13 May 2021 17:43:50 +0300 Subject: [PATCH 36/62] IGNITE-14705 Fix handling collections with binary objects - Fixes #37. 
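A sketch of the behaviour this fix targets: collections whose elements are Ignite binary objects are now unwrapped element by element by `unwrap_binary`. The `Person` class, the cache name and the collection type value are illustrative only.

```python
from collections import OrderedDict

from pyignite import Client, GenericObjectMeta
from pyignite.datatypes import CollectionObject, String

class Person(metaclass=GenericObjectMeta, schema=OrderedDict([('name', String)])):
    pass

client = Client()
with client.connect('127.0.0.1', 10800):
    cache = client.get_or_create_cache('people')
    # Store an ArrayList-style collection (type 1) of binary objects.
    cache.put(1, (1, [Person(name='Ivan')]), value_hint=CollectionObject)

    col_type, people = cache.get(1)
    # With this fix the elements come back as Person instances,
    # not as raw (blob, offset) wrapped tuples.
    print(people[0].name)
```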
--- pyignite/aio_client.py | 27 +++++++--- pyignite/client.py | 27 ++++++---- pyignite/datatypes/base.py | 12 ++--- pyignite/datatypes/cache_properties.py | 12 ++--- pyignite/datatypes/complex.py | 66 ++++++++++--------------- pyignite/datatypes/expiry_policy.py | 4 +- pyignite/datatypes/internal.py | 48 +++++++++--------- pyignite/datatypes/null_object.py | 18 +++---- pyignite/datatypes/primitive.py | 6 +-- pyignite/datatypes/primitive_arrays.py | 14 +++--- pyignite/datatypes/primitive_objects.py | 22 ++++----- pyignite/datatypes/standard.py | 38 +++++++------- pyignite/queries/response.py | 29 +++++------ pyignite/utils.py | 7 --- tests/common/test_binary.py | 56 ++++++++++++++++++++- tests/common/test_datatypes.py | 13 +++++ tests/common/test_key_value.py | 35 +++++++++++-- 17 files changed, 260 insertions(+), 174 deletions(-) diff --git a/pyignite/aio_client.py b/pyignite/aio_client.py index b0498f7..8c2ca56 100644 --- a/pyignite/aio_client.py +++ b/pyignite/aio_client.py @@ -31,7 +31,7 @@ from .exceptions import BinaryTypeError, CacheError, ReconnectError, connection_errors from .queries.query import CacheInfo from .stream import AioBinaryStream, READ_BACKWARD -from .utils import cache_id, entity_id, status_to_exception, is_wrapped +from .utils import cache_id, entity_id, status_to_exception __all__ = ['AioClient'] @@ -269,11 +269,24 @@ async def unwrap_binary(self, value: Any) -> Any: :return: the result of the Binary Object unwrapping with all other data left intact. """ - if is_wrapped(value): - blob, offset = value - with AioBinaryStream(self, blob) as stream: - data_class = await BinaryObject.parse_async(stream) - return await BinaryObject.to_python_async(stream.read_ctype(data_class, direction=READ_BACKWARD), self) + if isinstance(value, tuple) and len(value) == 2: + if type(value[0]) is bytes and type(value[1]) is int: + blob, offset = value + with AioBinaryStream(self, blob) as stream: + data_class = await BinaryObject.parse_async(stream) + return await BinaryObject.to_python_async(stream.read_ctype(data_class, direction=READ_BACKWARD), + client=self) + + if isinstance(value[0], int): + col_type, collection = value + if isinstance(collection, list): + coros = [self.unwrap_binary(v) for v in collection] + return col_type, await asyncio.gather(*coros) + + if isinstance(collection, dict): + coros = [asyncio.gather(self.unwrap_binary(k), self.unwrap_binary(v)) + for k, v in collection.items()] + return col_type, dict(await asyncio.gather(*coros)) return value @status_to_exception(CacheError) @@ -351,7 +364,7 @@ async def get_best_node( key, key_hint = self._get_affinity_key(c_id, key, key_hint) - hashcode = await key_hint.hashcode_async(key, self) + hashcode = await key_hint.hashcode_async(key, client=self) best_node = self._get_node_by_hashcode(c_id, hashcode, parts) if best_node: diff --git a/pyignite/client.py b/pyignite/client.py index 099b44d..01ee373 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -61,7 +61,7 @@ from .queries.query import CacheInfo from .stream import BinaryStream, READ_BACKWARD from .utils import ( - cache_id, capitalize, entity_id, schema_id, process_delimiter, status_to_exception, is_iterable, is_wrapped, + cache_id, capitalize, entity_id, schema_id, process_delimiter, status_to_exception, is_iterable, get_field_by_id, unsigned ) from .binary import GenericObjectMeta @@ -539,17 +539,26 @@ def query_binary_type(self, binary_type: Union[int, str], schema: Union[int, dic def unwrap_binary(self, value: Any) -> Any: """ - Detects and recursively 
unwraps Binary Object. + Detects and recursively unwraps Binary Object or collections of BinaryObject. - :param value: anything that could be a Binary Object, + :param value: anything that could be a Binary Object or collection of BinaryObject, :return: the result of the Binary Object unwrapping with all other data left intact. """ - if is_wrapped(value): - blob, offset = value - with BinaryStream(self, blob) as stream: - data_class = BinaryObject.parse(stream) - return BinaryObject.to_python(stream.read_ctype(data_class, direction=READ_BACKWARD), self) + if isinstance(value, tuple) and len(value) == 2: + if type(value[0]) is bytes and type(value[1]) is int: + blob, offset = value + with BinaryStream(self, blob) as stream: + data_class = BinaryObject.parse(stream) + return BinaryObject.to_python(stream.read_ctype(data_class, direction=READ_BACKWARD), client=self) + + if isinstance(value[0], int): + col_type, collection = value + if isinstance(collection, list): + return col_type, [self.unwrap_binary(v) for v in collection] + + if isinstance(collection, dict): + return col_type, {self.unwrap_binary(k): self.unwrap_binary(v) for k, v in collection.items()} return value @status_to_exception(CacheError) @@ -619,7 +628,7 @@ def get_best_node( return conn key, key_hint = self._get_affinity_key(c_id, key, key_hint) - hashcode = key_hint.hashcode(key, self) + hashcode = key_hint.hashcode(key, client=self) best_node = self._get_node_by_hashcode(c_id, hashcode, parts) if best_node: diff --git a/pyignite/datatypes/base.py b/pyignite/datatypes/base.py index 87b251c..5a4c780 100644 --- a/pyignite/datatypes/base.py +++ b/pyignite/datatypes/base.py @@ -48,11 +48,11 @@ class IgniteDataType(metaclass=IgniteDataTypeMeta): classes, both object and payload varieties. """ @classmethod - async def hashcode_async(cls, value, *args, **kwargs): - return cls.hashcode(value, *args, **kwargs) + async def hashcode_async(cls, value, **kwargs): + return cls.hashcode(value, **kwargs) @classmethod - def hashcode(cls, value, *args, **kwargs): + def hashcode(cls, value, **kwargs): return 0 @classmethod @@ -72,9 +72,9 @@ async def from_python_async(cls, stream, value, **kwargs): cls.from_python(stream, value, **kwargs) @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): + def to_python(cls, ctypes_object, **kwargs): raise NotImplementedError @classmethod - async def to_python_async(cls, ctypes_object, *args, **kwargs): - return cls.to_python(ctypes_object, *args, **kwargs) + async def to_python_async(cls, ctypes_object, **kwargs): + return cls.to_python(ctypes_object, **kwargs) diff --git a/pyignite/datatypes/cache_properties.py b/pyignite/datatypes/cache_properties.py index a1766f3..49327a3 100644 --- a/pyignite/datatypes/cache_properties.py +++ b/pyignite/datatypes/cache_properties.py @@ -117,12 +117,12 @@ async def parse_async(cls, stream): return cls.parse(stream) @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): - return cls.prop_data_class.to_python(ctypes_object.data, *args, **kwargs) + def to_python(cls, ctypes_object, **kwargs): + return cls.prop_data_class.to_python(ctypes_object.data, **kwargs) @classmethod - async def to_python_async(cls, ctypes_object, *args, **kwargs): - return cls.to_python(ctypes_object, *args, **kwargs) + async def to_python_async(cls, ctypes_object, **kwargs): + return cls.to_python(ctypes_object, **kwargs) @classmethod def from_python(cls, stream, value): @@ -302,6 +302,6 @@ def from_python(cls, stream, value): ) @classmethod - def to_python(cls, 
ctypes_object, *args, **kwargs): + def to_python(cls, ctypes_object, **kwargs): prop_data_class = prop_map(ctypes_object.prop_code) - return prop_data_class.to_python(ctypes_object.data, *args, **kwargs) + return prop_data_class.to_python(ctypes_object.data, **kwargs) diff --git a/pyignite/datatypes/complex.py b/pyignite/datatypes/complex.py index 119c552..cddf743 100644 --- a/pyignite/datatypes/complex.py +++ b/pyignite/datatypes/complex.py @@ -90,22 +90,21 @@ def __build_final_class(cls, fields): ) @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, **kwargs): result = [] for i in range(ctypes_object.length): result.append( AnyDataObject.to_python( - getattr(ctypes_object, f'element_{i}'), - *args, **kwargs + getattr(ctypes_object, f'element_{i}'), **kwargs ) ) return ctypes_object.type_id, result @classmethod - async def to_python_not_null_async(cls, ctypes_object, *args, **kwargs): + async def to_python_not_null_async(cls, ctypes_object, **kwargs): result = [ await AnyDataObject.to_python_async( - getattr(ctypes_object, f'element_{i}'), *args, **kwargs + getattr(ctypes_object, f'element_{i}'), **kwargs ) for i in range(ctypes_object.length)] return ctypes_object.type_id, result @@ -223,8 +222,6 @@ class CollectionObject(Nullable): _type_id = TYPE_COL _header_class = None type_code = TC_COLLECTION - pythonic = list - default = [] @classmethod def parse_not_null(cls, stream): @@ -271,7 +268,7 @@ def __build_final_class(cls, fields): @classmethod def to_python_not_null(cls, ctypes_object, *args, **kwargs): result = [ - AnyDataObject.to_python(getattr(ctypes_object, f'element_{i}'), *args, **kwargs) + AnyDataObject.to_python(getattr(ctypes_object, f'element_{i}'), **kwargs) for i in range(ctypes_object.length) ] return ctypes_object.type, result @@ -279,7 +276,7 @@ def to_python_not_null(cls, ctypes_object, *args, **kwargs): @classmethod async def to_python_not_null_async(cls, ctypes_object, *args, **kwargs): result_coro = [ - AnyDataObject.to_python_async(getattr(ctypes_object, f'element_{i}'), *args, **kwargs) + AnyDataObject.to_python_async(getattr(ctypes_object, f'element_{i}'), **kwargs) for i in range(ctypes_object.length) ] @@ -361,35 +358,27 @@ def __build_final_class(cls, fields): ) @classmethod - def _to_python(cls, ctypes_object, *args, **kwargs): + def _to_python(cls, ctypes_object, **kwargs): map_cls = cls.__get_map_class(ctypes_object) result = map_cls() for i in range(0, ctypes_object.length << 1, 2): - k = AnyDataObject.to_python( - getattr(ctypes_object, f'element_{i}'), - *args, **kwargs - ) - v = AnyDataObject.to_python( - getattr(ctypes_object, f'element_{i + 1}'), - *args, **kwargs - ) + k = AnyDataObject.to_python(getattr(ctypes_object, f'element_{i}'), **kwargs) + v = AnyDataObject.to_python(getattr(ctypes_object, f'element_{i + 1}'), **kwargs) result[k] = v return result @classmethod - async def _to_python_async(cls, ctypes_object, *args, **kwargs): + async def _to_python_async(cls, ctypes_object, **kwargs): map_cls = cls.__get_map_class(ctypes_object) kv_pairs_coro = [ asyncio.gather( AnyDataObject.to_python_async( - getattr(ctypes_object, f'element_{i}'), - *args, **kwargs + getattr(ctypes_object, f'element_{i}'), **kwargs ), AnyDataObject.to_python_async( - getattr(ctypes_object, f'element_{i + 1}'), - *args, **kwargs + getattr(ctypes_object, f'element_{i + 1}'), **kwargs ) ) for i in range(0, ctypes_object.length << 1, 2) ] @@ -449,12 +438,12 @@ def _parse_header(cls, stream): return 
[('length', ctypes.c_int)], length @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): - return cls._to_python(ctypes_object, *args, **kwargs) + def to_python(cls, ctypes_object, **kwargs): + return cls._to_python(ctypes_object, **kwargs) @classmethod - async def to_python_async(cls, ctypes_object, *args, **kwargs): - return await cls._to_python_async(ctypes_object, *args, **kwargs) + async def to_python_async(cls, ctypes_object, **kwargs): + return await cls._to_python_async(ctypes_object, **kwargs) @classmethod def from_python(cls, stream, value, type_id=None): @@ -484,8 +473,6 @@ class MapObject(Nullable, _MapBase): _type_name = NAME_MAP _type_id = TYPE_MAP type_code = TC_MAP - pythonic = dict - default = {} @classmethod def parse_not_null(cls, stream): @@ -507,12 +494,12 @@ def _parse_header(cls, stream): return fields, length @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): - return ctypes_object.type, cls._to_python(ctypes_object, *args, **kwargs) + def to_python_not_null(cls, ctypes_object, **kwargs): + return ctypes_object.type, cls._to_python(ctypes_object, **kwargs) @classmethod - async def to_python_not_null_async(cls, ctypes_object, *args, **kwargs): - return ctypes_object.type, await cls._to_python_async(ctypes_object, *args, **kwargs) + async def to_python_not_null_async(cls, ctypes_object, **kwargs): + return ctypes_object.type, await cls._to_python_async(ctypes_object, **kwargs) @classmethod def from_python_not_null(cls, stream, value, **kwargs): @@ -557,7 +544,7 @@ class BinaryObject(Nullable): COMPACT_FOOTER = 0x0020 @classmethod - def hashcode(cls, value: object, client: Optional['Client']) -> int: + def hashcode(cls, value: object, client: Optional['Client'] = None) -> int: # binary objects's hashcode implementation is special in the sense # that you need to fully serialize the object to calculate # its hashcode @@ -568,7 +555,7 @@ def hashcode(cls, value: object, client: Optional['Client']) -> int: return value._hashcode @classmethod - async def hashcode_async(cls, value: object, client: Optional['AioClient']) -> int: + async def hashcode_async(cls, value: object, client: Optional['AioClient'] = None) -> int: if not value._hashcode and client: with AioBinaryStream(client) as stream: await value._from_python_async(stream, save_to_buf=True) @@ -680,7 +667,7 @@ def __build_final_class(cls, stream, header, header_class, object_fields, fields return final_class @classmethod - def to_python_not_null(cls, ctypes_object, client: 'Client' = None, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, client: 'Client' = None, **kwargs): type_id = ctypes_object.type_id if not client: raise ParseError(f'Can not query binary type {type_id}') @@ -692,14 +679,13 @@ def to_python_not_null(cls, ctypes_object, client: 'Client' = None, *args, **kwa for field_name, field_type in data_class.schema.items(): setattr( result, field_name, field_type.to_python( - getattr(ctypes_object.object_fields, field_name), - client, *args, **kwargs + getattr(ctypes_object.object_fields, field_name), client=client, **kwargs ) ) return result @classmethod - async def to_python_not_null_async(cls, ctypes_object, client: 'AioClient' = None, *args, **kwargs): + async def to_python_not_null_async(cls, ctypes_object, client: 'AioClient' = None, **kwargs): type_id = ctypes_object.type_id if not client: raise ParseError(f'Can not query binary type {type_id}') @@ -711,7 +697,7 @@ async def to_python_not_null_async(cls, ctypes_object, client: 'AioClient' = Non 
field_values = await asyncio.gather( *[ field_type.to_python_async( - getattr(ctypes_object.object_fields, field_name), client, *args, **kwargs + getattr(ctypes_object.object_fields, field_name), client=client, **kwargs ) for field_name, field_type in data_class.schema.items() ] diff --git a/pyignite/datatypes/expiry_policy.py b/pyignite/datatypes/expiry_policy.py index 3572754..d729da5 100644 --- a/pyignite/datatypes/expiry_policy.py +++ b/pyignite/datatypes/expiry_policy.py @@ -80,14 +80,14 @@ async def parse_async(cls, stream): return cls.parse(stream) @classmethod - def to_python(cls, ctypes_object): + def to_python(cls, ctypes_object, **kwargs): if ctypes_object == 0: return None return ExpiryPolicy(create=ctypes_object.create, update=ctypes_object.update, access=ctypes_object.access) @classmethod - async def to_python_async(cls, ctypes_object): + async def to_python_async(cls, ctypes_object, **kwargs): return cls.to_python(ctypes_object) @classmethod diff --git a/pyignite/datatypes/internal.py b/pyignite/datatypes/internal.py index 9bd1b76..54d72bf 100644 --- a/pyignite/datatypes/internal.py +++ b/pyignite/datatypes/internal.py @@ -136,15 +136,15 @@ async def parse_async(self, stream, context): return await self.var1.parse_async(stream) return await self.var2.parse_async(stream) - def to_python(self, ctypes_object, context, *args, **kwargs): + def to_python(self, ctypes_object, context, **kwargs): if self.predicate2(context): - return self.var1.to_python(ctypes_object, *args, **kwargs) - return self.var2.to_python(ctypes_object, *args, **kwargs) + return self.var1.to_python(ctypes_object, **kwargs) + return self.var2.to_python(ctypes_object, **kwargs) - async def to_python_async(self, ctypes_object, context, *args, **kwargs): + async def to_python_async(self, ctypes_object, context, **kwargs): if self.predicate2(context): - return await self.var1.to_python_async(ctypes_object, *args, **kwargs) - return await self.var2.to_python_async(ctypes_object, *args, **kwargs) + return await self.var1.to_python_async(ctypes_object, **kwargs) + return await self.var2.to_python_async(ctypes_object, **kwargs) @attr.s @@ -192,19 +192,17 @@ def build_c_type(fields): }, ) - def to_python(self, ctypes_object, *args, **kwargs): + def to_python(self, ctypes_object, **kwargs): length = getattr(ctypes_object, 'length', 0) return [ - Struct(self.following, dict_type=dict).to_python(getattr(ctypes_object, f'element_{i}'), - *args, **kwargs) + Struct(self.following, dict_type=dict).to_python(getattr(ctypes_object, f'element_{i}'), **kwargs) for i in range(length) ] - async def to_python_async(self, ctypes_object, *args, **kwargs): + async def to_python_async(self, ctypes_object, **kwargs): length = getattr(ctypes_object, 'length', 0) result_coro = [ - Struct(self.following, dict_type=dict).to_python_async(getattr(ctypes_object, f'element_{i}'), - *args, **kwargs) + Struct(self.following, dict_type=dict).to_python_async(getattr(ctypes_object, f'element_{i}'), **kwargs) for i in range(length) ] return await asyncio.gather(*result_coro) @@ -284,21 +282,21 @@ def build_c_type(fields): }, ) - def to_python(self, ctypes_object, *args, **kwargs) -> Union[dict, OrderedDict]: + def to_python(self, ctypes_object, **kwargs) -> Union[dict, OrderedDict]: result = self.dict_type() for name, c_type in self.fields: is_cond = isinstance(c_type, Conditional) result[name] = c_type.to_python( getattr(ctypes_object, name), result, - *args, **kwargs + **kwargs ) if is_cond else c_type.to_python( getattr(ctypes_object, name), - 
*args, **kwargs + **kwargs ) return result - async def to_python_async(self, ctypes_object, *args, **kwargs) -> Union[dict, OrderedDict]: + async def to_python_async(self, ctypes_object, **kwargs) -> Union[dict, OrderedDict]: result = self.dict_type() for name, c_type in self.fields: is_cond = isinstance(c_type, Conditional) @@ -307,12 +305,12 @@ async def to_python_async(self, ctypes_object, *args, **kwargs) -> Union[dict, O value = await c_type.to_python_async( getattr(ctypes_object, name), result, - *args, **kwargs + **kwargs ) else: value = await c_type.to_python_async( getattr(ctypes_object, name), - *args, **kwargs + **kwargs ) result[name] = value return result @@ -394,14 +392,14 @@ def __data_class_parse(cls, stream): raise ParseError('Unknown type code: `{}`'.format(type_code)) @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): + def to_python(cls, ctypes_object, **kwargs): data_class = cls.__data_class_from_ctype(ctypes_object) - return data_class.to_python(ctypes_object) + return data_class.to_python(ctypes_object, **kwargs) @classmethod - async def to_python_async(cls, ctypes_object, *args, **kwargs): + async def to_python_async(cls, ctypes_object, **kwargs): data_class = cls.__data_class_from_ctype(ctypes_object) - return await data_class.to_python_async(ctypes_object) + return await data_class.to_python_async(ctypes_object, **kwargs) @classmethod def __data_class_from_ctype(cls, ctypes_object): @@ -580,16 +578,16 @@ def build_c_type(self, fields): ) @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): + def to_python(cls, ctypes_object, **kwargs): length = getattr(ctypes_object, "length", 0) return [ - super().to_python(getattr(ctypes_object, f'element_{i}'), *args, **kwargs) + super().to_python(getattr(ctypes_object, f'element_{i}'), **kwargs) for i in range(length) ] @classmethod - async def to_python_async(cls, ctypes_object, *args, **kwargs): + async def to_python_async(cls, ctypes_object, **kwargs): length = getattr(ctypes_object, "length", 0) values = asyncio.gather( diff --git a/pyignite/datatypes/null_object.py b/pyignite/datatypes/null_object.py index 8ac47b2..d51e5fb 100644 --- a/pyignite/datatypes/null_object.py +++ b/pyignite/datatypes/null_object.py @@ -57,7 +57,7 @@ def parse(cls, stream): return cls.build_c_type() @classmethod - def to_python(cls, *args, **kwargs): + def to_python(cls, ctypes_object, **kwargs): return None @classmethod @@ -105,7 +105,7 @@ def from_python(cls, stream, value, **kwargs): if value is None: Null.from_python(stream) else: - cls.from_python_not_null(stream, value) + cls.from_python_not_null(stream, value, **kwargs) @classmethod async def from_python_async(cls, stream, value, **kwargs): @@ -115,26 +115,26 @@ async def from_python_async(cls, stream, value, **kwargs): await cls.from_python_not_null_async(stream, value, **kwargs) @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, **kwargs): raise NotImplementedError @classmethod - async def to_python_not_null_async(cls, ctypes_object, *args, **kwargs): - return cls.to_python_not_null(ctypes_object, *args, **kwargs) + async def to_python_not_null_async(cls, ctypes_object, **kwargs): + return cls.to_python_not_null(ctypes_object, **kwargs) @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): + def to_python(cls, ctypes_object, **kwargs): if cls.__is_null(ctypes_object): return None - return cls.to_python_not_null(ctypes_object, *args, **kwargs) + return 
cls.to_python_not_null(ctypes_object, **kwargs) @classmethod - async def to_python_async(cls, ctypes_object, *args, **kwargs): + async def to_python_async(cls, ctypes_object, **kwargs): if cls.__is_null(ctypes_object): return None - return await cls.to_python_not_null_async(ctypes_object, *args, **kwargs) + return await cls.to_python_not_null_async(ctypes_object, **kwargs) @classmethod def __check_null_input(cls, stream): diff --git a/pyignite/datatypes/primitive.py b/pyignite/datatypes/primitive.py index 037f680..2213f3d 100644 --- a/pyignite/datatypes/primitive.py +++ b/pyignite/datatypes/primitive.py @@ -52,7 +52,7 @@ def parse(cls, stream): return cls.c_type @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): + def to_python(cls, ctypes_object, **kwargs): return ctypes_object @@ -122,7 +122,7 @@ class Char(Primitive): c_type = ctypes.c_short @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): + def to_python(cls, ctypes_object, **kwargs): return ctypes_object.value.to_bytes( ctypes.sizeof(cls.c_type), byteorder=PROTOCOL_BYTE_ORDER @@ -147,7 +147,7 @@ class Bool(Primitive): c_type = ctypes.c_byte # Use c_byte because c_bool throws endianness conversion error on BE systems. @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): + def to_python(cls, ctypes_object, **kwargs): return ctypes_object != 0 @classmethod diff --git a/pyignite/datatypes/primitive_arrays.py b/pyignite/datatypes/primitive_arrays.py index e1d4289..fcf877c 100644 --- a/pyignite/datatypes/primitive_arrays.py +++ b/pyignite/datatypes/primitive_arrays.py @@ -67,7 +67,7 @@ def parse(cls, stream): return c_type @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): + def to_python(cls, ctypes_object, **kwargs): return [ctypes_object.data[i] for i in range(ctypes_object.length)] @classmethod @@ -88,7 +88,7 @@ class ByteArray(PrimitiveArray): type_code = TC_BYTE_ARRAY @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): + def to_python(cls, ctypes_object, **kwargs): return bytes(ctypes_object.data) @classmethod @@ -184,7 +184,7 @@ def parse_not_null(cls, stream): return c_type @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, **kwargs): return [ctypes_object.data[i] for i in range(ctypes_object.length)] @classmethod @@ -206,7 +206,7 @@ class ByteArrayObject(PrimitiveArrayObject): type_code = TC_BYTE_ARRAY @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, **kwargs): return bytes(ctypes_object.data) @classmethod @@ -277,8 +277,8 @@ class CharArrayObject(PrimitiveArrayObject): type_code = TC_CHAR_ARRAY @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): - values = super().to_python_not_null(ctypes_object, *args, **kwargs) + def to_python_not_null(cls, ctypes_object, **kwargs): + values = super().to_python_not_null(ctypes_object, **kwargs) return [ v.to_bytes( ctypes.sizeof(cls.primitive_type.c_type), @@ -296,5 +296,5 @@ class BoolArrayObject(PrimitiveArrayObject): type_code = TC_BOOL_ARRAY @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, **kwargs): return [ctypes_object.data[i] != 0 for i in range(ctypes_object.length)] diff --git a/pyignite/datatypes/primitive_objects.py b/pyignite/datatypes/primitive_objects.py index 9b23ec9..4e66334 100644 --- a/pyignite/datatypes/primitive_objects.py +++ 
b/pyignite/datatypes/primitive_objects.py @@ -65,7 +65,7 @@ def parse_not_null(cls, stream): return data_type @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, **kwargs): return ctypes_object.value @classmethod @@ -89,7 +89,7 @@ class ByteObject(DataObject): default = 0 @classmethod - def hashcode(cls, value: int, *args, **kwargs) -> int: + def hashcode(cls, value: int, **kwargs) -> int: return value @@ -102,7 +102,7 @@ class ShortObject(DataObject): default = 0 @classmethod - def hashcode(cls, value: int, *args, **kwargs) -> int: + def hashcode(cls, value: int, **kwargs) -> int: return value @@ -115,7 +115,7 @@ class IntObject(DataObject): default = 0 @classmethod - def hashcode(cls, value: int, *args, **kwargs) -> int: + def hashcode(cls, value: int, **kwargs) -> int: return value @@ -128,7 +128,7 @@ class LongObject(DataObject): default = 0 @classmethod - def hashcode(cls, value: int, *args, **kwargs) -> int: + def hashcode(cls, value: int, **kwargs) -> int: return value ^ (unsigned(value, ctypes.c_ulonglong) >> 32) @@ -141,7 +141,7 @@ class FloatObject(DataObject): default = 0.0 @classmethod - def hashcode(cls, value: float, *args, **kwargs) -> int: + def hashcode(cls, value: float, **kwargs) -> int: return ctypes.cast( ctypes.pointer(ctypes.c_float(value)), ctypes.POINTER(ctypes.c_int) @@ -157,7 +157,7 @@ class DoubleObject(DataObject): default = 0.0 @classmethod - def hashcode(cls, value: float, *args, **kwargs) -> int: + def hashcode(cls, value: float, **kwargs) -> int: bits = ctypes.cast( ctypes.pointer(ctypes.c_double(value)), ctypes.POINTER(ctypes.c_longlong) @@ -180,11 +180,11 @@ class CharObject(DataObject): default = ' ' @classmethod - def hashcode(cls, value: str, *args, **kwargs) -> int: + def hashcode(cls, value: str, **kwargs) -> int: return ord(value) @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, **kwargs): value = ctypes_object.value return value.to_bytes( ctypes.sizeof(cls.c_type), @@ -214,9 +214,9 @@ class BoolObject(DataObject): default = False @classmethod - def hashcode(cls, value: bool, *args, **kwargs) -> int: + def hashcode(cls, value: bool, **kwargs) -> int: return 1231 if value else 1237 @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, **kwargs): return ctypes_object.value != 0 diff --git a/pyignite/datatypes/standard.py b/pyignite/datatypes/standard.py index 5657afb..9173daa 100644 --- a/pyignite/datatypes/standard.py +++ b/pyignite/datatypes/standard.py @@ -71,7 +71,7 @@ class String(Nullable): pythonic = str @classmethod - def hashcode(cls, value: str, *args, **kwargs) -> int: + def hashcode(cls, value: str, **kwargs) -> int: return hashcode(value) @classmethod @@ -101,7 +101,7 @@ def parse_not_null(cls, stream): return data_type @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, **kwargs): if ctypes_object.length > 0: return ctypes_object.data.decode(PROTOCOL_STRING_ENCODING) @@ -132,7 +132,7 @@ class DecimalObject(Nullable): default = decimal.Decimal('0.00') @classmethod - def hashcode(cls, value: decimal.Decimal, *args, **kwargs) -> int: + def hashcode(cls, value: decimal.Decimal, **kwargs) -> int: return decimal_hashcode(value) @classmethod @@ -163,7 +163,7 @@ def parse_not_null(cls, stream): return data_type @classmethod - def to_python_not_null(cls, ctypes_object, 
*args, **kwargs): + def to_python_not_null(cls, ctypes_object, **kwargs): sign = 1 if ctypes_object.data[0] & 0x80 else 0 data = ctypes_object.data[1:] data.insert(0, ctypes_object.data[0] & 0x7f) @@ -227,7 +227,7 @@ class UUIDObject(StandardObject): UUID_BYTE_ORDER = (7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8) @classmethod - def hashcode(cls, value: 'UUID', *args, **kwargs) -> int: + def hashcode(cls, value: 'UUID', **kwargs) -> int: msb = value.int >> 64 lsb = value.int & 0xffffffffffffffff hilo = msb ^ lsb @@ -263,7 +263,7 @@ def from_python_not_null(cls, stream, value: uuid.UUID, **kwargs): stream.write(data_object) @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, **kwargs): uuid_array = bytearray(ctypes_object.value) return uuid.UUID( bytes=bytes([uuid_array[i] for i in cls.UUID_BYTE_ORDER]) @@ -289,7 +289,7 @@ class TimestampObject(StandardObject): default = (datetime(1970, 1, 1), 0) @classmethod - def hashcode(cls, value: Tuple[datetime, int], *args, **kwargs) -> int: + def hashcode(cls, value: Tuple[datetime, int], **kwargs) -> int: return datetime_hashcode(int(value[0].timestamp() * 1000)) @classmethod @@ -323,7 +323,7 @@ def from_python_not_null(cls, stream, value: tuple, **kwargs): stream.write(data_object) @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, **kwargs): return ( datetime.fromtimestamp(ctypes_object.epoch / 1000), ctypes_object.fraction @@ -345,7 +345,7 @@ class DateObject(StandardObject): default = datetime(1970, 1, 1) @classmethod - def hashcode(cls, value: datetime, *args, **kwargs) -> int: + def hashcode(cls, value: datetime, **kwargs) -> int: return datetime_hashcode(int(value.timestamp() * 1000)) @classmethod @@ -379,7 +379,7 @@ def from_python_not_null(cls, stream, value: [date, datetime], **kwargs): stream.write(data_object) @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, **kwargs): return datetime.fromtimestamp(ctypes_object.epoch / 1000) @@ -397,7 +397,7 @@ class TimeObject(StandardObject): default = timedelta() @classmethod - def hashcode(cls, value: timedelta, *args, **kwargs) -> int: + def hashcode(cls, value: timedelta, **kwargs) -> int: return datetime_hashcode(int(value.total_seconds() * 1000)) @classmethod @@ -429,7 +429,7 @@ def from_python_not_null(cls, stream, value: timedelta, **kwargs): stream.write(data_object) @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, **kwargs): return timedelta(milliseconds=ctypes_object.value) @@ -476,7 +476,7 @@ def from_python_not_null(cls, stream, value: tuple, **kwargs): stream.write(data_object) @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): + def to_python_not_null(cls, ctypes_object, **kwargs): return ctypes_object.type_id, ctypes_object.ordinal @@ -570,8 +570,8 @@ def from_python(cls, stream, value, **kwargs): cls._from_python(stream, value, **kwargs) @classmethod - def to_python(cls, ctypes_object, *args, **kwargs): - return cls._to_python(ctypes_object, *args, **kwargs) + def to_python(cls, ctypes_object, **kwargs): + return cls._to_python(ctypes_object, **kwargs) class StringArray(StandardArray): @@ -660,8 +660,8 @@ def from_python_not_null(cls, stream, value, **kwargs): cls._from_python(stream, value, **kwargs) @classmethod - def to_python_not_null(cls, ctypes_object, *args, 
**kwargs): - return cls._to_python(ctypes_object, *args, **kwargs) + def to_python_not_null(cls, ctypes_object, **kwargs): + return cls._to_python(ctypes_object, **kwargs) class StringArrayObject(StandardArrayObject): @@ -759,8 +759,8 @@ def from_python_not_null(cls, stream, value, **kwargs): super().from_python_not_null(stream, value, type_id=type_id) @classmethod - def to_python_not_null(cls, ctypes_object, *args, **kwargs): - return ctypes_object.type_id, cls._to_python(ctypes_object, *args, **kwargs) + def to_python_not_null(cls, ctypes_object, **kwargs): + return ctypes_object.type_id, cls._to_python(ctypes_object, **kwargs) class BinaryEnumArrayObject(EnumArrayObject): diff --git a/pyignite/queries/response.py b/pyignite/queries/response.py index c0311ec..11e71a7 100644 --- a/pyignite/queries/response.py +++ b/pyignite/queries/response.py @@ -128,25 +128,22 @@ async def _parse_success_async(self, stream, fields: list): c_type = await ignite_type.parse_async(stream) fields.append((name, c_type)) - def to_python(self, ctypes_object, *args, **kwargs): + def to_python(self, ctypes_object, **kwargs): if not self.following: return None result = OrderedDict() for name, c_type in self.following: - result[name] = c_type.to_python( - getattr(ctypes_object, name), - *args, **kwargs - ) + result[name] = c_type.to_python(getattr(ctypes_object, name), **kwargs) return result - async def to_python_async(self, ctypes_object, *args, **kwargs): + async def to_python_async(self, ctypes_object, **kwargs): if not self.following: return None values = await asyncio.gather( - *[c_type.to_python_async(getattr(ctypes_object, name), *args, **kwargs) for name, c_type in self.following] + *[c_type.to_python_async(getattr(ctypes_object, name), **kwargs) for name, c_type in self.following] ) return OrderedDict([(name, values[i]) for i, (name, _) in enumerate(self.following)]) @@ -239,9 +236,9 @@ def __body_class_post_process(body_class, fields, data_fields): ('more', ctypes.c_byte), ] - def to_python(self, ctypes_object, *args, **kwargs): + def to_python(self, ctypes_object, **kwargs): if getattr(ctypes_object, 'status_code', 0) == 0: - result = self.__to_python_result_header(ctypes_object, *args, **kwargs) + result = self.__to_python_result_header(ctypes_object, **kwargs) for row_item in ctypes_object.data._fields_: row_name = row_item[0] @@ -250,13 +247,13 @@ def to_python(self, ctypes_object, *args, **kwargs): for col_item in row_object._fields_: col_name = col_item[0] col_object = getattr(row_object, col_name) - row.append(AnyDataObject.to_python(col_object, *args, **kwargs)) + row.append(AnyDataObject.to_python(col_object, **kwargs)) result['data'].append(row) return result - async def to_python_async(self, ctypes_object, *args, **kwargs): + async def to_python_async(self, ctypes_object, **kwargs): if getattr(ctypes_object, 'status_code', 0) == 0: - result = self.__to_python_result_header(ctypes_object, *args, **kwargs) + result = self.__to_python_result_header(ctypes_object, **kwargs) data_coro = [] for row_item in ctypes_object.data._fields_: @@ -266,7 +263,7 @@ async def to_python_async(self, ctypes_object, *args, **kwargs): for col_item in row_object._fields_: col_name = col_item[0] col_object = getattr(row_object, col_name) - row_coro.append(AnyDataObject.to_python_async(col_object, *args, **kwargs)) + row_coro.append(AnyDataObject.to_python_async(col_object, **kwargs)) data_coro.append(asyncio.gather(*row_coro)) @@ -328,7 +325,7 @@ def __process_type_exists(stream, fields): return type_exists - def 
to_python(self, ctypes_object, *args, **kwargs): + def to_python(self, ctypes_object, **kwargs): if getattr(ctypes_object, 'status_code', 0) == 0: result = { 'type_exists': Bool.to_python(ctypes_object.type_exists) @@ -349,5 +346,5 @@ def to_python(self, ctypes_object, *args, **kwargs): } return result - async def to_python_async(self, ctypes_object, *args, **kwargs): - return self.to_python(ctypes_object, *args, **kwargs) + async def to_python_async(self, ctypes_object, **kwargs): + return self.to_python(ctypes_object, **kwargs) diff --git a/pyignite/utils.py b/pyignite/utils.py index 975f414..427cceb 100644 --- a/pyignite/utils.py +++ b/pyignite/utils.py @@ -69,13 +69,6 @@ def is_hinted(value): return isinstance(value, tuple) and len(value) == 2 and issubclass(value[1], IgniteDataType) -def is_wrapped(value: Any) -> bool: - """ - Check if a value is of WrappedDataObject type. - """ - return type(value) is tuple and len(value) == 2 and type(value[0]) is bytes and type(value[1]) is int - - def int_overflow(value: int) -> int: """ Simulates 32bit integer overflow. diff --git a/tests/common/test_binary.py b/tests/common/test_binary.py index c94c4d5..449709e 100644 --- a/tests/common/test_binary.py +++ b/tests/common/test_binary.py @@ -451,13 +451,13 @@ def complex_objects(): def test_complex_object_hash(client, complex_objects): for obj, hash in complex_objects: - assert hash == BinaryObject.hashcode(obj, client) + assert hash == BinaryObject.hashcode(obj, client=client) @pytest.mark.asyncio async def test_complex_object_hash_async(async_client, complex_objects): for obj, hash in complex_objects: - assert hash == await BinaryObject.hashcode_async(obj, async_client) + assert hash == await BinaryObject.hashcode_async(obj, client=async_client) def camel_to_snake(name): @@ -504,3 +504,55 @@ async def test_complex_object_null_fields_async(async_cache, null_fields_object) """ await async_cache.put(1, null_fields_object) assert await async_cache.get(1) == null_fields_object, 'Objects mismatch' + + +def test_object_with_collections_of_binary_objects(cache): + __check_object_with_collections_of_binary_objects(cache) + + +@pytest.mark.asyncio +async def test_object_with_collections_of_binary_objects_async(async_cache): + await __check_object_with_collections_of_binary_objects(async_cache) + + +def __check_object_with_collections_of_binary_objects(cache): + class Container( + metaclass=GenericObjectMeta, + schema={ + 'id': IntObject, + 'collection': CollectionObject, + 'array': ObjectArrayObject, + 'map': MapObject + } + ): + pass + + class Value( + metaclass=GenericObjectMeta, + schema={ + 'id': IntObject, + 'name': String + } + ): + pass + + def fixtures(): + map_obj = (MapObject.HASH_MAP, {i: Value(i, f'val_{i}') for i in range(10)}) + col_obj = (CollectionObject.ARR_LIST, [Value(i, f'val_{i}') for i in range(10)]) + arr_obj = (ObjectArrayObject.OBJECT, [Value(i, f'val_{i}') for i in range(10)]) + return [ + Container(1, map=map_obj, collection=col_obj, array=arr_obj), + Container(2), # Check if collections are not set + ] + + async def inner_async(): + for i, val in enumerate(fixtures()): + await cache.put(i, val) + assert await cache.get(i) == val + + def inner(): + for i, val in enumerate(fixtures()): + cache.put(i, val) + assert cache.get(i) == val + + return inner_async() if isinstance(cache, AioCache) else inner() diff --git a/tests/common/test_datatypes.py b/tests/common/test_datatypes.py index 6771f94..ebbafb6 100644 --- a/tests/common/test_datatypes.py +++ b/tests/common/test_datatypes.py 
@@ -20,6 +20,7 @@ import pytest import uuid +from pyignite import GenericObjectMeta from pyignite.datatypes import ( ByteObject, IntObject, FloatObject, CharObject, ShortObject, BoolObject, ByteArrayObject, IntArrayObject, ShortArrayObject, FloatArrayObject, BoolArrayObject, CharArrayObject, TimestampObject, String, BinaryEnumObject, @@ -27,6 +28,17 @@ ) from pyignite.utils import unsigned + +class Value( + metaclass=GenericObjectMeta, + schema={ + 'id': IntObject, + 'name': String, + } +): + pass + + put_get_data_params = [ # integers (42, None), @@ -124,6 +136,7 @@ # object array ((ObjectArrayObject.OBJECT, [1, 2, decimal.Decimal('3'), bytearray(b'\x10\x20')]), ObjectArrayObject), + ((ObjectArrayObject.OBJECT, [Value(id=i, name=f'val_{i}') for i in range(10)]), ObjectArrayObject), # collection ((CollectionObject.LINKED_LIST, [1, 2, 3]), None), diff --git a/tests/common/test_key_value.py b/tests/common/test_key_value.py index b03bec2..e26d373 100644 --- a/tests/common/test_key_value.py +++ b/tests/common/test_key_value.py @@ -17,7 +17,8 @@ import pytest -from pyignite.datatypes import CollectionObject, IntObject, MapObject, TimestampObject +from pyignite import GenericObjectMeta +from pyignite.datatypes import CollectionObject, IntObject, MapObject, TimestampObject, String def test_put_get(cache): @@ -352,16 +353,35 @@ async def test_cache_get_size_async(async_cache): assert await async_cache.get_size() == 1 +class Value( + metaclass=GenericObjectMeta, + schema={ + 'id': IntObject, + 'name': String, + } +): + pass + + collection_params = [ [ 'simple', - (1, [(123, IntObject), 678, None, 55.2, ((datetime(year=1996, month=3, day=1), 0), TimestampObject)]), - (1, [123, 678, None, 55.2, (datetime(year=1996, month=3, day=1), 0)]) + (CollectionObject.ARR_LIST, [ + (123, IntObject), 678, None, 55.2, ((datetime(year=1996, month=3, day=1), 0), TimestampObject) + ]), + (CollectionObject.ARR_LIST, [123, 678, None, 55.2, (datetime(year=1996, month=3, day=1), 0)]) ], [ 'nested', - (1, [123, ((1, [456, 'inner_test_string', 789]), CollectionObject), 'outer_test_string']), - (1, [123, (1, [456, 'inner_test_string', 789]), 'outer_test_string']) + (CollectionObject.ARR_LIST, [ + 123, ((1, [456, 'inner_test_string', 789]), CollectionObject), 'outer_test_string' + ]), + (CollectionObject.ARR_LIST, [123, (1, [456, 'inner_test_string', 789]), 'outer_test_string']) + ], + [ + 'binary', + (CollectionObject.ARR_LIST, [Value(id=i, name=f'val_{i}') for i in range(0, 10)]), + (CollectionObject.ARR_LIST, [Value(id=i, name=f'val_{i}') for i in range(0, 10)]), ], [ 'hash_map', @@ -403,6 +423,11 @@ async def test_cache_get_size_async(async_cache): } ) ], + [ + 'binary_map', + (MapObject.HASH_MAP, {i: Value(id=i, name=f"val_{i}") for i in range(10)}), + (MapObject.HASH_MAP, {i: Value(id=i, name=f"val_{i}") for i in range(10)}) + ] ] From 8c8a006a0b0e913f9d38dd45dab80001b144c3c6 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Wed, 19 May 2021 16:20:22 +0300 Subject: [PATCH 37/62] IGNITE-14738 Remove obsolete setup dependency to pytest-runner - Fixes #38. 
--- requirements/install.txt | 2 +- requirements/setup.txt | 3 --- setup.py | 33 +++++++----------------------- tests/common/test_expiry_policy.py | 2 +- 4 files changed, 9 insertions(+), 31 deletions(-) delete mode 100644 requirements/setup.txt diff --git a/requirements/install.txt b/requirements/install.txt index 1ee12a9..feb4eb6 100644 --- a/requirements/install.txt +++ b/requirements/install.txt @@ -1,3 +1,3 @@ # these pip packages are necessary for the pyignite to run -attrs==20.3.0 +attrs>=20.3.0 diff --git a/requirements/setup.txt b/requirements/setup.txt deleted file mode 100644 index d202467..0000000 --- a/requirements/setup.txt +++ /dev/null @@ -1,3 +0,0 @@ -# additional package for integrating pytest in setuptools - -pytest-runner==5.3.0 diff --git a/setup.py b/setup.py index 7a3cb70..1a4071d 100644 --- a/setup.py +++ b/setup.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import re -from collections import defaultdict from distutils.command.build_ext import build_ext from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError @@ -63,25 +62,12 @@ def is_a_requirement(line): ]) -requirement_sections = [ - 'install', - 'setup', - 'tests', - 'docs', -] - -requirements = defaultdict(list) - -for section in requirement_sections: - with open( - 'requirements/{}.txt'.format(section), - 'r', - encoding='utf-8', - ) as requirements_file: - for line in requirements_file.readlines(): - line = line.strip('\n') - if is_a_requirement(line): - requirements[section].append(line) +install_requirements = [] +with open('requirements/install.txt', 'r', encoding='utf-8') as requirements_file: + for line in requirements_file.readlines(): + line = line.strip('\n') + if is_a_requirement(line): + install_requirements.append(line) with open('README.md', 'r', encoding='utf-8') as readme_file: long_description = readme_file.read() @@ -114,12 +100,7 @@ def run_setup(with_binary=True): long_description_content_type='text/markdown', url='https://github.com/apache/ignite-python-thin-client', packages=setuptools.find_packages(), - install_requires=requirements['install'], - tests_require=requirements['tests'], - setup_requires=requirements['setup'], - extras_require={ - 'docs': requirements['docs'], - }, + install_requires=install_requirements, license="Apache License 2.0", license_files=('LICENSE', 'NOTICE'), classifiers=[ diff --git a/tests/common/test_expiry_policy.py b/tests/common/test_expiry_policy.py index cc852c7..9dc4152 100644 --- a/tests/common/test_expiry_policy.py +++ b/tests/common/test_expiry_policy.py @@ -124,7 +124,7 @@ async def test_expiry_policy_async(async_cache): await asyncio.sleep(ttl * 2 / 3) - cache_updated.get(2) # Check that access doesn't matter for updated policy. + await cache_updated.get(2) # Check that access doesn't matter for updated policy. await asyncio.sleep(ttl * 2 / 3) From fa364dfd1079571a93299f2b1289a4993290b0c4 Mon Sep 17 00:00:00 2001 From: Rob Emanuele Date: Tue, 8 Jun 2021 16:48:51 +0300 Subject: [PATCH 38/62] IGNITE-14686 Fix incorrect type hint for cache's get_all - Fixes #36. 
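get_all returns a dictionary keyed by the requested keys that were found on the server, not a list, so the annotation now matches the runtime behaviour. A short illustrative sketch (cache name and keys are arbitrary, and a node on 127.0.0.1:10800 is assumed):

    from pyignite import Client

    client = Client()
    client.connect('127.0.0.1', 10800)
    cache = client.get_or_create_cache('get_all_example')

    cache.put_all({1: 'one', 2: 'two'})
    found = cache.get_all([1, 2, 3])
    # found == {1: 'one', 2: 'two'}; the missing key 3 is simply absent from the result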
Signed-off-by: Ivan Daschinsky --- pyignite/aio_cache.py | 2 +- pyignite/cache.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyignite/aio_cache.py b/pyignite/aio_cache.py index f088844..b6b534b 100644 --- a/pyignite/aio_cache.py +++ b/pyignite/aio_cache.py @@ -159,7 +159,7 @@ async def put(self, key, value, key_hint: object = None, value_hint: object = No return await cache_put_async(conn, self.cache_info, key, value, key_hint=key_hint, value_hint=value_hint) @status_to_exception(CacheError) - async def get_all(self, keys: list) -> list: + async def get_all(self, keys: list) -> dict: """ Retrieves multiple key-value pairs from cache. diff --git a/pyignite/cache.py b/pyignite/cache.py index a2444a4..c0aaaec 100644 --- a/pyignite/cache.py +++ b/pyignite/cache.py @@ -253,7 +253,7 @@ def put(self, key, value, key_hint: object = None, value_hint: object = None): ) @status_to_exception(CacheError) - def get_all(self, keys: list) -> list: + def get_all(self, keys: list) -> dict: """ Retrieves multiple key-value pairs from cache. From 9945ecbbcf47b66e485855e91417f991345b7453 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Wed, 9 Jun 2021 14:07:08 +0300 Subject: [PATCH 39/62] IGNITE-12467 Implement transactions, rewrite async connections using protocol and transport - Fixes #40. --- .../pyignite.datatypes.transactions.rst | 21 + docs/source/pyignite.rst | 1 + docs/source/pyignite.transaction.rst | 22 + pyignite/aio_cache.py | 4 + pyignite/aio_client.py | 32 +- pyignite/api/affinity.py | 19 +- pyignite/api/binary.py | 31 +- pyignite/api/cache_config.py | 103 ++-- pyignite/api/cluster.py | 31 +- pyignite/api/key_value.py | 442 +++++++----------- pyignite/api/sql.py | 101 ++-- pyignite/api/tx_api.py | 124 +++++ pyignite/cache.py | 6 +- pyignite/client.py | 27 +- pyignite/connection/aio_connection.py | 205 ++++---- pyignite/connection/protocol_context.py | 6 + pyignite/datatypes/__init__.py | 1 + pyignite/datatypes/cache_config.py | 1 + pyignite/datatypes/transactions.py | 42 ++ pyignite/exceptions.py | 10 +- pyignite/queries/cache_info.py | 61 +++ pyignite/queries/op_codes.py | 3 + pyignite/queries/query.py | 49 +- pyignite/transaction.py | 130 ++++++ requirements/install.txt | 1 + tests/affinity/conftest.py | 2 +- .../affinity/test_affinity_request_routing.py | 4 +- .../test_affinity_single_connection.py | 2 +- tests/common/test_transactions.py | 231 +++++++++ tests/config/ignite-config.xml.jinja2 | 1 + tests/custom/test_cluster.py | 7 +- 31 files changed, 1114 insertions(+), 606 deletions(-) create mode 100644 docs/source/pyignite.datatypes.transactions.rst create mode 100644 docs/source/pyignite.transaction.rst create mode 100644 pyignite/api/tx_api.py create mode 100644 pyignite/datatypes/transactions.py create mode 100644 pyignite/queries/cache_info.py create mode 100644 pyignite/transaction.py create mode 100644 tests/common/test_transactions.py diff --git a/docs/source/pyignite.datatypes.transactions.rst b/docs/source/pyignite.datatypes.transactions.rst new file mode 100644 index 0000000..9b38468 --- /dev/null +++ b/docs/source/pyignite.datatypes.transactions.rst @@ -0,0 +1,21 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. 
You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.datatypes.transactions module +======================================= + +.. automodule:: pyignite.datatypes.transactions + :members: + :show-inheritance: \ No newline at end of file diff --git a/docs/source/pyignite.rst b/docs/source/pyignite.rst index c2a36fe..2e52500 100644 --- a/docs/source/pyignite.rst +++ b/docs/source/pyignite.rst @@ -41,6 +41,7 @@ Submodules pyignite.aio_client pyignite.cluster pyignite.aio_cluster + pyignite.transaction pyignite.cursors pyignite.exceptions diff --git a/docs/source/pyignite.transaction.rst b/docs/source/pyignite.transaction.rst new file mode 100644 index 0000000..7c6b016 --- /dev/null +++ b/docs/source/pyignite.transaction.rst @@ -0,0 +1,22 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.transaction module +========================= + +.. automodule:: pyignite.transaction + :members: + :undoc-members: + :show-inheritance: diff --git a/pyignite/aio_cache.py b/pyignite/aio_cache.py index b6b534b..7a92a9a 100644 --- a/pyignite/aio_cache.py +++ b/pyignite/aio_cache.py @@ -15,6 +15,7 @@ import asyncio from typing import Any, Iterable, Optional, Union +from .api.tx_api import get_tx_connection from .datatypes import ExpiryPolicy from .datatypes.internal import AnyDataObject from .exceptions import CacheCreationError, CacheError, ParameterError @@ -91,6 +92,9 @@ def __init__(self, client: 'AioClient', name: str, expiry_policy: ExpiryPolicy = super().__init__(client, name, expiry_policy) async def _get_best_node(self, key=None, key_hint=None): + tx_conn = get_tx_connection() + if tx_conn: + return tx_conn return await self.client.get_best_node(self, key, key_hint) async def settings(self) -> Optional[dict]: diff --git a/pyignite/aio_client.py b/pyignite/aio_client.py index 8c2ca56..26d243d 100644 --- a/pyignite/aio_client.py +++ b/pyignite/aio_client.py @@ -14,8 +14,9 @@ # limitations under the License. 
import asyncio import random +import sys from itertools import chain -from typing import Iterable, Type, Union, Any, Dict +from typing import Iterable, Type, Union, Any, Dict, Optional from .aio_cluster import AioCluster from .api import cache_get_node_partitions_async @@ -27,10 +28,11 @@ from .aio_cache import AioCache, get_cache, create_cache, get_or_create_cache from .connection import AioConnection from .constants import AFFINITY_RETRIES, AFFINITY_DELAY -from .datatypes import BinaryObject -from .exceptions import BinaryTypeError, CacheError, ReconnectError, connection_errors -from .queries.query import CacheInfo +from .datatypes import BinaryObject, TransactionConcurrency, TransactionIsolation +from .exceptions import BinaryTypeError, CacheError, ReconnectError, connection_errors, NotSupportedError +from .queries.cache_info import CacheInfo from .stream import AioBinaryStream, READ_BACKWARD +from .transaction import AioTransaction from .utils import cache_id, entity_id, status_to_exception @@ -471,9 +473,9 @@ def sql( elif isinstance(cache, AioCache): c_info = cache.cache_info else: - c_info = None + c_info = CacheInfo(protocol_context=self.protocol_context) - if c_info: + if c_info.cache_id: schema = None return AioSqlFieldsCursor(self, c_info, query_str, page_size, query_args, schema, statement_type, @@ -487,3 +489,21 @@ def get_cluster(self) -> 'AioCluster': :return: :py:class:`~pyignite.aio_cluster.AioCluster` instance. """ return AioCluster(self) + + def tx_start(self, concurrency: TransactionConcurrency = TransactionConcurrency.PESSIMISTIC, + isolation: TransactionIsolation = TransactionIsolation.REPEATABLE_READ, + timeout: Union[int, float] = 0, label: Optional[str] = None) -> 'AioTransaction': + """ + Start async thin client transaction. + + :param concurrency: (optional) transaction concurrency, see + :py:class:`~pyignite.datatypes.transactions.TransactionConcurrency` + :param isolation: (optional) transaction isolation level, see + :py:class:`~pyignite.datatypes.transactions.TransactionIsolation` + :param timeout: (optional) transaction timeout in seconds if float, in millis if int + :param label: (optional) transaction label. + :return: :py:class:`~pyignite.transaction.AioTransaction` instance. + """ + if sys.version_info < (3, 7): + raise NotSupportedError(f"Transactions are not supported in async client on current python {sys.version}") + return AioTransaction(self, concurrency, isolation, timeout, label) diff --git a/pyignite/api/affinity.py b/pyignite/api/affinity.py index ddf1e7a..30e93ff 100644 --- a/pyignite/api/affinity.py +++ b/pyignite/api/affinity.py @@ -68,27 +68,23 @@ ]) -def cache_get_node_partitions(conn: 'Connection', caches: Union[int, Iterable[int]], query_id: int = None) -> APIResult: +def cache_get_node_partitions(conn: 'Connection', caches: Union[int, Iterable[int]]) -> APIResult: """ Gets partition mapping for an Ignite cache or a number of caches. See “IEP-23: Best Effort Affinity for thin clients”. :param conn: connection to Ignite server, - :param caches: cache ID(s) the mapping is provided for, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, + :param caches: cache ID(s) the mapping is provided for :return: API result data object. 
""" - return __cache_get_node_partitions(conn, caches, query_id) + return __cache_get_node_partitions(conn, caches) -async def cache_get_node_partitions_async(conn: 'AioConnection', caches: Union[int, Iterable[int]], - query_id: int = None) -> APIResult: +async def cache_get_node_partitions_async(conn: 'AioConnection', caches: Union[int, Iterable[int]]) -> APIResult: """ Async version of cache_get_node_partitions. """ - return await __cache_get_node_partitions(conn, caches, query_id) + return await __cache_get_node_partitions(conn, caches) def __post_process_partitions(result): @@ -135,13 +131,12 @@ def __post_process_partitions(result): return result -def __cache_get_node_partitions(conn, caches, query_id): +def __cache_get_node_partitions(conn, caches): query_struct = Query( OP_CACHE_PARTITIONS, [ ('cache_ids', cache_ids), - ], - query_id=query_id + ] ) if not is_iterable(caches): caches = [caches] diff --git a/pyignite/api/binary.py b/pyignite/api/binary.py index 345e8e8..b49ab8b 100644 --- a/pyignite/api/binary.py +++ b/pyignite/api/binary.py @@ -26,34 +26,30 @@ from ..queries.response import BinaryTypeResponse -def get_binary_type(conn: 'Connection', binary_type: Union[str, int], query_id=None) -> APIResult: +def get_binary_type(conn: 'Connection', binary_type: Union[str, int]) -> APIResult: """ Gets the binary type information by type ID. :param conn: connection to Ignite server, :param binary_type: binary type name or ID, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. """ - return __get_binary_type(conn, binary_type, query_id) + return __get_binary_type(conn, binary_type) -async def get_binary_type_async(conn: 'AioConnection', binary_type: Union[str, int], query_id=None) -> APIResult: +async def get_binary_type_async(conn: 'AioConnection', binary_type: Union[str, int]) -> APIResult: """ Async version of get_binary_type. """ - return await __get_binary_type(conn, binary_type, query_id) + return await __get_binary_type(conn, binary_type) -def __get_binary_type(conn, binary_type, query_id): +def __get_binary_type(conn, binary_type): query_struct = Query( OP_GET_BINARY_TYPE, [ ('type_id', Int), ], - query_id=query_id, response_type=BinaryTypeResponse ) @@ -63,7 +59,7 @@ def __get_binary_type(conn, binary_type, query_id): def put_binary_type(connection: 'Connection', type_name: str, affinity_key_field: str = None, - is_enum=False, schema: dict = None, query_id=None) -> APIResult: + is_enum=False, schema: dict = None) -> APIResult: """ Registers binary type information in cluster. @@ -76,12 +72,9 @@ def put_binary_type(connection: 'Connection', type_name: str, affinity_key_field parameter names as keys and an integers as values. When register binary type, pass a dict of field names: field types. Binary type with no fields is OK, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. 
""" - return __put_binary_type(connection, type_name, affinity_key_field, is_enum, schema, query_id) + return __put_binary_type(connection, type_name, affinity_key_field, is_enum, schema) async def put_binary_type_async(connection: 'AioConnection', type_name: str, affinity_key_field: str = None, @@ -89,7 +82,7 @@ async def put_binary_type_async(connection: 'AioConnection', type_name: str, aff """ Async version of put_binary_type. """ - return await __put_binary_type(connection, type_name, affinity_key_field, is_enum, schema, query_id) + return await __put_binary_type(connection, type_name, affinity_key_field, is_enum, schema) def __post_process_put_binary(type_id): @@ -103,7 +96,7 @@ def internal(result): return internal -def __put_binary_type(connection, type_name, affinity_key_field, is_enum, schema, query_id): +def __put_binary_type(connection, type_name, affinity_key_field, is_enum, schema): # prepare data if schema is None: schema = {} @@ -158,8 +151,7 @@ def __put_binary_type(connection, type_name, affinity_key_field, is_enum, schema ('is_enum', Bool), ('enums', enum_struct), ('schema', schema_struct), - ], - query_id=query_id, + ] ) else: query_struct = Query( @@ -171,8 +163,7 @@ def __put_binary_type(connection, type_name, affinity_key_field, is_enum, schema ('binary_fields', binary_fields_struct), ('is_enum', Bool), ('schema', schema_struct), - ], - query_id=query_id, + ] ) return query_perform(query_struct, connection, query_params=data, post_process_fun=__post_process_put_binary(type_id)) diff --git a/pyignite/api/cache_config.py b/pyignite/api/cache_config.py index 7f2869b..d4a5f81 100644 --- a/pyignite/api/cache_config.py +++ b/pyignite/api/cache_config.py @@ -39,7 +39,7 @@ from .result import APIResult from ..datatypes.prop_codes import PROP_EXPIRY_POLICY from ..exceptions import NotSupportedByClusterError -from ..queries.query import CacheInfo +from ..queries.cache_info import CacheInfo def compact_cache_config(cache_config: dict) -> dict: @@ -60,27 +60,23 @@ def compact_cache_config(cache_config: dict) -> dict: return result -def cache_get_configuration(connection: 'Connection', cache_info: CacheInfo, query_id=None) -> 'APIResult': +def cache_get_configuration(connection: 'Connection', cache_info: CacheInfo) -> 'APIResult': """ Gets configuration for the given cache. :param connection: connection to Ignite server, :param cache_info: cache meta info, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Result value is OrderedDict with the cache configuration parameters. """ - return __cache_get_configuration(connection, cache_info, query_id) + return __cache_get_configuration(connection, cache_info) -async def cache_get_configuration_async( - connection: 'AioConnection', cache_info: CacheInfo, query_id=None) -> 'APIResult': +async def cache_get_configuration_async(connection: 'AioConnection', cache_info: CacheInfo) -> 'APIResult': """ Async version of cache_get_configuration. 
""" - return await __cache_get_configuration(connection, cache_info, query_id) + return await __cache_get_configuration(connection, cache_info) def __post_process_cache_config(result): @@ -89,13 +85,12 @@ def __post_process_cache_config(result): return result -def __cache_get_configuration(connection, cache_info, query_id): +def __cache_get_configuration(connection, cache_info): query_struct = Query( OP_CACHE_GET_CONFIGURATION, [ ('cache_info', CacheInfo) - ], - query_id=query_id, + ] ) return query_perform(query_struct, connection, query_params={ @@ -108,106 +103,94 @@ def __cache_get_configuration(connection, cache_info, query_id): ) -def cache_create(connection: 'Connection', name: str, query_id=None) -> 'APIResult': +def cache_create(connection: 'Connection', name: str) -> 'APIResult': """ Creates a cache with a given name. Returns error if a cache with specified name already exists. :param connection: connection to Ignite server, :param name: cache name, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status if a cache is created successfully, non-zero status and an error description otherwise. """ - return __cache_create_with_name(OP_CACHE_CREATE_WITH_NAME, connection, name, query_id) + return __cache_create_with_name(OP_CACHE_CREATE_WITH_NAME, connection, name) -async def cache_create_async(connection: 'AioConnection', name: str, query_id=None) -> 'APIResult': +async def cache_create_async(connection: 'AioConnection', name: str) -> 'APIResult': """ Async version of cache_create. """ - return await __cache_create_with_name(OP_CACHE_CREATE_WITH_NAME, connection, name, query_id) + return await __cache_create_with_name(OP_CACHE_CREATE_WITH_NAME, connection, name) -def cache_get_or_create(connection: 'Connection', name: str, query_id=None) -> 'APIResult': +def cache_get_or_create(connection: 'Connection', name: str) -> 'APIResult': """ Creates a cache with a given name. Does nothing if the cache exists. :param connection: connection to Ignite server, :param name: cache name, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status if a cache is created successfully, non-zero status and an error description otherwise. """ - return __cache_create_with_name(OP_CACHE_GET_OR_CREATE_WITH_NAME, connection, name, query_id) + return __cache_create_with_name(OP_CACHE_GET_OR_CREATE_WITH_NAME, connection, name) -async def cache_get_or_create_async(connection: 'AioConnection', name: str, query_id=None) -> 'APIResult': +async def cache_get_or_create_async(connection: 'AioConnection', name: str) -> 'APIResult': """ Async version of cache_get_or_create. 
""" - return await __cache_create_with_name(OP_CACHE_GET_OR_CREATE_WITH_NAME, connection, name, query_id) + return await __cache_create_with_name(OP_CACHE_GET_OR_CREATE_WITH_NAME, connection, name) -def __cache_create_with_name(op_code, conn, name, query_id): - query_struct = Query(op_code, [('cache_name', String)], query_id=query_id) +def __cache_create_with_name(op_code, conn, name): + query_struct = Query(op_code, [('cache_name', String)]) return query_perform(query_struct, conn, query_params={'cache_name': name}) -def cache_destroy(connection: 'Connection', cache: Union[str, int], query_id=None) -> 'APIResult': +def cache_destroy(connection: 'Connection', cache: Union[str, int]) -> 'APIResult': """ Destroys cache with a given name. :param connection: connection to Ignite server, :param cache: name or ID of the cache, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. """ - return __cache_destroy(connection, cache, query_id) + return __cache_destroy(connection, cache) -async def cache_destroy_async(connection: 'AioConnection', cache: Union[str, int], query_id=None) -> 'APIResult': +async def cache_destroy_async(connection: 'AioConnection', cache: Union[str, int]) -> 'APIResult': """ Async version of cache_destroy. """ - return await __cache_destroy(connection, cache, query_id) + return await __cache_destroy(connection, cache) -def __cache_destroy(connection, cache, query_id): - query_struct = Query(OP_CACHE_DESTROY, [('cache_id', Int)], query_id=query_id) +def __cache_destroy(connection, cache): + query_struct = Query(OP_CACHE_DESTROY, [('cache_id', Int)]) return query_perform(query_struct, connection, query_params={'cache_id': cache_id(cache)}) -def cache_get_names(connection: 'Connection', query_id=None) -> 'APIResult': +def cache_get_names(connection: 'Connection') -> 'APIResult': """ Gets existing cache names. :param connection: connection to Ignite server, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a list of cache names, non-zero status and an error description otherwise. """ - return __cache_get_names(connection, query_id) + return __cache_get_names(connection) -async def cache_get_names_async(connection: 'AioConnection', query_id=None) -> 'APIResult': +async def cache_get_names_async(connection: 'AioConnection') -> 'APIResult': """ Async version of cache_get_names. """ - return await __cache_get_names(connection, query_id) + return await __cache_get_names(connection) def __post_process_cache_names(result): @@ -216,14 +199,14 @@ def __post_process_cache_names(result): return result -def __cache_get_names(connection, query_id): - query_struct = Query(OP_CACHE_GET_NAMES, query_id=query_id) +def __cache_get_names(connection): + query_struct = Query(OP_CACHE_GET_NAMES) return query_perform(query_struct, connection, response_config=[('cache_names', StringArray)], post_process_fun=__post_process_cache_names) -def cache_create_with_config(connection: 'Connection', cache_props: dict, query_id=None) -> 'APIResult': +def cache_create_with_config(connection: 'Connection', cache_props: dict) -> 'APIResult': """ Creates cache with provided configuration. An error is returned if the name is already in use. 
@@ -232,23 +215,20 @@ def cache_create_with_config(connection: 'Connection', cache_props: dict, query_ :param cache_props: cache configuration properties to create cache with in form of dictionary {property code: python value}. You must supply at least name (PROP_NAME), - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status if cache was created, non-zero status and an error description otherwise. """ - return __cache_create_with_config(OP_CACHE_CREATE_WITH_CONFIGURATION, connection, cache_props, query_id) + return __cache_create_with_config(OP_CACHE_CREATE_WITH_CONFIGURATION, connection, cache_props) -async def cache_create_with_config_async(connection: 'AioConnection', cache_props: dict, query_id=None) -> 'APIResult': +async def cache_create_with_config_async(connection: 'AioConnection', cache_props: dict) -> 'APIResult': """ Async version of cache_create_with_config. """ - return await __cache_create_with_config(OP_CACHE_CREATE_WITH_CONFIGURATION, connection, cache_props, query_id) + return await __cache_create_with_config(OP_CACHE_CREATE_WITH_CONFIGURATION, connection, cache_props) -def cache_get_or_create_with_config(connection: 'Connection', cache_props: dict, query_id=None) -> 'APIResult': +def cache_get_or_create_with_config(connection: 'Connection', cache_props: dict) -> 'APIResult': """ Creates cache with provided configuration. Does nothing if the name is already in use. @@ -257,25 +237,20 @@ def cache_get_or_create_with_config(connection: 'Connection', cache_props: dict, :param cache_props: cache configuration properties to create cache with in form of dictionary {property code: python value}. You must supply at least name (PROP_NAME), - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status if cache was created, non-zero status and an error description otherwise. """ - return __cache_create_with_config(OP_CACHE_GET_OR_CREATE_WITH_CONFIGURATION, connection, cache_props, query_id) + return __cache_create_with_config(OP_CACHE_GET_OR_CREATE_WITH_CONFIGURATION, connection, cache_props) -async def cache_get_or_create_with_config_async(connection: 'AioConnection', cache_props: dict, - query_id=None) -> 'APIResult': +async def cache_get_or_create_with_config_async(connection: 'AioConnection', cache_props: dict) -> 'APIResult': """ Async version of cache_get_or_create_with_config. 
""" - return await __cache_create_with_config(OP_CACHE_GET_OR_CREATE_WITH_CONFIGURATION, connection, cache_props, - query_id) + return await __cache_create_with_config(OP_CACHE_GET_OR_CREATE_WITH_CONFIGURATION, connection, cache_props) -def __cache_create_with_config(op_code, connection, cache_props, query_id): +def __cache_create_with_config(op_code, connection, cache_props): prop_types, prop_values = {}, {} is_expiry_policy_supported = connection.protocol_context.is_expiry_policy_supported() for i, prop_item in enumerate(cache_props.items()): @@ -289,5 +264,5 @@ def __cache_create_with_config(op_code, connection, cache_props, query_id): prop_values['param_count'] = len(cache_props) following = [('param_count', Short)] + list(prop_types.items()) - query_struct = ConfigQuery(op_code, following, query_id=query_id) + query_struct = ConfigQuery(op_code, following) return query_perform(query_struct, connection, query_params=prop_values) diff --git a/pyignite/api/cluster.py b/pyignite/api/cluster.py index e134239..50c71bd 100644 --- a/pyignite/api/cluster.py +++ b/pyignite/api/cluster.py @@ -20,25 +20,22 @@ from pyignite.queries.op_codes import OP_CLUSTER_GET_STATE, OP_CLUSTER_CHANGE_STATE -def cluster_get_state(connection: 'Connection', query_id=None) -> 'APIResult': +def cluster_get_state(connection: 'Connection') -> 'APIResult': """ Get cluster state. :param connection: Connection to use, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a state retrieved on success, non-zero status and an error description on failure. """ - return __cluster_get_state(connection, query_id) + return __cluster_get_state(connection) -async def cluster_get_state_async(connection: 'AioConnection', query_id=None) -> 'APIResult': +async def cluster_get_state_async(connection: 'AioConnection') -> 'APIResult': """ Async version of cluster_get_state """ - return await __cluster_get_state(connection, query_id) + return await __cluster_get_state(connection) def __post_process_get_state(result): @@ -47,11 +44,11 @@ def __post_process_get_state(result): return result -def __cluster_get_state(connection, query_id): +def __cluster_get_state(connection): if not connection.protocol_context.is_cluster_api_supported(): raise NotSupportedByClusterError('Cluster API is not supported by the cluster') - query_struct = Query(OP_CLUSTER_GET_STATE, query_id=query_id) + query_struct = Query(OP_CLUSTER_GET_STATE) return query_perform( query_struct, connection, response_config=[('state', Byte)], @@ -59,26 +56,23 @@ def __cluster_get_state(connection, query_id): ) -def cluster_set_state(connection: 'Connection', state: int, query_id=None) -> 'APIResult': +def cluster_set_state(connection: 'Connection', state: int) -> 'APIResult': """ Set cluster state. :param connection: Connection to use, :param state: State to set, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status if a value is written, non-zero status and an error description otherwise. 
""" - return __cluster_set_state(connection, state, query_id) + return __cluster_set_state(connection, state) -async def cluster_set_state_async(connection: 'AioConnection', state: int, query_id=None) -> 'APIResult': +async def cluster_set_state_async(connection: 'AioConnection', state: int) -> 'APIResult': """ Async version of cluster_get_state """ - return await __cluster_set_state(connection, state, query_id) + return await __cluster_set_state(connection, state) def __post_process_set_state(result): @@ -87,7 +81,7 @@ def __post_process_set_state(result): return result -def __cluster_set_state(connection, state, query_id): +def __cluster_set_state(connection, state): if not connection.protocol_context.is_cluster_api_supported(): raise NotSupportedByClusterError('Cluster API is not supported by the cluster') @@ -95,8 +89,7 @@ def __cluster_set_state(connection, state, query_id): OP_CLUSTER_CHANGE_STATE, [ ('state', Byte) - ], - query_id=query_id + ] ) return query_perform( query_struct, connection, diff --git a/pyignite/api/key_value.py b/pyignite/api/key_value.py index 5038051..5b3f72c 100644 --- a/pyignite/api/key_value.py +++ b/pyignite/api/key_value.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Iterable, Optional, Union +from typing import Any, Iterable, Union from pyignite.connection import AioConnection, Connection from pyignite.queries.op_codes import ( @@ -28,12 +28,11 @@ from pyignite.queries import Query, query_perform from .result import APIResult -from ..queries.query import CacheInfo +from ..queries.cache_info import CacheInfo def cache_put(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None) -> 'APIResult': """ Puts a value with a given key to cache (overwriting existing value if any). @@ -45,33 +44,28 @@ def cache_put(connection: 'Connection', cache_info: CacheInfo, key: Any, value: should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status if a value is written, non-zero status and an error description otherwise. 
""" - return __cache_put(connection, cache_info, key, value, key_hint, value_hint, query_id) + return __cache_put(connection, cache_info, key, value, key_hint, value_hint) async def cache_put_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None) -> 'APIResult': """ Async version of cache_put """ - return await __cache_put(connection, cache_info, key, value, key_hint, value_hint, query_id) + return await __cache_put(connection, cache_info, key, value, key_hint, value_hint) -def __cache_put(connection, cache_info, key, value, key_hint, value_hint, query_id): +def __cache_put(connection, cache_info, key, value, key_hint, value_hint): query_struct = Query( OP_CACHE_PUT, [ ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -83,8 +77,8 @@ def __cache_put(connection, cache_info, key, value, key_hint, value_hint, query_ ) -def cache_get(connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': +def cache_get(connection: 'Connection', cache_info: CacheInfo, key: Any, + key_hint: 'IgniteDataType' = None) -> 'APIResult': """ Retrieves a value from cache by key. @@ -92,32 +86,28 @@ def cache_get(connection: 'Connection', cache_info: CacheInfo, key: Any, key_hin :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param key_hint: (optional) Ignite data type, for which the given key - should be converted, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, + should be converted :return: API result data object. Contains zero status and a value retrieved on success, non-zero status and an error description on failure. """ - return __cache_get(connection, cache_info, key, key_hint, query_id) + return __cache_get(connection, cache_info, key, key_hint) async def cache_get_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, - key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': + key_hint: 'IgniteDataType' = None) -> 'APIResult': """ Async version of cache_get """ - return await __cache_get(connection, cache_info, key, key_hint, query_id) + return await __cache_get(connection, cache_info, key, key_hint) -def __cache_get(connection, cache_info, key, key_hint, query_id): +def __cache_get(connection, cache_info, key, key_hint): query_struct = Query( OP_CACHE_GET, [ ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -132,40 +122,34 @@ def __cache_get(connection, cache_info, key, key_hint, query_id): ) -def cache_get_all(connection: 'Connection', cache_info: CacheInfo, keys: Iterable, - query_id: Optional[int] = None) -> 'APIResult': +def cache_get_all(connection: 'Connection', cache_info: CacheInfo, keys: Iterable) -> 'APIResult': """ Retrieves multiple key-value pairs from cache. 
:param connection: connection to Ignite server, :param cache_info: cache meta info, :param keys: list of keys or tuples of (key, key_hint), - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a dict, made of retrieved key-value pairs, non-zero status and an error description on failure. """ - return __cache_get_all(connection, cache_info, keys, query_id) + return __cache_get_all(connection, cache_info, keys) -async def cache_get_all_async(connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_get_all_async(connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable) -> 'APIResult': """ Async version of cache_get_all. """ - return await __cache_get_all(connection, cache_info, keys, query_id) + return await __cache_get_all(connection, cache_info, keys) -def __cache_get_all(connection, cache_info, keys, query_id): +def __cache_get_all(connection, cache_info, keys): query_struct = Query( OP_CACHE_GET_ALL, [ ('cache_info', CacheInfo), ('keys', AnyDataArray()), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -180,8 +164,7 @@ def __cache_get_all(connection, cache_info, keys, query_id): ) -def cache_put_all(connection: 'Connection', cache_info: CacheInfo, pairs: dict, - query_id: Optional[int] = None) -> 'APIResult': +def cache_put_all(connection: 'Connection', cache_info: CacheInfo, pairs: dict) -> 'APIResult': """ Puts multiple key-value pairs to cache (overwriting existing associations if any). @@ -191,31 +174,26 @@ def cache_put_all(connection: 'Connection', cache_info: CacheInfo, pairs: dict, :param pairs: dictionary type parameters, contains key-value pairs to save. Each key or value can be an item of representable Python type or a tuple of (item, hint), - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status if key-value pairs are written, non-zero status and an error description otherwise. """ - return __cache_put_all(connection, cache_info, pairs, query_id) + return __cache_put_all(connection, cache_info, pairs) -async def cache_put_all_async(connection: 'AioConnection', cache_info: CacheInfo, pairs: dict, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_put_all_async(connection: 'AioConnection', cache_info: CacheInfo, pairs: dict) -> 'APIResult': """ Async version of cache_put_all. 
""" - return await __cache_put_all(connection, cache_info, pairs, query_id) + return await __cache_put_all(connection, cache_info, pairs) -def __cache_put_all(connection, cache_info, pairs, query_id): +def __cache_put_all(connection, cache_info, pairs): query_struct = Query( OP_CACHE_PUT_ALL, [ ('cache_info', CacheInfo), ('data', Map), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -226,8 +204,8 @@ def __cache_put_all(connection, cache_info, pairs, query_id): ) -def cache_contains_key(connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': +def cache_contains_key(connection: 'Connection', cache_info: CacheInfo, key: Any, + key_hint: 'IgniteDataType' = None) -> 'APIResult': """ Returns a value indicating whether given key is present in cache. @@ -235,33 +213,29 @@ def cache_contains_key(connection: 'Connection', cache_info: CacheInfo, key: Any :param cache_info: cache meta info, :param key: key for the cache entry. Can be of any supported type, :param key_hint: (optional) Ignite data type, for which the given key - should be converted, - :param query_id: a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, + should be converted :return: API result data object. Contains zero status and a bool value retrieved on success: `True` when key is present, `False` otherwise, non-zero status and an error description on failure. """ - return __cache_contains_key(connection, cache_info, key, key_hint, query_id) + return __cache_contains_key(connection, cache_info, key, key_hint) async def cache_contains_key_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, - key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': + key_hint: 'IgniteDataType' = None) -> 'APIResult': """ Async version of cache_contains_key. """ - return await __cache_contains_key(connection, cache_info, key, key_hint, query_id) + return await __cache_contains_key(connection, cache_info, key, key_hint) -def __cache_contains_key(connection, cache_info, key, key_hint, query_id): +def __cache_contains_key(connection, cache_info, key, key_hint): query_struct = Query( OP_CACHE_CONTAINS_KEY, [ ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -277,39 +251,34 @@ def __cache_contains_key(connection, cache_info, key, key_hint, query_id): def cache_contains_keys(connection: 'Connection', cache_info: CacheInfo, keys: Iterable, - query_id: Optional[int] = None) -> 'APIResult': + ) -> 'APIResult': """ Returns a value indicating whether all given keys are present in cache. :param connection: connection to Ignite server, :param cache_info: cache meta info, :param keys: a list of keys or (key, type hint) tuples, - :param query_id: a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a bool value retrieved on success: `True` when all keys are present, `False` otherwise, non-zero status and an error description on failure. 
""" - return __cache_contains_keys(connection, cache_info, keys, query_id) + return __cache_contains_keys(connection, cache_info, keys) -async def cache_contains_keys_async(connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_contains_keys_async(connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable) -> 'APIResult': """ Async version of cache_contains_keys. """ - return await __cache_contains_keys(connection, cache_info, keys, query_id) + return await __cache_contains_keys(connection, cache_info, keys) -def __cache_contains_keys(connection, cache_info, keys, query_id): +def __cache_contains_keys(connection, cache_info, keys): query_struct = Query( OP_CACHE_CONTAINS_KEYS, [ ('cache_info', CacheInfo), ('keys', AnyDataArray()), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -325,8 +294,7 @@ def __cache_contains_keys(connection, cache_info, keys, query_id): def cache_get_and_put(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None) -> 'APIResult': """ Puts a value with a given key to cache_info, and returns the previous value for that key, or null value if there was not such key. @@ -339,26 +307,24 @@ def cache_get_and_put(connection: 'Connection', cache_info: CacheInfo, key: Any, should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param query_id: a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and an old value or None if a value is written, non-zero status and an error description in case of error. """ - return __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint, query_id) + return __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint) -async def cache_get_and_put_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_get_and_put_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None +) -> 'APIResult': """ Async version of cache_get_and_put. 
""" - return await __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint, query_id) + return await __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint) -def __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint, query_id): +def __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint): query_struct = Query( OP_CACHE_GET_AND_PUT, [ @@ -366,7 +332,6 @@ def __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), ], - query_id=query_id, ) return query_perform( query_struct, connection, @@ -383,8 +348,7 @@ def __cache_get_and_put(connection, cache_info, key, value, key_hint, value_hint def cache_get_and_replace(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None) -> 'APIResult': """ Puts a value with a given key to cache, returning previous value for that key, if and only if there is a value currently mapped @@ -398,32 +362,29 @@ def cache_get_and_replace(connection: 'Connection', cache_info: CacheInfo, key: should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param query_id: a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and an old value or None on success, non-zero status and an error description otherwise. """ - return __cache_get_and_replace(connection, cache_info, key, key_hint, value, value_hint, query_id) + return __cache_get_and_replace(connection, cache_info, key, key_hint, value, value_hint) -async def cache_get_and_replace_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_get_and_replace_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None +) -> 'APIResult': """ Async version of cache_get_and_replace. """ - return await __cache_get_and_replace(connection, cache_info, key, key_hint, value, value_hint, query_id) + return await __cache_get_and_replace(connection, cache_info, key, key_hint, value, value_hint) -def __cache_get_and_replace(connection, cache_info, key, key_hint, value, value_hint, query_id): +def __cache_get_and_replace(connection, cache_info, key, key_hint, value, value_hint): query_struct = Query( OP_CACHE_GET_AND_REPLACE, [ ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -439,8 +400,9 @@ def __cache_get_and_replace(connection, cache_info, key, key_hint, value, value_ ) -def cache_get_and_remove(connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': +def cache_get_and_remove( + connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None +) -> 'APIResult': """ Removes the cache entry with specified key, returning the value. 
@@ -449,28 +411,24 @@ def cache_get_and_remove(connection: 'Connection', cache_info: CacheInfo, key: A :param key: key for the cache entry. Can be of any supported type, :param key_hint: (optional) Ignite data type, for which the given key should be converted, - :param query_id: a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and an old value or None, non-zero status and an error description otherwise. """ - return __cache_get_and_remove(connection, cache_info, key, key_hint, query_id) + return __cache_get_and_remove(connection, cache_info, key, key_hint) async def cache_get_and_remove_async( - connection: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': - return await __cache_get_and_remove(connection, cache_info, key, key_hint, query_id) + connection: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None +) -> 'APIResult': + return await __cache_get_and_remove(connection, cache_info, key, key_hint) -def __cache_get_and_remove(connection, cache_info, key, key_hint, query_id): +def __cache_get_and_remove(connection, cache_info, key, key_hint): query_struct = Query( OP_CACHE_GET_AND_REMOVE, [ ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -486,8 +444,7 @@ def __cache_get_and_remove(connection, cache_info, key, key_hint, query_id): def cache_put_if_absent(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key does not already exist. @@ -500,33 +457,30 @@ def cache_put_if_absent(connection: 'Connection', cache_info: CacheInfo, key: An should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id) + return __cache_put_if_absent(connection, cache_info, key, value, key_hint, value_hint) -async def cache_put_if_absent_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_put_if_absent_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None +) -> 'APIResult': """ Async version of cache_put_if_absent. 
""" - return await __cache_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id) + return await __cache_put_if_absent(connection, cache_info, key, value, key_hint, value_hint) -def __cache_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id): +def __cache_put_if_absent(connection, cache_info, key, value, key_hint, value_hint): query_struct = Query( OP_CACHE_PUT_IF_ABSENT, [ ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -543,8 +497,7 @@ def __cache_put_if_absent(connection, cache_info, key, value, key_hint, value_hi def cache_get_and_put_if_absent(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key does not already exist. @@ -557,33 +510,30 @@ def cache_get_and_put_if_absent(connection: 'Connection', cache_info: CacheInfo, should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and an old value or None on success, non-zero status and an error description otherwise. """ - return __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id) + return __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, value_hint) -async def cache_get_and_put_if_absent_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_get_and_put_if_absent_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None +) -> 'APIResult': """ Async version of cache_get_and_put_if_absent. """ - return await __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id) + return await __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, value_hint) -def __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, value_hint, query_id): +def __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, value_hint): query_struct = Query( OP_CACHE_GET_AND_PUT_IF_ABSENT, [ ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -600,8 +550,7 @@ def __cache_get_and_put_if_absent(connection, cache_info, key, value, key_hint, def cache_replace(connection: 'Connection', cache_info: CacheInfo, key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key already exist. 
@@ -613,34 +562,31 @@ def cache_replace(connection: 'Connection', cache_info: CacheInfo, key: Any, val should be converted, :param value_hint: (optional) Ignite data type, for which the given value should be converted. - :param query_id: a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a boolean success code, or non-zero status and an error description if something has gone wrong. """ - return __cache_replace(connection, cache_info, key, value, key_hint, value_hint, query_id) + return __cache_replace(connection, cache_info, key, value, key_hint, value_hint) -async def cache_replace_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, - key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': +async def cache_replace_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, value: Any, + key_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None +) -> 'APIResult': """ Async version of cache_replace. """ - return await __cache_replace(connection, cache_info, key, value, key_hint, value_hint, query_id) + return await __cache_replace(connection, cache_info, key, value, key_hint, value_hint) -def __cache_replace(connection, cache_info, key, value, key_hint, value_hint, query_id): +def __cache_replace(connection, cache_info, key, value, key_hint, value_hint): query_struct = Query( OP_CACHE_REPLACE, [ ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('value', value_hint or AnyDataObject), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -658,7 +604,7 @@ def __cache_replace(connection, cache_info, key, value, key_hint, value_hint, qu def cache_replace_if_equals(connection: 'Connection', cache_info: CacheInfo, key: Any, sample: Any, value: Any, key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, - value_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': + value_hint: 'IgniteDataType' = None) -> 'APIResult': """ Puts a value with a given key to cache only if the key already exists and value equals provided sample. @@ -674,29 +620,26 @@ def cache_replace_if_equals(connection: 'Connection', cache_info: CacheInfo, key the given sample should be converted :param value_hint: (optional) Ignite data type, for which the given value should be converted, - :param query_id: (optional) a value generated by client and returned - as-is in response.query_id. When the parameter is omitted, a random - value is generated, :return: API result data object. Contains zero status and a boolean success code, or non-zero status and an error description if something has gone wrong. """ return __cache_replace_if_equals(connection, cache_info, key, sample, value, key_hint, - sample_hint, value_hint, query_id) + sample_hint, value_hint) async def cache_replace_if_equals_async( connection: 'AioConnection', cache_info: CacheInfo, key: Any, sample: Any, value: Any, - key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': + key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, value_hint: 'IgniteDataType' = None +) -> 'APIResult': """ Async version of cache_replace_if_equals. 
""" return await __cache_replace_if_equals(connection, cache_info, key, sample, value, key_hint, - sample_hint, value_hint, query_id) + sample_hint, value_hint) -def __cache_replace_if_equals(connection, cache_info, key, sample, value, key_hint, sample_hint, value_hint, query_id): +def __cache_replace_if_equals(connection, cache_info, key, sample, value, key_hint, sample_hint, value_hint): query_struct = Query( OP_CACHE_REPLACE_IF_EQUALS, [ @@ -704,8 +647,7 @@ def __cache_replace_if_equals(connection, cache_info, key, sample, value, key_hi ('key', key_hint or AnyDataObject), ('sample', sample_hint or AnyDataObject), ('value', value_hint or AnyDataObject), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -722,36 +664,31 @@ def __cache_replace_if_equals(connection, cache_info, key, sample, value, key_hi ) -def cache_clear(connection: 'Connection', cache_info: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': +def cache_clear(connection: 'Connection', cache_info: CacheInfo) -> 'APIResult': """ Clears the cache without notifying listeners or cache writers. :param connection: connection to Ignite server, :param cache_info: cache meta info, - :param query_id: (optional) a value generated by client and returned - as-is in response.query_id. When the parameter is omitted, a random - value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_clear(connection, cache_info, query_id) + return __cache_clear(connection, cache_info) -async def cache_clear_async( - connection: 'AioConnection', cache_info: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': +async def cache_clear_async(connection: 'AioConnection', cache_info: CacheInfo) -> 'APIResult': """ Async version of cache_clear. """ - return await __cache_clear(connection, cache_info, query_id) + return await __cache_clear(connection, cache_info) -def __cache_clear(connection, cache_info, query_id): +def __cache_clear(connection, cache_info): query_struct = Query( OP_CACHE_CLEAR, [ ('cache_info', CacheInfo), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -761,8 +698,9 @@ def __cache_clear(connection, cache_info, query_id): ) -def cache_clear_key(connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': +def cache_clear_key( + connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None +) -> 'APIResult': """ Clears the cache key without notifying listeners or cache writers. @@ -771,31 +709,28 @@ def cache_clear_key(connection: 'Connection', cache_info: CacheInfo, key: Any, k :param key: key for the cache entry, :param key_hint: (optional) Ignite data type, for which the given key should be converted, - :param query_id: (optional) a value generated by client and returned - as-is in response.query_id. When the parameter is omitted, a random - value is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. 
""" - return __cache_clear_key(connection, cache_info, key, key_hint, query_id) + return __cache_clear_key(connection, cache_info, key, key_hint) -async def cache_clear_key_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, - key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': +async def cache_clear_key_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None +) -> 'APIResult': """ Async version of cache_clear_key. """ - return await __cache_clear_key(connection, cache_info, key, key_hint, query_id) + return await __cache_clear_key(connection, cache_info, key, key_hint) -def __cache_clear_key(connection, cache_info, key, key_hint, query_id): +def __cache_clear_key(connection, cache_info, key, key_hint): query_struct = Query( OP_CACHE_CLEAR_KEY, [ ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -806,40 +741,33 @@ def __cache_clear_key(connection, cache_info, key, key_hint, query_id): ) -def cache_clear_keys( - connection: 'Connection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': +def cache_clear_keys(connection: 'Connection', cache_info: CacheInfo, keys: Iterable) -> 'APIResult': """ Clears the cache keys without notifying listeners or cache writers. :param connection: connection to Ignite server, :param cache_info: cache meta info, :param keys: list of keys or tuples of (key, key_hint), - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_clear_keys(connection, cache_info, keys, query_id) + return __cache_clear_keys(connection, cache_info, keys) -async def cache_clear_keys_async( - connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None -) -> 'APIResult': +async def cache_clear_keys_async(connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable) -> 'APIResult': """ Async version of cache_clear_keys. """ - return await __cache_clear_keys(connection, cache_info, keys, query_id) + return await __cache_clear_keys(connection, cache_info, keys) -def __cache_clear_keys(connection, cache_info, keys, query_id): +def __cache_clear_keys(connection, cache_info, keys): query_struct = Query( OP_CACHE_CLEAR_KEYS, [ ('cache_info', CacheInfo), ('keys', AnyDataArray()), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -850,8 +778,9 @@ def __cache_clear_keys(connection, cache_info, keys, query_id): ) -def cache_remove_key(connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': +def cache_remove_key( + connection: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None +) -> 'APIResult': """ Clears the cache key without notifying listeners or cache writers. @@ -859,33 +788,30 @@ def cache_remove_key(connection: 'Connection', cache_info: CacheInfo, key: Any, :param cache_info: cache meta info, :param key: key for the cache entry, :param key_hint: (optional) Ignite data type, for which the given key - should be converted, - :param query_id: (optional) a value generated by client and returned - as-is in response.query_id. 
When the parameter is omitted, a random - value is generated, + should be converted :return: API result data object. Contains zero status and a boolean success code, or non-zero status and an error description if something has gone wrong. """ - return __cache_remove_key(connection, cache_info, key, key_hint, query_id) + return __cache_remove_key(connection, cache_info, key, key_hint) -async def cache_remove_key_async(connection: 'AioConnection', cache_info: CacheInfo, key: Any, - key_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': +async def cache_remove_key_async( + connection: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None +) -> 'APIResult': """ Async version of cache_remove_key. """ - return await __cache_remove_key(connection, cache_info, key, key_hint, query_id) + return await __cache_remove_key(connection, cache_info, key, key_hint) -def __cache_remove_key(connection, cache_info, key, key_hint, query_id): +def __cache_remove_key(connection, cache_info, key, key_hint): query_struct = Query( OP_CACHE_REMOVE_KEY, [ ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -901,8 +827,7 @@ def __cache_remove_key(connection, cache_info, key, key_hint, query_id): def cache_remove_if_equals(connection: 'Connection', cache_info: CacheInfo, key: Any, sample: Any, - key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None, - query_id: Optional[int] = None) -> 'APIResult': + key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None) -> 'APIResult': """ Removes an entry with a given key if provided value is equal to actual value, notifying listeners and cache writers. @@ -914,35 +839,32 @@ def cache_remove_if_equals(connection: 'Connection', cache_info: CacheInfo, key: :param key_hint: (optional) Ignite data type, for which the given key should be converted, :param sample_hint: (optional) Ignite data type, for whic - the given sample should be converted - :param query_id: (optional) a value generated by client and returned - as-is in response.query_id. When the parameter is omitted, a random - value is generated, + the given sample should be converted, :return: API result data object. Contains zero status and a boolean success code, or non-zero status and an error description if something has gone wrong. """ - return __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, sample_hint, query_id) + return __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, sample_hint) async def cache_remove_if_equals_async( - connection: 'AioConnection', cache_info: CacheInfo, key: Any, sample: Any, key_hint: 'IgniteDataType' = None, - sample_hint: 'IgniteDataType' = None, query_id: Optional[int] = None) -> 'APIResult': + connection: 'AioConnection', cache_info: CacheInfo, key: Any, sample: Any, + key_hint: 'IgniteDataType' = None, sample_hint: 'IgniteDataType' = None +) -> 'APIResult': """ Async version of cache_remove_if_equals. 
""" - return await __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, sample_hint, query_id) + return await __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, sample_hint) -def __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, sample_hint, query_id): +def __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, sample_hint): query_struct = Query( OP_CACHE_REMOVE_IF_EQUALS, [ ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('sample', sample_hint or AnyDataObject), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -958,40 +880,33 @@ def __cache_remove_if_equals(connection, cache_info, key, sample, key_hint, samp ) -def cache_remove_keys( - connection: 'Connection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None) -> 'APIResult': +def cache_remove_keys(connection: 'Connection', cache_info: CacheInfo, keys: Iterable) -> 'APIResult': """ Removes entries with given keys, notifying listeners and cache writers. :param connection: connection to Ignite server, :param cache_info: cache meta info, :param keys: list of keys or tuples of (key, key_hint), - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_remove_keys(connection, cache_info, keys, query_id) + return __cache_remove_keys(connection, cache_info, keys) -async def cache_remove_keys_async( - connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable, query_id: Optional[int] = None -) -> 'APIResult': +async def cache_remove_keys_async(connection: 'AioConnection', cache_info: CacheInfo, keys: Iterable) -> 'APIResult': """ Async version of cache_remove_keys. """ - return await __cache_remove_keys(connection, cache_info, keys, query_id) + return await __cache_remove_keys(connection, cache_info, keys) -def __cache_remove_keys(connection, cache_info, keys, query_id): +def __cache_remove_keys(connection, cache_info, keys): query_struct = Query( OP_CACHE_REMOVE_KEYS, [ ('cache_info', CacheInfo), ('keys', AnyDataArray()), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -1002,36 +917,31 @@ def __cache_remove_keys(connection, cache_info, keys, query_id): ) -def cache_remove_all(connection: 'Connection', cache_info: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': +def cache_remove_all(connection: 'Connection', cache_info: CacheInfo) -> 'APIResult': """ Removes all entries from cache_info, notifying listeners and cache writers. :param connection: connection to Ignite server, :param cache_info: cache meta info, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __cache_remove_all(connection, cache_info, query_id) + return __cache_remove_all(connection, cache_info) -async def cache_remove_all_async( - connection: 'AioConnection', cache_info: CacheInfo, query_id: Optional[int] = None) -> 'APIResult': +async def cache_remove_all_async(connection: 'AioConnection', cache_info: CacheInfo) -> 'APIResult': """ Async version of cache_remove_all. 
""" - return await __cache_remove_all(connection, cache_info, query_id) + return await __cache_remove_all(connection, cache_info) -def __cache_remove_all(connection, cache_info, query_id): +def __cache_remove_all(connection, cache_info): query_struct = Query( OP_CACHE_REMOVE_ALL, [ ('cache_info', CacheInfo), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -1041,8 +951,9 @@ def __cache_remove_all(connection, cache_info, query_id): ) -def cache_get_size(connection: 'Connection', cache_info: CacheInfo, peek_modes: Union[int, list, tuple] = None, - query_id: Optional[int] = None) -> 'APIResult': +def cache_get_size( + connection: 'Connection', cache_info: CacheInfo, peek_modes: Union[int, list, tuple] = None +) -> 'APIResult': """ Gets the number of entries in cache. @@ -1051,24 +962,20 @@ def cache_get_size(connection: 'Connection', cache_info: CacheInfo, peek_modes: :param peek_modes: (optional) limit count to near cache partition (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache (PeekModes.BACKUP). Defaults to pimary cache partitions (PeekModes.PRIMARY), - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a number of cache entries on success, non-zero status and an error description otherwise. """ - return __cache_get_size(connection, cache_info, peek_modes, query_id) + return __cache_get_size(connection, cache_info, peek_modes) async def cache_get_size_async( - connection: 'AioConnection', cache_info: CacheInfo, peek_modes: Union[int, list, tuple] = None, - query_id: Optional[int] = None + connection: 'AioConnection', cache_info: CacheInfo, peek_modes: Union[int, list, tuple] = None ) -> 'APIResult': - return await __cache_get_size(connection, cache_info, peek_modes, query_id) + return await __cache_get_size(connection, cache_info, peek_modes) -def __cache_get_size(connection, cache_info, peek_modes, query_id): +def __cache_get_size(connection, cache_info, peek_modes): if peek_modes is None: peek_modes = [] elif not isinstance(peek_modes, (list, tuple)): @@ -1079,8 +986,7 @@ def __cache_get_size(connection, cache_info, peek_modes, query_id): [ ('cache_info', CacheInfo), ('peek_modes', ByteArray), - ], - query_id=query_id, + ] ) return query_perform( query_struct, connection, @@ -1095,8 +1001,10 @@ def __cache_get_size(connection, cache_info, peek_modes, query_id): ) -def cache_local_peek(conn: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, - peek_modes: Union[int, list, tuple] = None, query_id: Optional[int] = None) -> 'APIResult': +def cache_local_peek( + conn: 'Connection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, + peek_modes: Union[int, list, tuple] = None +) -> 'APIResult': """ Peeks at in-memory cached value using default optional peek mode. @@ -1111,26 +1019,23 @@ def cache_local_peek(conn: 'Connection', cache_info: CacheInfo, key: Any, key_hi :param peek_modes: (optional) limit count to near cache partition (PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache (PeekModes.BACKUP). Defaults to primary cache partitions (PeekModes.PRIMARY), - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. 
Contains zero status and a peeked value (null if not found). """ - return __cache_local_peek(conn, cache_info, key, key_hint, peek_modes, query_id) + return __cache_local_peek(conn, cache_info, key, key_hint, peek_modes) async def cache_local_peek_async( conn: 'AioConnection', cache_info: CacheInfo, key: Any, key_hint: 'IgniteDataType' = None, - peek_modes: Union[int, list, tuple] = None, query_id: Optional[int] = None + peek_modes: Union[int, list, tuple] = None, ) -> 'APIResult': """ Async version of cache_local_peek. """ - return await __cache_local_peek(conn, cache_info, key, key_hint, peek_modes, query_id) + return await __cache_local_peek(conn, cache_info, key, key_hint, peek_modes) -def __cache_local_peek(conn, cache_info, key, key_hint, peek_modes, query_id): +def __cache_local_peek(conn, cache_info, key, key_hint, peek_modes): if peek_modes is None: peek_modes = [] elif not isinstance(peek_modes, (list, tuple)): @@ -1142,8 +1047,7 @@ def __cache_local_peek(conn, cache_info, key, key_hint, peek_modes, query_id): ('cache_info', CacheInfo), ('key', key_hint or AnyDataObject), ('peek_modes', ByteArray), - ], - query_id=query_id, + ] ) return query_perform( query_struct, conn, diff --git a/pyignite/api/sql.py b/pyignite/api/sql.py index 267bc5b..0f41194 100644 --- a/pyignite/api/sql.py +++ b/pyignite/api/sql.py @@ -23,12 +23,12 @@ ) from pyignite.utils import deprecated from .result import APIResult -from ..queries.query import CacheInfo +from ..queries.cache_info import CacheInfo from ..queries.response import SQLResponse -def scan(conn: 'Connection', cache_info: CacheInfo, page_size: int, partitions: int = -1, local: bool = False, - query_id: int = None) -> APIResult: +def scan(conn: 'Connection', cache_info: CacheInfo, page_size: int, partitions: int = -1, + local: bool = False) -> APIResult: """ Performs scan query. @@ -39,9 +39,6 @@ def scan(conn: 'Connection', cache_info: CacheInfo, page_size: int, partitions: (negative to query entire cache), :param local: (optional) pass True if this query should be executed on local node only. Defaults to False, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a value of type dict with results on success, non-zero status and an error description otherwise. @@ -53,15 +50,15 @@ def scan(conn: 'Connection', cache_info: CacheInfo, page_size: int, partitions: * `more`: bool, True if more data is available for subsequent ‘scan_cursor_get_page’ calls. """ - return __scan(conn, cache_info, page_size, partitions, local, query_id) + return __scan(conn, cache_info, page_size, partitions, local) async def scan_async(conn: 'AioConnection', cache_info: CacheInfo, page_size: int, partitions: int = -1, - local: bool = False, query_id: int = None) -> APIResult: + local: bool = False) -> APIResult: """ Async version of scan. 
""" - return await __scan(conn, cache_info, page_size, partitions, local, query_id) + return await __scan(conn, cache_info, page_size, partitions, local) def __query_result_post_process(result): @@ -70,7 +67,7 @@ def __query_result_post_process(result): return result -def __scan(conn, cache_info, page_size, partitions, local, query_id): +def __scan(conn, cache_info, page_size, partitions, local): query_struct = Query( OP_QUERY_SCAN, [ @@ -79,8 +76,7 @@ def __scan(conn, cache_info, page_size, partitions, local, query_id): ('page_size', Int), ('partitions', Int), ('local', Bool), - ], - query_id=query_id, + ] ) return query_perform( query_struct, conn, @@ -100,16 +96,13 @@ def __scan(conn, cache_info, page_size, partitions, local, query_id): ) -def scan_cursor_get_page(conn: 'Connection', cursor: int, query_id: int = None) -> APIResult: +def scan_cursor_get_page(conn: 'Connection', cursor: int) -> APIResult: """ Fetches the next scan query cursor page by cursor ID that is obtained from `scan` function. :param conn: connection to Ignite server, :param cursor: cursor ID, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a value of type dict with results on success, non-zero status and an error description otherwise. @@ -120,20 +113,19 @@ def scan_cursor_get_page(conn: 'Connection', cursor: int, query_id: int = None) * `more`: bool, True if more data is available for subsequent ‘scan_cursor_get_page’ calls. """ - return __scan_cursor_get_page(conn, cursor, query_id) + return __scan_cursor_get_page(conn, cursor) -async def scan_cursor_get_page_async(conn: 'AioConnection', cursor: int, query_id: int = None) -> APIResult: - return await __scan_cursor_get_page(conn, cursor, query_id) +async def scan_cursor_get_page_async(conn: 'AioConnection', cursor: int) -> APIResult: + return await __scan_cursor_get_page(conn, cursor) -def __scan_cursor_get_page(conn, cursor, query_id): +def __scan_cursor_get_page(conn, cursor): query_struct = Query( OP_QUERY_SCAN_CURSOR_GET_PAGE, [ ('cursor', Long), - ], - query_id=query_id, + ] ) return query_perform( query_struct, conn, @@ -154,7 +146,7 @@ def sql( conn: 'Connection', cache_info: CacheInfo, table_name: str, query_str: str, page_size: int, query_args=None, distributed_joins: bool = False, replicated_only: bool = False, - local: bool = False, timeout: int = 0, query_id: int = None + local: bool = False, timeout: int = 0 ) -> APIResult: """ Executes an SQL query over data stored in the cluster. The query returns @@ -173,9 +165,6 @@ def sql( on local node only. Defaults to False, :param timeout: (optional) non-negative timeout value in ms. Zero disables timeout (default), - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a value of type dict with results on success, non-zero status and an error description otherwise. @@ -203,8 +192,7 @@ def sql( ('replicated_only', Bool), ('page_size', Int), ('timeout', Long), - ], - query_id=query_id, + ] ) result = query_struct.perform( conn, @@ -232,17 +220,12 @@ def sql( @deprecated(version='1.2.0', reason="This API is deprecated and will be removed in the following major release. 
" "Use sql_fields instead") -def sql_cursor_get_page( - conn: 'Connection', cursor: int, query_id: int = None, -) -> APIResult: +def sql_cursor_get_page(conn: 'Connection', cursor: int) -> APIResult: """ Retrieves the next SQL query cursor page by cursor ID from `sql`. :param conn: connection to Ignite server, :param cursor: cursor ID, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a value of type dict with results on success, non-zero status and an error description otherwise. @@ -258,8 +241,7 @@ def sql_cursor_get_page( OP_QUERY_SQL_CURSOR_GET_PAGE, [ ('cursor', Long), - ], - query_id=query_id, + ] ) result = query_struct.perform( conn, @@ -283,7 +265,7 @@ def sql_fields( local: bool = False, replicated_only: bool = False, enforce_join_order: bool = False, collocated: bool = False, lazy: bool = False, include_field_names: bool = False, max_rows: int = -1, - timeout: int = 0, query_id: int = None + timeout: int = 0 ) -> APIResult: """ Performs SQL fields query. @@ -313,9 +295,6 @@ def sql_fields( :param max_rows: (optional) query-wide maximum of rows. :param timeout: (optional) non-negative timeout value in ms. Zero disables timeout. - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a value of type dict with results on success, non-zero status and an error description otherwise. @@ -329,7 +308,7 @@ def sql_fields( """ return __sql_fields(conn, cache_info, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, - timeout, query_id) + timeout) async def sql_fields_async( @@ -339,19 +318,19 @@ async def sql_fields_async( local: bool = False, replicated_only: bool = False, enforce_join_order: bool = False, collocated: bool = False, lazy: bool = False, include_field_names: bool = False, max_rows: int = -1, - timeout: int = 0, query_id: int = None + timeout: int = 0 ) -> APIResult: """ Async version of sql_fields. """ return await __sql_fields(conn, cache_info, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, replicated_only, enforce_join_order, collocated, lazy, - include_field_names, max_rows, timeout, query_id) + include_field_names, max_rows, timeout) def __sql_fields( conn, cache_info, query_str, page_size, query_args, schema, statement_type, distributed_joins, local, - replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, timeout, query_id + replicated_only, enforce_join_order, collocated, lazy, include_field_names, max_rows, timeout ): if query_args is None: query_args = [] @@ -375,7 +354,6 @@ def __sql_fields( ('timeout', Long), ('include_field_names', Bool), ], - query_id=query_id, response_type=SQLResponse ) @@ -403,16 +381,13 @@ def __sql_fields( ) -def sql_fields_cursor_get_page(conn: 'Connection', cursor: int, field_count: int, query_id: int = None) -> APIResult: +def sql_fields_cursor_get_page(conn: 'Connection', cursor: int, field_count: int) -> APIResult: """ Retrieves the next query result page by cursor ID from `sql_fields`. 
:param conn: connection to Ignite server, :param cursor: cursor ID, :param field_count: a number of fields in a row, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status and a value of type dict with results on success, non-zero status and an error description otherwise. @@ -423,24 +398,22 @@ def sql_fields_cursor_get_page(conn: 'Connection', cursor: int, field_count: int * `more`: bool, True if more data is available for subsequent ‘sql_fields_cursor_get_page’ calls. """ - return __sql_fields_cursor_get_page(conn, cursor, field_count, query_id) + return __sql_fields_cursor_get_page(conn, cursor, field_count) -async def sql_fields_cursor_get_page_async(conn: 'AioConnection', cursor: int, field_count: int, - query_id: int = None) -> APIResult: +async def sql_fields_cursor_get_page_async(conn: 'AioConnection', cursor: int, field_count: int) -> APIResult: """ Async version sql_fields_cursor_get_page. """ - return await __sql_fields_cursor_get_page(conn, cursor, field_count, query_id) + return await __sql_fields_cursor_get_page(conn, cursor, field_count) -def __sql_fields_cursor_get_page(conn, cursor, field_count, query_id): +def __sql_fields_cursor_get_page(conn, cursor, field_count): query_struct = Query( OP_QUERY_SQL_FIELDS_CURSOR_GET_PAGE, [ ('cursor', Long), - ], - query_id=query_id, + ] ) return query_perform( query_struct, conn, @@ -469,32 +442,28 @@ def __post_process_sql_fields_cursor(result): return result -def resource_close(conn: 'Connection', cursor: int, query_id: int = None) -> APIResult: +def resource_close(conn: 'Connection', cursor: int) -> APIResult: """ Closes a resource, such as query cursor. :param conn: connection to Ignite server, :param cursor: cursor ID, - :param query_id: (optional) a value generated by client and returned as-is - in response.query_id. When the parameter is omitted, a random value - is generated, :return: API result data object. Contains zero status on success, non-zero status and an error description otherwise. """ - return __resource_close(conn, cursor, query_id) + return __resource_close(conn, cursor) -async def resource_close_async(conn: 'AioConnection', cursor: int, query_id: int = None) -> APIResult: - return await __resource_close(conn, cursor, query_id) +async def resource_close_async(conn: 'AioConnection', cursor: int) -> APIResult: + return await __resource_close(conn, cursor) -def __resource_close(conn, cursor, query_id): +def __resource_close(conn, cursor): query_struct = Query( OP_RESOURCE_CLOSE, [ ('cursor', Long), - ], - query_id=query_id, + ] ) return query_perform( query_struct, conn, diff --git a/pyignite/api/tx_api.py b/pyignite/api/tx_api.py new file mode 100644 index 0000000..ee8de07 --- /dev/null +++ b/pyignite/api/tx_api.py @@ -0,0 +1,124 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextvars + +import attr + +from pyignite.datatypes import Byte, String, Long, Int, Bool +from pyignite.exceptions import CacheError +from pyignite.queries import Query, query_perform +from pyignite.queries.op_codes import OP_TX_START, OP_TX_END + +__CURRENT_TX = contextvars.ContextVar('current_tx', default=None) + + +def get_tx_id(): + ctx = __CURRENT_TX.get() if __CURRENT_TX else None + return ctx.tx_id if ctx else None + + +def get_tx_connection(): + ctx = __CURRENT_TX.get() if __CURRENT_TX else None + return ctx.conn if ctx else None + + +@attr.s +class TransactionContext: + tx_id = attr.ib(type=int, default=None) + conn = attr.ib(default=None) + + +def tx_start(conn, concurrency, isolation, timeout: int = 0, label: str = None): + result = __tx_start(conn, concurrency, isolation, timeout, label) + return __tx_start_post_process(result, conn) + + +async def tx_start_async(conn, concurrency, isolation, timeout: int = 0, label: str = None): + result = await __tx_start(conn, concurrency, isolation, timeout, label) + return __tx_start_post_process(result, conn) + + +def __tx_start(conn, concurrency, isolation, timeout, label): + query_struct = Query( + OP_TX_START, + [ + ('concurrency', Byte), + ('isolation', Byte), + ('timeout', Long), + ('label', String) + ] + ) + return query_perform( + query_struct, conn, + query_params={ + 'concurrency': concurrency, + 'isolation': isolation, + 'timeout': timeout, + 'label': label + }, + response_config=[ + ('tx_id', Int) + ] + ) + + +def tx_end(tx_id, committed): + ctx = __CURRENT_TX.get() + + if not ctx or ctx.tx_id != tx_id: + raise CacheError("Cannot commit transaction from different thread or coroutine") + + try: + return __tx_end(ctx.conn, tx_id, committed) + finally: + __CURRENT_TX.set(None) + + +async def tx_end_async(tx_id, committed): + ctx = __CURRENT_TX.get() + + if not ctx or ctx.tx_id != tx_id: + raise CacheError("Cannot commit transaction from different thread or coroutine") + + try: + return await __tx_end(ctx.conn, tx_id, committed) + finally: + __CURRENT_TX.set(None) + + +def __tx_end(conn, tx_id, committed): + query_struct = Query( + OP_TX_END, + [ + ('tx_id', Int), + ('committed', Bool) + ], + ) + return query_perform( + query_struct, conn, + query_params={ + 'tx_id': tx_id, + 'committed': committed + } + ) + + +def __tx_start_post_process(result, conn): + if result.status == 0: + tx_id = result.value['tx_id'] + __CURRENT_TX.set(TransactionContext(tx_id, conn)) + result.value = tx_id + return result diff --git a/pyignite/cache.py b/pyignite/cache.py index c0aaaec..79fa0f5 100644 --- a/pyignite/cache.py +++ b/pyignite/cache.py @@ -15,10 +15,11 @@ from typing import Any, Iterable, Optional, Tuple, Union +from .api.tx_api import get_tx_connection from .datatypes import prop_codes, ExpiryPolicy from .datatypes.internal import AnyDataObject from .exceptions import CacheCreationError, CacheError, ParameterError, SQLError, NotSupportedByClusterError -from .queries.query import CacheInfo +from .queries.cache_info import CacheInfo from .utils import cache_id, status_to_exception from .api.cache_config import ( cache_create, 
cache_create_with_config, cache_get_or_create, cache_get_or_create_with_config, cache_destroy, @@ -177,7 +178,8 @@ def __init__(self, client: 'Client', name: str, expiry_policy: ExpiryPolicy = No super().__init__(client, name, expiry_policy) def _get_best_node(self, key=None, key_hint=None): - return self.client.get_best_node(self, key, key_hint) + tx_conn = get_tx_connection() + return tx_conn if tx_conn else self.client.get_best_node(self, key, key_hint) @property def settings(self) -> Optional[dict]: diff --git a/pyignite/client.py b/pyignite/client.py index 01ee373..b411a2b 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -44,7 +44,7 @@ import random import re from itertools import chain -from typing import Iterable, Type, Union, Any, Dict +from typing import Iterable, Type, Union, Any, Dict, Optional from .api import cache_get_node_partitions from .api.binary import get_binary_type, put_binary_type @@ -54,12 +54,13 @@ from .cache import Cache, create_cache, get_cache, get_or_create_cache, BaseCache from .connection import Connection from .constants import IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT, PROTOCOL_BYTE_ORDER, AFFINITY_RETRIES, AFFINITY_DELAY -from .datatypes import BinaryObject, AnyDataObject +from .datatypes import BinaryObject, AnyDataObject, TransactionConcurrency, TransactionIsolation from .datatypes.base import IgniteDataType from .datatypes.internal import tc_map from .exceptions import BinaryTypeError, CacheError, ReconnectError, connection_errors -from .queries.query import CacheInfo +from .queries.cache_info import CacheInfo from .stream import BinaryStream, READ_BACKWARD +from .transaction import Transaction from .utils import ( cache_id, capitalize, entity_id, schema_id, process_delimiter, status_to_exception, is_iterable, get_field_by_id, unsigned @@ -734,9 +735,9 @@ def sql( elif isinstance(cache, Cache): c_info = cache.cache_info else: - c_info = None + c_info = CacheInfo(protocol_context=self.protocol_context) - if c_info: + if c_info.cache_id: schema = None return SqlFieldsCursor(self, c_info, query_str, page_size, query_args, schema, statement_type, @@ -750,3 +751,19 @@ def get_cluster(self) -> 'Cluster': :return: :py:class:`~pyignite.cluster.Cluster` instance. """ return Cluster(self) + + def tx_start(self, concurrency: TransactionConcurrency = TransactionConcurrency.PESSIMISTIC, + isolation: TransactionIsolation = TransactionIsolation.REPEATABLE_READ, + timeout: Union[int, float] = 0, label: Optional[str] = None) -> 'Transaction': + """ + Start thin client transaction. + + :param concurrency: (optional) transaction concurrency, see + :py:class:`~pyignite.datatypes.transactions.TransactionConcurrency` + :param isolation: (optional) transaction isolation level, see + :py:class:`~pyignite.datatypes.transactions.TransactionIsolation` + :param timeout: (optional) transaction timeout in seconds if float, in millis if int + :param label: (optional) transaction label. + :return: :py:class:`~pyignite.transaction.Transaction` instance. + """ + return Transaction(self, concurrency, isolation, timeout, label) diff --git a/pyignite/connection/aio_connection.py b/pyignite/connection/aio_connection.py index 020f8d4..86993ba 100644 --- a/pyignite/connection/aio_connection.py +++ b/pyignite/connection/aio_connection.py @@ -29,9 +29,7 @@ # limitations under the License. 
import asyncio -from asyncio import Lock from collections import OrderedDict -from io import BytesIO from typing import Union from pyignite.constants import PROTOCOLS, PROTOCOL_BYTE_ORDER @@ -39,10 +37,66 @@ from .bitmask_feature import BitmaskFeature from .connection import BaseConnection -from .handshake import HandshakeRequest, HandshakeResponse +from .handshake import HandshakeRequest, HandshakeResponse, OP_HANDSHAKE from .protocol_context import ProtocolContext from .ssl import create_ssl_context -from ..stream import AioBinaryStream +from ..stream.binary_stream import BinaryStreamBase + + +class BaseProtocol(asyncio.Protocol): + def __init__(self, conn, handshake_fut): + super().__init__() + self._buffer = bytearray() + self._conn = conn + self._handshake_fut = handshake_fut + + def connection_lost(self, exc): + self.__process_connection_error(exc if exc else SocketError("Connection closed")) + + def connection_made(self, transport: asyncio.WriteTransport) -> None: + try: + self.__send_handshake(transport, self._conn) + except Exception as e: + self._handshake_fut.set_exception(e) + + def data_received(self, data: bytes) -> None: + self._buffer += data + while self.__has_full_response(): + packet_sz = self.__packet_size(self._buffer) + packet = self._buffer[0:packet_sz] + if not self._handshake_fut.done(): + hs_response = self.__parse_handshake(packet, self._conn.client) + self._handshake_fut.set_result(hs_response) + else: + self._conn.on_message(packet) + self._buffer = self._buffer[packet_sz:len(self._buffer)] + + def __has_full_response(self): + if len(self._buffer) > 4: + response_len = int.from_bytes(self._buffer[0:4], byteorder=PROTOCOL_BYTE_ORDER, signed=True) + return response_len + 4 <= len(self._buffer) + + @staticmethod + def __packet_size(buffer): + return int.from_bytes(buffer[0:4], byteorder=PROTOCOL_BYTE_ORDER, signed=True) + 4 + + def __process_connection_error(self, exc): + connected = self._handshake_fut.done() + if not connected: + self._handshake_fut.set_exception(exc) + self._conn.on_connection_lost(exc, connected) + + @staticmethod + def __send_handshake(transport, conn): + hs_request = HandshakeRequest(conn.protocol_context, conn.username, conn.password) + with BinaryStreamBase(client=conn.client) as stream: + hs_request.from_python(stream) + transport.write(stream.getvalue()) + + @staticmethod + def __parse_handshake(data, client): + with BinaryStreamBase(client, data) as stream: + return HandshakeResponse.parse(stream, client.protocol_context) class AioConnection(BaseConnection): @@ -94,21 +148,22 @@ def __init__(self, client: 'AioClient', host: str, port: int, username: str = No :param password: (optional) password to authenticate to Ignite cluster. """ super().__init__(client, host, port, username, password, **ssl_params) - self._mux = Lock() - self._reader = None - self._writer = None + self._pending_reqs = {} + self._transport = None + self._loop = asyncio.get_event_loop() + self._closed = False @property def closed(self) -> bool: """ Tells if socket is closed. """ - return self._writer is None + return self._closed or not self._transport or self._transport.is_closing() async def connect(self) -> Union[dict, OrderedDict]: """ Connect to the given server node with protocol version fallback. 
""" - async with self._mux: - return await self._connect() + self._closed = False + return await self._connect() async def _connect(self) -> Union[dict, OrderedDict]: detecting_protocol = False @@ -139,6 +194,20 @@ async def _connect(self) -> Union[dict, OrderedDict]: self.failed = False return result + def on_connection_lost(self, error, reconnect=False): + self.failed = True + for _, fut in self._pending_reqs.items(): + fut.set_exception(error) + self._pending_reqs.clear() + if reconnect and not self._closed: + self._loop.create_task(self._reconnect()) + + def on_message(self, data): + req_id = int.from_bytes(data[4:12], byteorder=PROTOCOL_BYTE_ORDER, signed=True) + if req_id in self._pending_reqs: + self._pending_reqs[req_id].set_result(data) + del self._pending_reqs[req_id] + async def _connect_version(self) -> Union[dict, OrderedDict]: """ Connect to the given server node using protocol version @@ -146,122 +215,56 @@ async def _connect_version(self) -> Union[dict, OrderedDict]: """ ssl_context = create_ssl_context(self.ssl_params) - self._reader, self._writer = await asyncio.open_connection(self.host, self.port, ssl=ssl_context) + handshake_fut = self._loop.create_future() + self._transport, _ = await self._loop.create_connection(lambda: BaseProtocol(self, handshake_fut), + host=self.host, port=self.port, ssl=ssl_context) + hs_response = await handshake_fut - protocol_context = self.client.protocol_context + if hs_response.op_code == 0: + self._close_transport() + self._process_handshake_error(hs_response) - hs_request = HandshakeRequest( - protocol_context, - self.username, - self.password - ) - - with AioBinaryStream(self.client) as stream: - await hs_request.from_python_async(stream) - await self._send(stream.getvalue(), reconnect=False) - - with AioBinaryStream(self.client, await self._recv(reconnect=False)) as stream: - hs_response = await HandshakeResponse.parse_async(stream, self.protocol_context) - - if hs_response.op_code == 0: - self._close() - self._process_handshake_error(hs_response) - - return hs_response + return hs_response async def reconnect(self): - async with self._mux: - await self._reconnect() + await self._reconnect() async def _reconnect(self): if self.alive: return - self._close() - + self._close_transport() # connect and silence the connection errors try: await self._connect() except connection_errors: pass - async def request(self, data: Union[bytes, bytearray]) -> bytearray: + async def request(self, query_id, data: Union[bytes, bytearray]) -> bytearray: """ Perform request. - + :param query_id: id of query. :param data: bytes to send. 
""" - async with self._mux: - await self._send(data) - return await self._recv() - - async def _send(self, data: Union[bytes, bytearray], reconnect=True): - if self.closed: + if not self.alive: raise SocketError('Attempt to use closed connection.') - try: - self._writer.write(data) - await self._writer.drain() - except connection_errors: - self.failed = True - if reconnect: - await self._reconnect() - raise + return await self._send(query_id, data) - async def _recv(self, reconnect=True) -> bytearray: - if self.closed: - raise SocketError('Attempt to use closed connection.') - - data = bytearray(1024) - buffer = memoryview(data) - bytes_total_received, bytes_to_receive = 0, 0 - while True: - try: - chunk = await self._reader.read(len(buffer)) - bytes_received = len(chunk) - if bytes_received == 0: - raise SocketError('Connection broken.') - - buffer[0:bytes_received] = chunk - bytes_total_received += bytes_received - except connection_errors: - self.failed = True - if reconnect: - await self._reconnect() - raise - - if bytes_total_received < 4: - continue - elif bytes_to_receive == 0: - response_len = int.from_bytes(data[0:4], PROTOCOL_BYTE_ORDER) - bytes_to_receive = response_len - - if response_len + 4 > len(data): - buffer.release() - data.extend(bytearray(response_len + 4 - len(data))) - buffer = memoryview(data)[bytes_total_received:] - continue - - if bytes_total_received >= bytes_to_receive: - buffer.release() - break - - buffer = buffer[bytes_received:] - - return data + async def _send(self, query_id, data): + fut = self._loop.create_future() + self._pending_reqs[query_id] = fut + self._transport.write(data) + return await fut async def close(self): - async with self._mux: - self._close() + self._closed = True + self._close_transport() - def _close(self): + def _close_transport(self): """ Close connection. """ - if self._writer: - try: - self._writer.close() - except connection_errors: - pass - - self._writer, self._reader = None, None + if self._transport: + self._transport.close() + self._transport = None diff --git a/pyignite/connection/protocol_context.py b/pyignite/connection/protocol_context.py index be23e56..0f43aa4 100644 --- a/pyignite/connection/protocol_context.py +++ b/pyignite/connection/protocol_context.py @@ -87,6 +87,12 @@ def is_status_flags_supported(self) -> bool: """ return self.version >= (1, 4, 0) + def is_transactions_supported(self) -> bool: + """ + Check whether transactions supported by the current protocol. + """ + return self.version >= (1, 6, 0) + def is_feature_flags_supported(self) -> bool: """ Check whether feature flags supported by the current protocol. 
diff --git a/pyignite/datatypes/__init__.py b/pyignite/datatypes/__init__.py index 4f78dce..0ebe56a 100644 --- a/pyignite/datatypes/__init__.py +++ b/pyignite/datatypes/__init__.py @@ -27,3 +27,4 @@ from .standard import * from .cluster_state import ClusterState from .expiry_policy import ExpiryPolicy +from .transactions import TransactionIsolation, TransactionConcurrency diff --git a/pyignite/datatypes/cache_config.py b/pyignite/datatypes/cache_config.py index a2b4322..4ac28e4 100644 --- a/pyignite/datatypes/cache_config.py +++ b/pyignite/datatypes/cache_config.py @@ -21,6 +21,7 @@ __all__ = [ 'get_cache_config_struct', 'CacheMode', 'PartitionLossPolicy', 'RebalanceMode', 'WriteSynchronizationMode', 'IndexType', + 'CacheAtomicityMode' ] diff --git a/pyignite/datatypes/transactions.py b/pyignite/datatypes/transactions.py new file mode 100644 index 0000000..83e6c06 --- /dev/null +++ b/pyignite/datatypes/transactions.py @@ -0,0 +1,42 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from enum import IntEnum + + +class TransactionConcurrency(IntEnum): + """ + Defines different cache transaction concurrency control. + """ + + #: Optimistic concurrency control. + OPTIMISTIC = 0 + + #: Pessimistic concurrency control. + PESSIMISTIC = 1 + + +class TransactionIsolation(IntEnum): + """ + Defines different cache transaction isolation levels. + """ + + #: Read committed isolation level. + READ_COMMITTED = 0 + + #: Repeatable read isolation level. + REPEATABLE_READ = 1 + + #: Serializable isolation level. + SERIALIZABLE = 2 diff --git a/pyignite/exceptions.py b/pyignite/exceptions.py index 215ccd0..fdf1261 100644 --- a/pyignite/exceptions.py +++ b/pyignite/exceptions.py @@ -103,10 +103,18 @@ class ClusterError(Exception): class NotSupportedByClusterError(Exception): """ - This exception is raised, whenever cluster is not supported specific + This exception is raised, whenever the cluster does not support a specific operation probably because it is outdated. """ pass +class NotSupportedError(Exception): + """ + This exception is raised, whenever the client does not support a specific + operation. + """ + pass + + connection_errors = (IOError, OSError, EOFError) diff --git a/pyignite/queries/cache_info.py b/pyignite/queries/cache_info.py new file mode 100644 index 0000000..6caf3ce --- /dev/null +++ b/pyignite/queries/cache_info.py @@ -0,0 +1,61 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import attr + +from pyignite.api.tx_api import get_tx_id +from pyignite.connection.protocol_context import ProtocolContext +from pyignite.constants import PROTOCOL_BYTE_ORDER +from pyignite.datatypes import ExpiryPolicy +from pyignite.exceptions import NotSupportedByClusterError + + +@attr.s +class CacheInfo: + cache_id = attr.ib(kw_only=True, type=int, default=0) + expiry_policy = attr.ib(kw_only=True, type=ExpiryPolicy, default=None) + protocol_context = attr.ib(kw_only=True, type=ProtocolContext) + + TRANSACTIONS_MASK = 0x02 + EXPIRY_POLICY_MASK = 0x04 + + @classmethod + async def from_python_async(cls, stream, value): + return cls.from_python(stream, value) + + @classmethod + def from_python(cls, stream, value): + cache_id = value.cache_id if value else 0 + expiry_policy = value.expiry_policy if value else None + flags = 0 + + stream.write(cache_id.to_bytes(4, byteorder=PROTOCOL_BYTE_ORDER, signed=True)) + + if expiry_policy: + if not value.protocol_context.is_expiry_policy_supported(): + raise NotSupportedByClusterError("'ExpiryPolicy' API is not supported by the cluster") + flags |= cls.EXPIRY_POLICY_MASK + + tx_id = get_tx_id() + if value.protocol_context.is_transactions_supported() and tx_id: + flags |= cls.TRANSACTIONS_MASK + + stream.write(flags.to_bytes(1, byteorder=PROTOCOL_BYTE_ORDER)) + + if expiry_policy: + ExpiryPolicy.write_policy(stream, expiry_policy) + + if flags & cls.TRANSACTIONS_MASK: + stream.write(tx_id.to_bytes(4, byteorder=PROTOCOL_BYTE_ORDER, signed=True)) diff --git a/pyignite/queries/op_codes.py b/pyignite/queries/op_codes.py index c152f7c..cf19b11 100644 --- a/pyignite/queries/op_codes.py +++ b/pyignite/queries/op_codes.py @@ -66,5 +66,8 @@ OP_GET_BINARY_TYPE = 3002 OP_PUT_BINARY_TYPE = 3003 +OP_TX_START = 4000 +OP_TX_END = 4001 + OP_CLUSTER_GET_STATE = 5000 OP_CLUSTER_CHANGE_STATE = 5001 diff --git a/pyignite/queries/query.py b/pyignite/queries/query.py index d971eef..4bcab9f 100644 --- a/pyignite/queries/query.py +++ b/pyignite/queries/query.py @@ -15,16 +15,12 @@ import ctypes from io import SEEK_CUR -from random import randint import attr from pyignite.api.result import APIResult from pyignite.connection import Connection, AioConnection -from pyignite.connection.protocol_context import ProtocolContext -from pyignite.constants import MIN_LONG, MAX_LONG, RHF_TOPOLOGY_CHANGED, PROTOCOL_BYTE_ORDER -from pyignite.datatypes import ExpiryPolicy -from pyignite.exceptions import NotSupportedByClusterError +from pyignite.constants import MAX_LONG, RHF_TOPOLOGY_CHANGED from pyignite.queries.response import Response from pyignite.stream import AioBinaryStream, BinaryStream, READ_BACKWARD @@ -47,42 +43,29 @@ def _internal(): return _internal() -@attr.s -class CacheInfo: - cache_id = attr.ib(kw_only=True, type=int) - expiry_policy = attr.ib(kw_only=True, type=ExpiryPolicy, default=None) - protocol_context = attr.ib(kw_only=True, type=ProtocolContext) - - @classmethod - async def from_python_async(cls, stream, value): - return cls.from_python(stream, value) +_QUERY_COUNTER = 0 - @classmethod - def from_python(cls, stream, value): - cache_id = value.cache_id if 
value else 0 - expiry_policy = value.expiry_policy if value else None - flags = 0 - stream.write(cache_id.to_bytes(4, byteorder=PROTOCOL_BYTE_ORDER, signed=True)) - - if expiry_policy: - if not value.protocol_context.is_expiry_policy_supported(): - raise NotSupportedByClusterError("'ExpiryPolicy' API is not supported by the cluster") - flags |= 0x04 - - stream.write(flags.to_bytes(1, byteorder=PROTOCOL_BYTE_ORDER)) - if expiry_policy: - ExpiryPolicy.write_policy(stream, expiry_policy) +def _get_query_id(): + global _QUERY_COUNTER + if _QUERY_COUNTER >= MAX_LONG: + return 0 + _QUERY_COUNTER += 1 + return _QUERY_COUNTER @attr.s class Query: op_code = attr.ib(type=int) following = attr.ib(type=list, factory=list) - query_id = attr.ib(type=int, default=None) + query_id = attr.ib(type=int) response_type = attr.ib(type=type(Response), default=Response) _query_c_type = None + @query_id.default + def _set_query_id(self): + return _get_query_id() + @classmethod def build_c_type(cls): if cls._query_c_type is None: @@ -119,14 +102,14 @@ async def from_python_async(self, stream, values: dict = None): self.__write_header(stream, header, init_pos) def _build_header(self, stream): + global _QUERY_COUNTER header_class = self.build_c_type() header_len = ctypes.sizeof(header_class) stream.seek(header_len, SEEK_CUR) header = header_class() header.op_code = self.op_code - if self.query_id is None: - header.query_id = randint(MIN_LONG, MAX_LONG) + header.query_id = self.query_id return header @@ -185,7 +168,7 @@ async def perform_async( """ with AioBinaryStream(conn.client) as stream: await self.from_python_async(stream, query_params) - data = await conn.request(stream.getvalue()) + data = await conn.request(self.query_id, stream.getvalue()) response_struct = self.response_type(protocol_context=conn.protocol_context, following=response_config, **kwargs) diff --git a/pyignite/transaction.py b/pyignite/transaction.py new file mode 100644 index 0000000..5bafa6b --- /dev/null +++ b/pyignite/transaction.py @@ -0,0 +1,130 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import Union + +from pyignite.api.tx_api import tx_end, tx_start, tx_end_async, tx_start_async +from pyignite.datatypes import TransactionIsolation, TransactionConcurrency +from pyignite.exceptions import CacheError +from pyignite.utils import status_to_exception + + +def _convert_to_millis(timeout: Union[int, float]) -> int: + if isinstance(timeout, float): + return math.floor(timeout * 1000) + return timeout + + +class Transaction: + """ + Thin client transaction. 
+ """ + def __init__(self, client, concurrency=TransactionConcurrency.PESSIMISTIC, + isolation=TransactionIsolation.REPEATABLE_READ, timeout=0, label=None): + self.client, self.concurrency = client, concurrency + self.isolation, self.timeout = isolation, _convert_to_millis(timeout) + self.label, self.closed = label, False + self.tx_id = self.__start_tx() + + def commit(self) -> None: + """ + Commit transaction. + """ + if not self.closed: + self.closed = True + return self.__end_tx(True) + + def rollback(self) -> None: + """ + Rollback transaction. + """ + self.close() + + def close(self) -> None: + """ + Close transaction. + """ + if not self.closed: + self.closed = True + return self.__end_tx(False) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + @status_to_exception(CacheError) + def __start_tx(self): + conn = self.client.random_node + return tx_start(conn, self.concurrency, self.isolation, self.timeout, self.label) + + @status_to_exception(CacheError) + def __end_tx(self, committed): + return tx_end(self.tx_id, committed) + + +class AioTransaction: + """ + Async thin client transaction. + """ + def __init__(self, client, concurrency=TransactionConcurrency.PESSIMISTIC, + isolation=TransactionIsolation.REPEATABLE_READ, timeout=0, label=None): + self.client, self.concurrency = client, concurrency + self.isolation, self.timeout = isolation, _convert_to_millis(timeout) + self.label, self.closed = label, False + + def __await__(self): + return (yield from self.__aenter__().__await__()) + + async def commit(self) -> None: + """ + Commit transaction. + """ + if not self.closed: + self.closed = True + return await self.__end_tx(True) + + async def rollback(self) -> None: + """ + Rollback transaction. + """ + await self.close() + + async def close(self) -> None: + """ + Close transaction. 
+ """ + if not self.closed: + self.closed = True + return await self.__end_tx(False) + + async def __aenter__(self): + self.tx_id = await self.__start_tx() + self.closed = False + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close() + + @status_to_exception(CacheError) + async def __start_tx(self): + conn = await self.client.random_node() + return await tx_start_async(conn, self.concurrency, self.isolation, self.timeout, self.label) + + @status_to_exception(CacheError) + async def __end_tx(self, committed): + return await tx_end_async(self.tx_id, committed) diff --git a/requirements/install.txt b/requirements/install.txt index feb4eb6..aa8290f 100644 --- a/requirements/install.txt +++ b/requirements/install.txt @@ -1,3 +1,4 @@ # these pip packages are necessary for the pyignite to run attrs>=20.3.0 +contextvars>=2.4;python_version<"3.7" diff --git a/tests/affinity/conftest.py b/tests/affinity/conftest.py index da645c1..eca31b2 100644 --- a/tests/affinity/conftest.py +++ b/tests/affinity/conftest.py @@ -54,7 +54,7 @@ def client(connection_param): @pytest.fixture -async def async_client(connection_param): +async def async_client(connection_param, event_loop): client = AioClient(partition_aware=True) try: await client.connect(connection_param) diff --git a/tests/affinity/test_affinity_request_routing.py b/tests/affinity/test_affinity_request_routing.py index 90c71b2..0d0ec24 100644 --- a/tests/affinity/test_affinity_request_routing.py +++ b/tests/affinity/test_affinity_request_routing.py @@ -52,7 +52,7 @@ def patched_send(self, *args, **kwargs): async def patched_send_async(self, *args, **kwargs): """Patched send function that push to queue idx of server to which request is routed.""" - buf = args[0] + buf = args[1] if buf and len(buf) >= 6: op_code = int.from_bytes(buf[4:6], byteorder=PROTOCOL_BYTE_ORDER) # Filter only caches operation. @@ -229,7 +229,7 @@ def client_routed_cache(client_routed, request): @pytest.fixture -async def async_client_routed(): +async def async_client_routed(event_loop): client = AioClient(partition_aware=True) try: await client.connect(client_routed_connection_string) diff --git a/tests/affinity/test_affinity_single_connection.py b/tests/affinity/test_affinity_single_connection.py index c3d2473..c679bdd 100644 --- a/tests/affinity/test_affinity_single_connection.py +++ b/tests/affinity/test_affinity_single_connection.py @@ -29,7 +29,7 @@ def client(): @pytest.fixture -async def async_client(): +async def async_client(event_loop): client = AioClient(partition_aware=True) try: await client.connect('127.0.0.1', 10801) diff --git a/tests/common/test_transactions.py b/tests/common/test_transactions.py new file mode 100644 index 0000000..0cfa46a --- /dev/null +++ b/tests/common/test_transactions.py @@ -0,0 +1,231 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import itertools +import sys +import time + +import pytest + +from pyignite import AioClient, Client +from pyignite.datatypes import TransactionIsolation, TransactionConcurrency +from pyignite.datatypes.cache_config import CacheAtomicityMode +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_CACHE_ATOMICITY_MODE +from pyignite.exceptions import CacheError + + +@pytest.fixture +def connection_param(): + return [('127.0.0.1', 10800 + i) for i in range(1, 4)] + + +@pytest.fixture(params=['with-partition-awareness', 'without-partition-awareness']) +async def async_client(request, connection_param, event_loop): + client = AioClient(partition_aware=request.param == 'with-partition-awareness') + try: + await client.connect(connection_param) + if not client.protocol_context.is_transactions_supported(): + pytest.skip(f'skipped {request.node.name}, transaction api is not supported.') + elif sys.version_info < (3, 7): + pytest.skip(f'skipped {request.node.name}, transaction api is not supported' + f'for async client on python {sys.version}') + else: + yield client + finally: + await client.close() + + +@pytest.fixture(params=['with-partition-awareness', 'without-partition-awareness']) +def client(request, connection_param): + client = Client(partition_aware=request.param == 'with-partition-awareness') + try: + client.connect(connection_param) + if not client.protocol_context.is_transactions_supported(): + pytest.skip(f'skipped {request.node.name}, transaction api is not supported.') + else: + yield client + finally: + client.close() + + +@pytest.fixture +def tx_cache(client): + cache = client.get_or_create_cache({ + PROP_NAME: 'tx_cache', + PROP_CACHE_ATOMICITY_MODE: CacheAtomicityMode.TRANSACTIONAL + }) + yield cache + cache.destroy() + + +@pytest.fixture +async def async_tx_cache(async_client): + cache = await async_client.get_or_create_cache({ + PROP_NAME: 'tx_cache', + PROP_CACHE_ATOMICITY_MODE: CacheAtomicityMode.TRANSACTIONAL + }) + yield cache + await cache.destroy() + + +@pytest.mark.parametrize( + ['iso_level', 'concurrency'], + itertools.product( + [iso_level for iso_level in TransactionIsolation], + [concurrency for concurrency in TransactionConcurrency] + ) +) +def test_simple_transaction(client, tx_cache, iso_level, concurrency): + with client.tx_start(isolation=iso_level, concurrency=concurrency) as tx: + tx_cache.put(1, 1) + tx.commit() + + assert tx_cache.get(1) == 1 + + with client.tx_start(isolation=iso_level, concurrency=concurrency) as tx: + tx_cache.put(1, 10) + tx.rollback() + + assert tx_cache.get(1) == 1 + + with client.tx_start(isolation=iso_level, concurrency=concurrency) as tx: + tx_cache.put(1, 10) + + assert tx_cache.get(1) == 1 + + +@pytest.mark.parametrize( + ['iso_level', 'concurrency'], + itertools.product( + [iso_level for iso_level in TransactionIsolation], + [concurrency for concurrency in TransactionConcurrency] + ) +) +@pytest.mark.asyncio +async def test_simple_transaction_async(async_client, async_tx_cache, iso_level, concurrency): + async with async_client.tx_start(isolation=iso_level, concurrency=concurrency) as tx: + await async_tx_cache.put(1, 1) + await tx.commit() + + assert await async_tx_cache.get(1) == 1 + + async with async_client.tx_start(isolation=iso_level, concurrency=concurrency) as tx: + await async_tx_cache.put(1, 10) + await tx.rollback() + + assert await async_tx_cache.get(1) == 1 + + async with 
async_client.tx_start(isolation=iso_level, concurrency=concurrency) as tx: + await async_tx_cache.put(1, 10) + + assert await async_tx_cache.get(1) == 1 + + +def test_transactions_timeout(client, tx_cache): + with client.tx_start(timeout=2.0, label='tx-sync') as tx: + tx_cache.put(1, 1) + time.sleep(3.0) + with pytest.raises(CacheError) as to_error: + tx.commit() + assert 'tx-sync' in str(to_error) and 'timed out' in str(to_error) + + +@pytest.mark.asyncio +async def test_transactions_timeout_async(async_client, async_tx_cache): + async def update(i, timeout): + async with async_client.tx_start( + label=f'tx-{i}', timeout=timeout, isolation=TransactionIsolation.READ_COMMITTED, + concurrency=TransactionConcurrency.PESSIMISTIC + ) as tx: + k1, k2 = (1, 2) if i % 2 == 0 else (2, 1) + v = f'value-{i}' + + await async_tx_cache.put(k1, v) + await async_tx_cache.put(k2, v) + + await tx.commit() + + task = asyncio.gather(*[update(i, 2.0) for i in range(20)], return_exceptions=True) + await asyncio.sleep(5.0) + assert task.done() # Check that all transactions completed or rolled back on timeout + for i, ex in enumerate(task.result()): + if ex: + assert 'TransactionTimeoutException' in str(ex) or \ + 'Cache transaction timed out' in str(ex) # check that transaction was rolled back. + assert f'tx-{i}' in str(ex) # check that tx label is present in the error + + +@pytest.mark.asyncio +@pytest.mark.parametrize('iso_level', [iso_level for iso_level in TransactionIsolation]) +async def test_concurrent_pessimistic_transactions_same_key(async_client, async_tx_cache, iso_level): + async def update(i): + async with async_client.tx_start( + label=f'tx_lbl_{i}', isolation=iso_level, concurrency=TransactionConcurrency.PESSIMISTIC + ) as tx: + await async_tx_cache.put(1, f'test-{i}') + await tx.commit() + + res = await asyncio.gather(*[update(i) for i in range(20)], return_exceptions=True) + assert not any(res) # Check that all transactions proceed + + +@pytest.mark.asyncio +async def test_concurrent_optimistic_transactions_no_deadlock(async_client, async_tx_cache, event_loop): + """ + Check that optimistic transactions are deadlock safe. + """ + async def update(i): + async with async_client.tx_start( + label=f'tx-{i}', isolation=TransactionIsolation.SERIALIZABLE, + concurrency=TransactionConcurrency.OPTIMISTIC + ) as tx: + k1, k2 = (1, 2) if i % 2 == 0 else (2, 1) + v = f'value-{i}' + + await async_tx_cache.put(k1, v) + await async_tx_cache.put(k2, v) + + await tx.commit() + + task = asyncio.gather(*[update(i) for i in range(20)], return_exceptions=True) + await asyncio.sleep(2.0) + assert task.done() # Check that there are no deadlocks. + assert not all(task.result()) # Check that some (or all) transactions proceed.
+ for i, ex in enumerate(task.result()): + if ex: + assert 'lock conflict' in str(ex) # check optimistic prepare phase failed + assert f'tx-{i}' in str(ex) # check that tx label is present in the error + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + ['iso_level', 'concurrency'], + itertools.product( + [iso_level for iso_level in TransactionIsolation], + [concurrency for concurrency in TransactionConcurrency] + ) +) +async def test_concurrent_transactions(async_client, async_tx_cache, iso_level, concurrency): + async def update(i): + async with async_client.tx_start(isolation=iso_level, concurrency=concurrency) as tx: + await async_tx_cache.put(i, f'test-{i}') + if i % 2 == 0: + await tx.commit() + else: + await tx.rollback() + + await asyncio.gather(*[update(i) for i in range(20)], return_exceptions=True) + assert await async_tx_cache.get_all(list(range(20))) == {i: f'test-{i}' for i in range(20) if i % 2 == 0} diff --git a/tests/config/ignite-config.xml.jinja2 b/tests/config/ignite-config.xml.jinja2 index 325a581..22b103e 100644 --- a/tests/config/ignite-config.xml.jinja2 +++ b/tests/config/ignite-config.xml.jinja2 @@ -60,6 +60,7 @@ + {% if use_ssl %} diff --git a/tests/custom/test_cluster.py b/tests/custom/test_cluster.py index f1ffcfd..e94853a 100644 --- a/tests/custom/test_cluster.py +++ b/tests/custom/test_cluster.py @@ -47,10 +47,9 @@ def server2(with_persistence, cleanup): @pytest.fixture(autouse=True) def cluster_api_supported(request, server1): client = Client() - client.connect('127.0.0.1', 10801) - - if not client.protocol_context.is_cluster_api_supported(): - pytest.skip(f'skipped {request.node.name}, ExpiryPolicy APIis not supported.') + with client.connect('127.0.0.1', 10801): + if not client.protocol_context.is_cluster_api_supported(): + pytest.skip(f'skipped {request.node.name}, Cluster API is not supported.') def test_cluster_set_active(with_persistence): From a53afcccb6c14b119a4d258da03d7d72445f62d5 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Thu, 10 Jun 2021 16:28:03 +0300 Subject: [PATCH 40/62] IGNITE-14850 Add documentation to transaction's api - Fixes #41. --- docs/async_examples.rst | 47 ++++++++++- docs/datatypes/cache_props.rst | 4 +- docs/datatypes/parsers.rst | 74 ++++++++--------- docs/examples.rst | 46 ++++++++++ docs/readme.rst | 4 +- examples/transactions.py | 134 ++++++++++++++++++++++++++++++ tests/common/test_transactions.py | 2 + 7 files changed, 269 insertions(+), 42 deletions(-) create mode 100644 examples/transactions.py diff --git a/docs/async_examples.rst b/docs/async_examples.rst index 4bc21ae..322869c 100644 --- a/docs/async_examples.rst +++ b/docs/async_examples.rst @@ -78,6 +78,50 @@ Secondly, expiry policy can be set for all cache operations, which are done unde :dedent: 12 :lines: 96-105 +Transactions +------------ +File: `transactions.py`_. + +Client transactions are supported for caches with +:py:attr:`~pyignite.datatypes.cache_config.CacheAtomicityMode.TRANSACTIONAL` mode. + +Let's create a transactional cache: + +.. literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 29-32 + +Let's start a transaction and commit it: + +.. literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 35-40 + +Let's check that the transaction was committed successfully: + +.. literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 42-44 + +Let's check that raising an exception inside the `async with` block leads to a transaction rollback + +.. 
literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 47-58 + +Let's check that timed out transaction is successfully rolled back + +.. literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 61-72 + +See more info about transaction's parameters in a documentation of :py:meth:`~pyignite.aio_client.AioClient.tx_start` + SQL --- File: `async_sql.py`_. @@ -173,4 +217,5 @@ Finally, delete the tables used in this example with the following queries: .. _expiry_policy.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/expiry_policy.py .. _async_key_value.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_key_value.py -.. _async_sql.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_sql.py \ No newline at end of file +.. _async_sql.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/async_sql.py +.. _transactions.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/transactions.py \ No newline at end of file diff --git a/docs/datatypes/cache_props.rst b/docs/datatypes/cache_props.rst index 380ccf2..77d50f7 100644 --- a/docs/datatypes/cache_props.rst +++ b/docs/datatypes/cache_props.rst @@ -22,7 +22,7 @@ Cache Properties The :mod:`~pyignite.datatypes.prop_codes` module contains a list of ordinal values, that represent various cache settings. -Please refer to the `Apache Ignite Data Grid`_ documentation on cache +Please refer to the `Configuring Caches`_ documentation on cache synchronization, rebalance, affinity and other cache configuration-related matters. @@ -160,7 +160,7 @@ A dict of the following format: - `type_name`: name of the complex object, - `affinity_key_field_name`: name of the affinity key field. -.. _Apache Ignite Data Grid: https://apacheignite.readme.io/docs/data-grid +.. _Configuring Caches: https://ignite.apache.org/docs/latest/configuring-caches/configuration-overview.html Expiry policy ------------- diff --git a/docs/datatypes/parsers.rst b/docs/datatypes/parsers.rst index 92329cc..06ce659 100644 --- a/docs/datatypes/parsers.rst +++ b/docs/datatypes/parsers.rst @@ -136,40 +136,40 @@ with your data, in to some API function as a *type conversion hint*. |0x1b |`Wrapped data`_ |tuple[int, bytes] |:class:`~pyignite.datatypes.complex.WrappedDataObject` | +-------------+--------------------+-------------------------------+------------------------------------------------------------------+ -.. _Byte: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-byte -.. _Short: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-short -.. _Int: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-int -.. _Long: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-long -.. _Float: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-float -.. _Double: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-double -.. _Char: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-char -.. _Bool: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-bool -.. _Null: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-null -.. _String: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-string -.. 
_UUID: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-uuid-guid- -.. _Timestamp: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-timestamp -.. _Date: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-date -.. _Time: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-time -.. _Decimal: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-decimal -.. _Enum: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-enum -.. _Byte array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-byte-array -.. _Short array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-short-array -.. _Int array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-int-array -.. _Long array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-long-array -.. _Float array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-float-array -.. _Double array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-double-array -.. _Char array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-char-array -.. _Bool array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-bool-array -.. _String array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-string-array -.. _UUID array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-uuid-guid-array -.. _Timestamp array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-timestamp-array -.. _Date array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-date-array -.. _Time array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-time-array -.. _Decimal array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-decimal-array -.. _Object array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-object-collections -.. _Collection: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-collection -.. _Map: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-map -.. _Enum array: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-enum-array -.. _Binary enum: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-binary-enum -.. _Wrapped data: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-wrapped-data -.. _Complex object: https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-complex-object +.. _Byte: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#byte +.. _Short: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#short +.. _Int: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#int +.. _Long: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#long +.. _Float: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#float +.. _Double: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#double +.. _Char: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#char +.. 
_Bool: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#bool +.. _Null: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#null +.. _String: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#string +.. _UUID: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#uuid-guid +.. _Timestamp: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#timestamp +.. _Date: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#date +.. _Time: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#time +.. _Decimal: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#decimal +.. _Enum: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#enum +.. _Byte array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#byte-array +.. _Short array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#short-array +.. _Int array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#int-array +.. _Long array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#long-array +.. _Float array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#float-array +.. _Double array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#double-array +.. _Char array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#char-array +.. _Bool array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#bool-array +.. _String array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#string-array +.. _UUID array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#uuid-guid-array +.. _Timestamp array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#timestamp-array +.. _Date array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#date-array +.. _Time array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#time-array +.. _Decimal array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#decimal-array +.. _Object array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#object-collections +.. _Collection: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#collection +.. _Map: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#map +.. _Enum array: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#enum-array +.. _Binary enum: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#binary-enum +.. _Wrapped data: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#wrapped-data +.. _Complex object: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#complex-object diff --git a/docs/examples.rst b/docs/examples.rst index 07ec65c..97facdb 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -206,6 +206,51 @@ contents. But it still can be used for interoperability with Java. :dedent: 4 :lines: 56-65 + +Transactions +------------ +File: `transactions.py`_. + +Client transactions are supported for caches with +:py:attr:`~pyignite.datatypes.cache_config.CacheAtomicityMode.TRANSACTIONAL` mode. + +Let's create transactional cache: + +.. 
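Concretely, creating such a cache and exercising the timeout-driven rollback covered at the end of this section might look like the following condensed sketch (assuming a node on 127.0.0.1:10800; the ``long-tx`` label only serves to make the rolled-back transaction easy to spot in the error message)::

    import time

    from pyignite import Client
    from pyignite.datatypes.cache_config import CacheAtomicityMode
    from pyignite.datatypes.prop_codes import PROP_CACHE_ATOMICITY_MODE, PROP_NAME
    from pyignite.exceptions import CacheError

    client = Client()
    with client.connect('127.0.0.1', 10800):
        cache = client.get_or_create_cache({
            PROP_NAME: 'tx_cache',
            PROP_CACHE_ATOMICITY_MODE: CacheAtomicityMode.TRANSACTIONAL
        })

        # A transaction that outlives its timeout is rolled back on the server
        # side; committing it afterwards raises CacheError, and the error
        # message carries the 'long-tx' label.
        try:
            with client.tx_start(timeout=1.0, label='long-tx') as tx:
                cache.put(1, 'fail')
                time.sleep(2.0)
                tx.commit()
        except CacheError as err:
            print(err)

        print(cache.get(1))  # None: the timed-out put was rolled back
        cache.destroy()
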
literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 81-84 + +Let's start a transaction and commit it: + +.. literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 87-91 + +Let's check that the transaction was committed successfully: + +.. literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 93-94 + +Let's check that raising exception inside `with` block leads to transaction's rollback + +.. literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 97-107 + +Let's check that timed out transaction is successfully rolled back + +.. literalinclude:: ../examples/transactions.py + :language: python + :dedent: 8 + :lines: 110-120 + +See more info about transaction's parameters in a documentation of :py:meth:`~pyignite.client.Client.tx_start` + SQL --- File: `sql.py`_. @@ -717,6 +762,7 @@ with the following message: .. _read_binary.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/read_binary.py .. _create_binary.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/create_binary.py .. _migrate_binary.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/migrate_binary.py +.. _transactions.py: https://github.com/apache/ignite-python-thin-client/blob/master/examples/transactions.py .. _Getting Started: https://ignite.apache.org/docs/latest/thin-clients/python-thin-client .. _PyIgnite GitHub repository: https://github.com/apache/ignite-python-thin-client/blob/master .. _Complex object: https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#complex-object diff --git a/docs/readme.rst b/docs/readme.rst index 807865a..5fc76a7 100644 --- a/docs/readme.rst +++ b/docs/readme.rst @@ -181,8 +181,8 @@ Licensing This is a free software, brought to you on terms of the `Apache License v2`_. -.. _Apache Ignite: https://apacheignite.readme.io/docs/what-is-ignite -.. _binary client protocol: https://apacheignite.readme.io/docs/binary-client-protocol +.. _Apache Ignite: https://ignite.apache.org +.. _binary client protocol: https://ignite.apache.org/docs/latest/binary-client-protocol/binary-client-protocol .. _Apache License v2: http://www.apache.org/licenses/LICENSE-2.0 .. _virtualenv: https://virtualenv.pypa.io/ .. _tox: https://tox.readthedocs.io/en/latest/ diff --git a/examples/transactions.py b/examples/transactions.py new file mode 100644 index 0000000..895837c --- /dev/null +++ b/examples/transactions.py @@ -0,0 +1,134 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import asyncio +import sys +import time + +from pyignite import AioClient, Client +from pyignite.datatypes import TransactionIsolation, TransactionConcurrency +from pyignite.datatypes.prop_codes import PROP_CACHE_ATOMICITY_MODE, PROP_NAME +from pyignite.datatypes.cache_config import CacheAtomicityMode +from pyignite.exceptions import CacheError + + +async def async_example(): + client = AioClient() + async with client.connect('127.0.0.1', 10800): + cache = await client.get_or_create_cache({ + PROP_NAME: 'tx_cache', + PROP_CACHE_ATOMICITY_MODE: CacheAtomicityMode.TRANSACTIONAL + }) + + # starting transaction + key = 1 + async with client.tx_start( + isolation=TransactionIsolation.REPEATABLE_READ, concurrency=TransactionConcurrency.PESSIMISTIC + ) as tx: + await cache.put(key, 'success') + await tx.commit() + + # key=1 value=success + val = await cache.get(key) + print(f"key=1 value={val}") + + # rollback transaction. + try: + async with client.tx_start( + isolation=TransactionIsolation.REPEATABLE_READ, concurrency=TransactionConcurrency.PESSIMISTIC + ): + await cache.put(key, 'fail') + raise RuntimeError('test') + except RuntimeError: + pass + + # key=1 value=success + val = await cache.get(key) + print(f"key=1 value={val}") + + # rollback transaction on timeout. + try: + async with client.tx_start(timeout=1.0, label='long-tx') as tx: + await cache.put(key, 'fail') + await asyncio.sleep(2.0) + await tx.commit() + except CacheError as e: + # Cache transaction timed out: GridNearTxLocal[...timeout=1000, ... label=long-tx] + print(e) + + # key=1 value=success + val = await cache.get(1) + print(f"key=1 value={val}") + + # destroy cache + await cache.destroy() + + +def sync_example(): + client = Client() + with client.connect('127.0.0.1', 10800): + cache = client.get_or_create_cache({ + PROP_NAME: 'tx_cache', + PROP_CACHE_ATOMICITY_MODE: CacheAtomicityMode.TRANSACTIONAL + }) + + # starting transaction + with client.tx_start( + isolation=TransactionIsolation.REPEATABLE_READ, concurrency=TransactionConcurrency.PESSIMISTIC + ) as tx: + cache.put(1, 'success') + tx.commit() + + # key=1 value=success + print(f"key=1 value={cache.get(1)}") + + # rollback transaction. + try: + with client.tx_start( + isolation=TransactionIsolation.REPEATABLE_READ, concurrency=TransactionConcurrency.PESSIMISTIC + ): + cache.put(1, 'fail') + raise RuntimeError('test') + except RuntimeError: + pass + + # key=1 value=success + print(f"key=1 value={cache.get(1)}") + + # rollback transaction on timeout. + try: + with client.tx_start(timeout=1.0, label='long-tx') as tx: + cache.put(1, 'fail') + time.sleep(2.0) + tx.commit() + except CacheError as e: + # Cache transaction timed out: GridNearTxLocal[...timeout=1000, ... 
label=long-tx] + print(e) + + # key=1 value=success + print(f"key=1 value={cache.get(1)}") + + # destroy cache + cache.destroy() + + +if __name__ == '__main__': + print("Starting sync example") + sync_example() + + if sys.version_info >= (3, 7): + print("Starting async example") + loop = asyncio.get_event_loop() + loop.run_until_complete(async_example()) diff --git a/tests/common/test_transactions.py b/tests/common/test_transactions.py index 0cfa46a..f4efba5 100644 --- a/tests/common/test_transactions.py +++ b/tests/common/test_transactions.py @@ -67,6 +67,7 @@ def tx_cache(client): PROP_NAME: 'tx_cache', PROP_CACHE_ATOMICITY_MODE: CacheAtomicityMode.TRANSACTIONAL }) + time.sleep(1.0) # Need to sleep because of https://issues.apache.org/jira/browse/IGNITE-14868 yield cache cache.destroy() @@ -77,6 +78,7 @@ async def async_tx_cache(async_client): PROP_NAME: 'tx_cache', PROP_CACHE_ATOMICITY_MODE: CacheAtomicityMode.TRANSACTIONAL }) + await asyncio.sleep(1.0) # Need to sleep because of https://issues.apache.org/jira/browse/IGNITE-14868 yield cache await cache.destroy() From 0afab5b9ab6382c4c995dc74861cecb85ffdfaf5 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Thu, 10 Jun 2021 19:38:11 +0300 Subject: [PATCH 41/62] Fix transactions code example in docs --- docs/examples.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/examples.rst b/docs/examples.rst index 97facdb..b3bc60e 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -219,35 +219,35 @@ Let's create transactional cache: .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 81-84 + :lines: 82-85 Let's start a transaction and commit it: .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 87-91 + :lines: 88-92 Let's check that the transaction was committed successfully: .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 93-94 + :lines: 94-95 Let's check that raising exception inside `with` block leads to transaction's rollback .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 97-107 + :lines: 98-108 Let's check that timed out transaction is successfully rolled back .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 110-120 + :lines: 111-121 See more info about transaction's parameters in a documentation of :py:meth:`~pyignite.client.Client.tx_start` From c64fb1099f99fcf073d2b8eacc486f103da9adf9 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Thu, 10 Jun 2021 19:44:56 +0300 Subject: [PATCH 42/62] Fix async transactions code example in docs --- docs/async_examples.rst | 10 +++++----- examples/transactions.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/async_examples.rst b/docs/async_examples.rst index 322869c..5e60c70 100644 --- a/docs/async_examples.rst +++ b/docs/async_examples.rst @@ -90,35 +90,35 @@ Let's create transactional cache: .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 29-32 + :lines: 30-33 Let's start a transaction and commit it: .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 35-40 + :lines: 36-41 Let's check that the transaction was committed successfully: .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 42-44 + :lines: 44-45 Let's check that raising exception inside `async with` block leads to transaction's rollback .. 
literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 47-58 + :lines: 48-59 Let's check that timed out transaction is successfully rolled back .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 61-72 + :lines: 62-73 See more info about transaction's parameters in a documentation of :py:meth:`~pyignite.aio_client.AioClient.tx_start` diff --git a/examples/transactions.py b/examples/transactions.py index 895837c..a0c90ba 100644 --- a/examples/transactions.py +++ b/examples/transactions.py @@ -69,7 +69,7 @@ async def async_example(): print(e) # key=1 value=success - val = await cache.get(1) + val = await cache.get(key) print(f"key=1 value={val}") # destroy cache From fbe61f3779a8da3d2218b969cb92d7515312de69 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Fri, 11 Jun 2021 17:20:03 +0300 Subject: [PATCH 43/62] IGNITE-14851 Enable partition awareness by default, fix unnecessary double connect - Fixes #42. --- docs/async_examples.rst | 9 ++-- docs/examples.rst | 10 ++-- docs/images/partitionawareness01.png | Bin 0 -> 35538 bytes docs/images/partitionawareness02.png | Bin 0 -> 31181 bytes docs/index.rst | 1 + docs/partition_awareness.rst | 63 ++++++++++++++++++++++++++ docs/source/pyignite.transaction.rst | 2 +- examples/transactions.py | 31 +++++++------ pyignite/aio_client.py | 11 ++--- pyignite/api/__init__.py | 2 +- pyignite/client.py | 18 ++------ pyignite/connection/aio_connection.py | 34 ++++++++++---- pyignite/connection/connection.py | 6 +-- pyignite/exceptions.py | 2 +- tests/common/test_transactions.py | 2 +- 15 files changed, 129 insertions(+), 62 deletions(-) create mode 100644 docs/images/partitionawareness01.png create mode 100644 docs/images/partitionawareness02.png create mode 100644 docs/partition_awareness.rst diff --git a/docs/async_examples.rst b/docs/async_examples.rst index 5e60c70..af61a75 100644 --- a/docs/async_examples.rst +++ b/docs/async_examples.rst @@ -84,6 +84,7 @@ File: `transactions.py`_. Client transactions are supported for caches with :py:attr:`~pyignite.datatypes.cache_config.CacheAtomicityMode.TRANSACTIONAL` mode. +**Supported only python 3.7+** Let's create transactional cache: @@ -97,28 +98,28 @@ Let's start a transaction and commit it: .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 36-41 + :lines: 36-42 Let's check that the transaction was committed successfully: .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 44-45 + :lines: 45-46 Let's check that raising exception inside `async with` block leads to transaction's rollback .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 48-59 + :lines: 49-61 Let's check that timed out transaction is successfully rolled back .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 62-73 + :lines: 64-75 See more info about transaction's parameters in a documentation of :py:meth:`~pyignite.aio_client.AioClient.tx_start` diff --git a/docs/examples.rst b/docs/examples.rst index b3bc60e..e01f112 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -219,35 +219,35 @@ Let's create transactional cache: .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 82-85 + :lines: 84-87 Let's start a transaction and commit it: .. 
literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 88-92 + :lines: 90-96 Let's check that the transaction was committed successfully: .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 94-95 + :lines: 98-99 Let's check that raising exception inside `with` block leads to transaction's rollback .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 98-108 + :lines: 102-113 Let's check that timed out transaction is successfully rolled back .. literalinclude:: ../examples/transactions.py :language: python :dedent: 8 - :lines: 111-121 + :lines: 116-126 See more info about transaction's parameters in a documentation of :py:meth:`~pyignite.client.Client.tx_start` diff --git a/docs/images/partitionawareness01.png b/docs/images/partitionawareness01.png new file mode 100644 index 0000000000000000000000000000000000000000..51c11a79aa9aecff319309d951a2e9b7c394fc47 GIT binary patch literal 35538 zcmbTdby(D0yC_UbN_T@aLkI{%D;**oLr60;4BaIiN=pnNol-+8-8o1%(nu=ZaK`7^ zdw=hD_J4=#nsv?Gv*x$%&PBBPJB8=iRM<#JNY9lNWi^qIP&1K`kk>F#5D=Bxa0DPG zLrM0Hwm0(OVqYqW22zB^DSjwY!~=mk%vXAWLwMONd?u^?${(wK*5vdM4Jp_>1BQf@ zj(C~EQILW~aQ=BA?EwBiAmWQZDS)$OAyv3YL;~|3N*{r%vZ}G#rVZi?mAl3nozzu& zq18*#-B@NN`p3_jaUw-lpivM~yD+ax%*id6Zjv#z48`p#6n!n$!x(3=E{Q~jBC-qd zF?$nO4s`@6^%)neTs;Vaktre=`+ogmFKd=FoV&1vqpVFmJ9Kk& z@A+*ACTDznr|9m$+j9=ltYg(O zlf*^6x?R!Pj-MS%Py3ZoQGhS-v`UhvhwD7}V_gDF{P9On(1WiJtE2Z!!TrUy%G{X4 zRS4Oj)5*zZBtHG=`uKS6ov8|V7F`vcTPvHCFcE!v!Ez-hkZ4F9{98nD#p5>b z<>BOV`me_z=lrY-#!C=YW-j5B9TJtVT8uDZ&X>emg?cUb%hTkmOd?&doUCm2nDPBy zOw@?qG{{4!Lvk2BS&6&v?En#Xnvue&{|EnO0l55omDroB=n=1cc%yY|ZazFFDoQlA zSX@Ge?1;OhYnYf1jEKr8MZT{17!LUd!K}oj4f&VAG#FnWO5m*kJRxb>CUJ3fJkv%Dt`85;p+ z0{iawdTX!!p1sAJ;I#?V`G-9LD*y|u=gPfbeRG~;$;lLHyYoRZdSt0*2XLFUCi| z6Ed7ZphZ8IFA+hjEi(%I_#ir zqjGK|D|bQSRqZ$B^`92j(ej(0=0HLb`;(o`27~Y_d;Fu^7E!gT9t)G7VFFLY-4)9f$`mc zwQpN@?7^F$0R$K%%r{Y)@{Eo!82foQzW>AVL;Ts+8Vw<>$qa}ofe`AIp_VstLYG}+ z5gA0J{`5E*COa}tvXfI)T`l?+O#V?)_dPw|{^38IRFNcw^ztWWEeJjP;9vWF6dxzc z!}ABpG{y?ec_tocX5pNIV@N_&#U}+0r&Ss8Q>OcQM0S_vkv7HH)w(#nja&ey`xF&b z3M)Kt;dOO=cqSE#ZY3a*60Ed*Y4s=}wuFAH<%GANoXNb!svtmnsU8ZH`Ywvvent-t*=Bd<$N%XIDwa-g(Tc_%ye$jyzNuh9D=P zvlPa}ln>9T$2^>5Pq!Q5|IA9J)lbgFgr`NdV|RMz(VY+_l$Zz`z8%zyHkRvbpIL z?*K(0&2t~HX-83d=A)6k27}|F%4bW^|D|w9+YAZz5x08U`S&jc6EdR^8{i28nuF0C zsIIT@ms9R_IUJ8;=mUcPl8?hUowuZ0(!Y7@VNw*$QkX(){J2=NtgA+i-e>k`zSnds z9h7GO66T7u?~wdK$z&0AtM>WvF$9OT+jS1?#5AsED)udSV0k)rOst=%we<5&$thiS zpDpJ5k_5N10bYV29dL%zn?JpPNR@lEl32QG*wKFB(!nLtCGY@2`Mpf<;ws-2xZhj9;2j z3jn!VDkh_lHJuJaf4N`&I;`2Dz5E4O5j%6qF8bp2I{U^ze6@2&1?y_hp|ZZopF&zj z^}UU-5&y?_@qcte1j&P}ty63_(kBlmt1RqgNubH6_GNdf>`5JCX6|w8IUskm-WcPwM#bbd7`M17Jv3@Y2l!NqNgu5GsR6AwtNSGz5A>;#~tm_ z!6Q!l!ZOCR|Dat8bM6paiV~MwaTOa^dux-hS#IP&=PY>mN?gY7Y0s!>SA#ziG&?uz zza21Y4#3M^Esl(|Pt1&ro_k~qwW>Z;84>BaAwv5IR4Cp-{vUaNjww;?KEOROjt6VraB=w0#mQkjm7%mxH% zcz<<=BW>R9w0bZ1aXHvTQh6iD^B@=v3F(=%>C=yHDnO@OBxA}U0LfD~>EAUQy%Q0& z|JPav+eA97WL^OM->Vo-aCR#5|5`1<$ThH}|6Kp`kpFz2{eWbONI4MM!hf&-`;dPj zVuk}%wBE8r=q@x*PfxK6nO}=oYA`bK(DFWLmK^NA!}xBWDGdAWxny_T0t_zLiH{v0 z$M~Vit7S4SUY#}~8g_jXS%tzkIlVb2Oip4>k4{EpZwO*TpDh?$3Y22!-^Y!TpG zy}n|m{O`x?T?ehXv-68SP@$mtX^lc@#1tbp@vd5gAoq6Y+ zXm0};z^ZU**Aw~6yRk8EaXQ>*J~{YD0V8MPIa^&_U2_!`-dbE>ACEWuMkq-Qm)OhdB5z0m-nH4{}p9 zbaZ%;J&7#e3ur$LDDnK)uVO0}=$o3pfB#-(oy;>*cR)g3?@G2+nE~xHgq4((yaX$x zx%%YfLZQ7=X|R7-U~IR4dv*fJ*?pnXvhL#T?#}!Vf@#M?3!C|aQyoAqaBP0Uzk6LT z3sm7m_(G4z1(9{`RDV%n0B=`9A0lU5Z*GYtoP?faMjpR)JdLe_kOm-t=lW*W{ZcQ* zMoq1)vE9tA|Nh-&WH2%ejgF2obObBy+;)^6eA`)Bt>R%5kpxY>^30C#a=zSI%E`&e 
   [... remainder of binary image data omitted ...]

diff --git a/docs/images/partitionawareness02.png b/docs/images/partitionawareness02.png
new file mode 100644
index 0000000000000000000000000000000000000000..d6698beeb403e86b4284da8cc7abdcd2b0b6f523
GIT binary patch
literal 31181

   [... binary image data omitted ...]
z?I^nU0l22G#&Ur50d-kK3?_khd(P3)38Jf~eT(w4C$Iu<0COuPnH$lo^s?x=jbl0x z+>DN}lS0=qM4`OIa&BoNwRhVkgTjd0O5)FgX%V6cg*PVVytn*qb2?n|#NcLxFQui`)4m zu>;n!(&p59s;i`=q~R&Fpj9L?6<8wWX&Y6)ZHzgiJo3Kt3OsmF1er(UaA3uKN zVOJQ`$``3r3#;1pV9Q>Id{+~7KT5sVgclnh%XcvqU=dYSXlbde5R^Z2zDI~BF`ce@Z)Sy9D>^mghdn}ct3`_)wO zmmv&M_dyf{Zp=Li0|=}(#~+{A^m(thQ!d9tRNnq>bY7A1e=mC%F5hAHZnpslf()Mz ztK|clNT=3i-Q~y?&;0jmLnAWP3>LW?gN=j7d(#{5pFE$e#ns^Qc4%A|h)xx_pU2XD z%0Q%sD}ZnDX}#{eoCSG6wguNQFgTx7OG$2Y+p1K>GQry?TTFbPi1sw@TxzO28tG@T z{N~7w6eJS1K`HoAj0kQ;;{X2S^be9_3861*toTOdct--5og7%$N9to7ZuC&Y#gP%Z z^;MEJvj-psH`Xen`j`!a&2Jus)n!-eAN#fVS(Dvb0u!9C29Q0@ErA~qu7$Ys~(<_vI+ z&2P#1Sxk_AAQU){HrBBz-O~wweB0&ntQMt08AN7L(PR(0Q%F7~Y*rMi{ic;Y9 zuH_uC>h|raNh)a_RpmQ^WPrfBIBj5gMA-Oz=}s>o7hUi*UknoG%PL{gQIFcG*nzVc z=y`>+I=TvoiO)fRBI2tH;7{ArLybQnMgc&6ZtlnCUE!>A=(XtpN5 z@UpS($kzzb8~5q+rHKwB7u23MKXYJDC%k^ItepaQoc%r95<%SgLa&zq32__}+=Z0ble{wstLj znL8dJy#nPIthxbgE`z7A7&l8Bww@F_1Qt~S69o;Xt{;s^r9>Y5SvDLC)KXj?cY-M1 zP#~h;4MeQ#C-1owUt|Po<})viICHL$;oq(TwO&}k_#lU+R*7tg z)a!(4Fi7MDphF+Os9q(S&K&)yKce}RRK4W(`nfD@(sSgPz>fS+X!AOtE!@&_2Lcs9 zTg5a^c>v^?hYKbH?_W3EzdsFR&8mjBk2`LDgz9*n@7Mx~Z9PkJi6%*U(gUOWfkk;Tn!Jg&E z0DK!usP_2VMeF;Z58T6EU+mPsHPzWAz_6d2gJ3KACTzU&VK&kok6Qrn3OFY|lILQg zR|XPwu2=KmLld{aXO-`tx0{Au5td=$Ot|{B*PzRn#~Q~e+s5gK;>wpDlEFUZJQ)62 z)u!(A%C#IqnJd%mLXL9E3`s``thwf_#I+r_LN!I?M%`a|*cyt18|y>n5lQ<^gX-myR6AqIM~?{4HqcDJjgbUb29PuDuCD+ z(Z!ONBN}XHpLxWhzA;siV^(X$k`}xBb+Tk4xJ{NOpROP(V8`QNFO1!N$f$+r@XMOs~}lKuC)M zPJsBvlc(SvnbflY=7e>_8vq-*8RQ=xxl3ERX8urSzkb1#m$SV0QV(DhJqyGLQ%WWf zp>NbxoFFo{1dd}|+VmIXptmyeFl*M|yd7*hEh?%AMM0OBnttsJ2fA3WbJ70;3L!5r;`(Et!G~jUbu9TOdGp>R})&uzXfosH66Vd zuYP|QYCG=dkngybfh{EdFf>|2qsGS;ov(f^wi1Od`|BC0tgYJg^yFxuV+p0l*$F}v z&E&$nO$b1DzAjKx?*Z@$C*Z7ksj)sN<_mGjgZtd``wYywo0@o<^KxU8QQqai)s=d&@yL+A)v!$RO zLUW9_c}Sr2N@8Jw$~iS-)ds>9K4EqHY&}p01-XsId+x2vgvj$bnVy-JQ6NP@` z{c3PYMR-}fHXBOYr>HI@nOd! zO3-gy5J@`q*@}&;#X{JtDhBJ6Y@K^kpFWN9SOPQ@AQa<#oT?kUc=0+Q9g*Z`i(6YemK6O56OL-3B+ayFB|$D*+S`D zy!OssL%w|!QOQ2=4TFh!=|}-ZWR?jzH3d%R^*W1`4RnwjI+4LrsG5kN-h1`40n-)s z93N-=!GN>ylcJxR8p91Dtn#o*h)4A5{fF?BKi?=(bKo0r{M7LA0VNphoZ(F4W!Z1e z>74pGWCLx!D;J*s7*a^=A3XN^0NVu(my|P`3#OpZLtKF@mobcS*1)w+`P90+1E42i z4G0oytZTR!-o`AG+GO;KCUv;5g-@m;W-H%dph}8oCG4?*qgwbn3C*8>Ec)#HxmDm; zV$!#cVLT=xDz~fSDu7sSq>fXX~=^4FlKK&eluzm_U0tN~LaU7Pp!0 z2^$Jzuf5dU`mN6V9KhW&xmTF7A=E)-Liy~mj1Syx%{Hp5dGR8FGXy_2ta3|J5NstI z@>M2;59eC|A!c<5tpN)BVl0ELrNAao|RhtBf|Zm;l$ zb;aZ{30Z=_^ugKN%$~x!mdq&&pZQ)t8C{XR_YjwCEV`LXVgu(b!#3{yo+ChMK`qfw zJU3elY|7Ed9A$JHN5GAL^c#%H%|C(+Agxs>_EEMr&?J^jjzSVojk)gUh(_Q0@umeT zw=g4+7UuGFa>7Msf*~xw4yaiILXdleq-ds0KPL9Y-gyO_7;rs3ve{0V4gmaq%OCe+ zt8pun2#g+QoDz31bpMg~8x}*QerL!U+S`yxFz7cSs7?te^c8IJ3z2u#f4MUz@mX<1 zGxj4xgqF1GBgNb3!X4VB%z6Z&u1*Yu_$idO7xCzxB#7sii&q{N^BF36UI&@LGhAjnJJrwhKuzvyGYM0XEX2Dr%K+FVj5` zGZ)d|43XFwA6;|=);Xypytj)}&HWMRwSJnvH~Sk`U$r4+-8aA1d*Di`b#aoR!=dsW zGa6j_9Q)1AY!;yJChX-Nfh42? zR8Wb0O7jcz2xa|*H{}n?ml3yLF(uzf$4TZh4yTL!E-A_!bN4>cE1-0&lkf18g$Z^F z=dtbK%vdNIx`(fG$v_z~G4Ss{Y|y;muGfVuSd3^+HG6(`U}#V0$F>}cVdV1S!RL5s zYe03YA_#3}*hq0a(OdhX{_+7RG&5@0#T5-IdZ@fL+IjCM`Cr7Q*b*A0L0p46_prA5 zmRZiKqe5F`BFl5-UGnL2t&BEFsMbMa%$$K_DWDCY8phv7eJ=va+6^gu?ECm;^c|k! z?SM9PFSPB|LJYM`6Hrga)l~>0E(lL5?!Fs*)RpS?F6_WjM+0=4=x z-n<-pCvEnZ*y+dwJuHANMV}lyNJ2=%TO#a7#uQThv9&Vhmi=vB&NuGp?`eWIIQFf? 
z+VzEZt0CtYG8xf|3}Q&ySvucIC1kPMguayDKZji zD&Ij*+Q+2*xu-iwo4s?gEZXfRcAy-*)4G9$UPWa;+_cgWtXBl%FNw*i(M8uK9_ZDK5>z__8~5JZsrd;;)58WtFZ*V<1@_28 z%|d441m*IhBeFG5fW}K^L1fr5l(ENgF&IogpjRHA4e?#!*_PrHAAGY*`v8C6(P>&i zAHZ(bC{d>00wjr-E~o09naa{Zn;m2(UeeDG^Z^E6xb&@rHGbU{MeR%DzTo@!`^K}# zL9ijrmEeBk#dpbQ_Quc&ag&<6Xy0hwA)rPVv08)AB)eiTh=ZN|Ty*3kMkMoi9^_Dg zG*LA+;O3{P^%tY6mdjm$X4j>F%GVO244cCQ!1KO6z!qg>qUeBBBU@Oo`ics$Mm05j zGFvgsg~f*@fU;Mrz?Bg2Q9uXwrumZ-ypDog$cVh9FKLkD8fi&R+T&zI2;|r_NSCAD zaU=MhLAe2Z9I&SZDkQjdZK4o2{)N3F;Aa9BYX-21zp0)B8{9QeOM#^#gp)|8>2o9j z=rp~uqJ-^@y`?1j_(lGd4lfufbu#tDDA7}u3L-%k9fq9e*4eY7hlXHqpn)~2`q0Gu(dnr05kQVxtNO_m$DX$I4&Wr!rX@Q^Dk1#x{ln0gbRn&tgATX_OH-J=ivuI>8RW zDrnOLDJ<*1UP)lS&+2bN(j2i(t4MB+iiAu7dNII%z+vd{PXPE)`8bh9sx^^0qneJE zwml)|ffmq2ce9fyob;?JkFZH83ySctMUt12rU8F}@TeSO7DUOJ#7UFB?u zV#0D~>52X@iDU3Ki;4-LmoXxBFsQn7ihxcfI~e@!l>E-eDsFD^fvud1Hd+&JyD?(~ zRQ67_SZZY~8c;M(Nn^1pKaW3re$OY*#ZC7mJ#HgdZ^G_g@h|2ODyVLav2S<6N2jQ1hwY@qX_U&CfpKpya%`J3v>Z%=y`)@ir1?#j!{*L|j11}&Hp7SQ8sLKGGBf79PKps)i= zsQ8_$mvhY%-u04k>mf_U_X2!;mziV`>SIC>;TjR^Ls3>!NKp{32xHEml$LtXsHXdQ zcbe1KSQgb3GP8w3i2YeyfD`j2-I3X+Ew8i|-bOWdJo})W)Lxk&3NMmAwN}SQlx0$Y zh`9@&Hh+ z?r*)ALSVzY_C;^5dKXK(lXge^yRF8}E%7%`*r#-c0-K24KBWXFyD_8j2j#DSH3)t& zSqL|(4!j4pn-pTjp9q&F8n{ZYjiuZu0Th!o`&C2Jairw1xHK)#n^HJmSd=TZDHr9> zpOb{LH0W#=6;O#o$FF)MxH_;&O#yp{=x1ngm#L25roSpoyn7p*=QD&&NO2{!8*clsRGk*<#ly6X4a-(^4Rv+14rptHSLyn&heS+; zfDEQcQA)3(l=yz6@22B`X|^rvX*p8r=44#T0F+0)4G8=4IxK!MJa*ti)X>hk%^n-> z)^EJJNA^Riw*|~>?KsvSgF`zImL=&uHohRiE2$o8_WJc}U*G0yruE+w7%3ln?*V#9 z0JMZEwYr^g3B{6nvVFit>4zESMKXHZZvM;(x7$(?wvy0D`KMQsl>$?y+ZLlpX{&C1 ztHM2t7A)E&v0QU<|4wLDB)G%u*bKuo1~nFsl~4Ahyb4N6-v82M+XRnzxYxD)|CSpP$k?$Nrq#E z7`9eief3xOGkNO>CGXOUvy3=VHwCJImw$Ux?)D-%uCBhegK{z1HW}|g)~+v^GLq~5 z6U_!ZGZPzut*BDuj$JOVKs@(3S{cysZc|rZO>JrBC~ECnt|Uy3g+Q_4VpE^eel7y9Li*4h}#WNQ)H0O!Bg~rTG!1IlS^H zd3D%6!l|lQLvar$TooD)?vDIA(hd++R#sN(CUqWCnlJOpd*Pp*JyKrjKZn?;Q}htK ziZI8tWu)5L+4-I?-#hd8g7bu32WB2+%c={S)#5Wm+jS;6X8dI3W*+RcYhMP(qj>|pu4+P3I>~_)X*@t;-81nWz@Qe_mJRx zULw&lTkQL-bCJCWsY?DV74nIt7AOw{GT+cQoE<2yp*zlkZnJS^rc#$8yU9`N?V83r zQ;GzXBII93Z(IL0t$lFU?0*KWuDklS-;68guCQ z)X<>VoZdeqsg3F|oUSzI8OZ5r3rwqEYenrZ@2d4cP=1pc3Z+FBdNOt#qU8A}~a#9huU7UdllU5xBX zNGL7I;Vp%bNdblD7Nt0359~gSyEe}&~z}mf{`$^P}F*JXJ<#w)aNv@AbAFgwk3!4O;=V_mdoOH42IP19j*0nHaKz$;E8n}Wq2Y~o3zMHyxza! zCPwvs9>$lLUZDBlg^|(QZ*cPxxU+}P<^7JeKvDQR8t?DeF9wTr z?Si=Y-zR8sQ|fa322-ko?zi@lUyDjUj^hq#W7yi-0*!qGYy{|8qPQsEi#AG$|MxD{PZoia530~X zzepCAyKNV*pYH)NJD_HN1{wf}mOu(Mujds*h@PmV;2+FT;P{jUiYerQT{#@nw$&M; z*tY)saV$pCqiQHje#AQ|E-A4ARMXP(2?@L#3b8aR-at77pdJEXZqYx*{SMK%?N91e z{(1Pe=qDE&C#Nl#{^-rygyn2Ds;utkh9-uW-wXfut^D!R^K&nzsqB}p;OS)?9OL8T z{QUgJl~om8rvYB#YM3Hqt6njgS!r{Ni<`=-m5)7&!M6_y0r79NZLe$xAS|AE>(;=m zBGJ^;<2m`~L!%+L3_lw!qO`RmkuB z5H3#pPk*0lf(W_A%O5Ymi?3a_#A%HqX}P56r?X#s9xN^_{=gB7N{fn)F|n~-MN`C& zu?}xj&wue_y~WPXF3-96pE#J z;sKe1hp=0DWZ2qh9_SP()0bk|&|F(v-#p;qAzt5!AJZK-c(H&kdl+ z|2tmk)_JHwOgo)WDUC+eTxgxOI(_J zdxUsd?#(eN5+}aD`b~zl{I8|J)w;6TdqVT*+m6`F;GP=8E67D+Lb^m@P8j6+Der#SJ3#c?@QFVBm}RWW1^A_M-_Mjo$mH+E zpAM9it!!>GKLRQ-5d7_8WN72&rfI+cAd$Yk4hkqy5Xfg07U6Aw(5l%40KFeB=KXD! 
zlLM3sDY*uRS$dqg+&|U#G+8O?vgV}~b(vir_(@iQr$|xvO1<9ExONRlXuD<#xW#R| z@zL(ssds9e(}b}HAWRJ^xIPTL(CsKFC>XFkFD%$db859=Cyd0t9$A>|YO^v1>Zzct zQmyrzhK#corX?)NK}9vW@aFjX+K(@L@aFEY`JN>bNd#$j-N0D@aX%8>@-WD5>A2G$ zjk0830P%K}^z}`6Iy?i};h=5=x|`#^CTQC}9)Y}D@}e$*#P>8Mbk-OD`93sgT=}Qk z(A(XDah^A(QGAr=Jqrt*x4*Mfc7m>P*m^kAn1fAOdwc~xXiV;iB5n~=D%nX(nI09;4&+iUph`q?jfVKjqI2@du zG$x*;(z$N2#k{UX?`{RmSgXg@3p$hD+|YZ6;7j}R_HFRR=+YddKRF+94t**s7%buu=K2d}18&3#MLd>tm* zaqii)__v}?Pky#$UC;)A$FHbMva5+$tYwTkd8k$Y9NPNC1c`(}z z6?Mk^?aczN=IXj7nTHVQPGVr}+EsKLL5y=$~P?28)gwK2dv$cN|LL$m3<&Qd6g z%?s`pYdY-2zF{cHi9P+yeMaWDr(X8k*j$uV?y74$aUIZ|G<^6uc|GB%$4+vV4YASZ zL_-^UcX3oC6<=28>AupJppf=Lp}MF-pXx?0&B(}<&NDGMqVDqEQrRb;3&BrcyCiNE z!KAooZ=e?V394x+ZF#sHB(HMjdPnTQ?4G5QrA852mo8b81u7p-4C0V-576j9jTSSr zp@06wOcl}kEPkDR2H7bmq-NdAyyLjBFc6fG__fcRfb2pCuB2ef7UcB%f}2+q`-$&n zWNVft<7Z->YA1(qnqp*Ng5U8D`Tz~M-sZey&a|qks+l2a02)yjEx*Q(YA|YuA)p1CFrcgo>n#4v zrGcObFI@69H|%dC`0{hgvZ|}Ag(IN_jY)o&6D7l~7K%U<6k$lHdwr6go^A~pcmGk} ze48hC?{w(j?pY|pJdG}PlQ?c}M{=Tbb;|_Y?PV2VfC=@!LtgCiUPcjz2l&hVwCsgx{{egmZVw3r-qqO1w5sHzi1XkK~4RuM^u|^>gOpPmND6X zjlY?O&o=+Bm;0g@~ z_em%`nE~g&?!Ca3J#+oQ{X)5XlJY$FS+eDouWxN_t}zyPp`H)Ks$nNE|=+RA?64b9-=RlqOBsD;)&bH7{sVpvhAcjR2~ zzb`x?kUQO^xmT{uZath1(DP!rn09*_MK#){jdXs$in;YqWxVtxPPEOo%5%9m-stL* z(+h!Z;ny&hJ1H9JE#{~}e z^E1AyFg)B4kO`3c@2h*#%#otQ7+DXf=L@9Gql{eUE9n25ji7&R9qYfj1$+{lbBwW% z%6|>4a5+K0jA1Q^?(ou&CW^P#&Fvq1#gT9!mJk&_e4WVS{^HMk|;-LaPaG*xd`v|=Tn~-Qt(rogujc4?Q**(UY&q~ipt$c z)st`)0HFH%RZRJqv2>4N-H-QCb#ml=|vVE-6L*G3*^OUv#c)O`cnA4-%`Ikih z|KEFe-qyM~3r##JvT1NAgnaFIG7xWeGMjMX0^YAgqQ%y4057&{s#0MKb=Czwu1(Tv z>QAf5pR*B;&a=T9>Ca-t4~utHhS-awA<@)?WAZfmRx~f4uynYP5A{PIAY_L`x)9XL z(|g|x*}8z=2bwCUa@@ajiB~p{0Kc~1`agPo$4}rOTe2ad{}$3xxYJ_m07myR_E&p8 z{fZZ;8CqvXb?KkE^?GU82zY5QT2>M81N9%&Kfn8Lm+%WR(knB7*EFLk$*Id$%DfEv EKMrPLkpKVy literal 0 HcmV?d00001 diff --git a/docs/index.rst b/docs/index.rst index 7c28b6c..5a6f8d3 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -22,6 +22,7 @@ Welcome to Apache Ignite binary client Python API documentation! readme modules + partition_awareness examples async_examples diff --git a/docs/partition_awareness.rst b/docs/partition_awareness.rst new file mode 100644 index 0000000..5382dfc --- /dev/null +++ b/docs/partition_awareness.rst @@ -0,0 +1,63 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +=================== +Partition Awareness +=================== + +Partition awareness allows the thin client to send query requests directly to the node that owns the queried data. + +Without partition awareness, an application that is connected to the cluster via a thin client executes all queries and operations via a single server node that acts as a proxy for the incoming requests. These operations are then re-routed to the node that stores the data that is being requested. 
+
+.. image:: images/partitionawareness01.png
+  :alt: Without partition awareness
+
+Notice how queries must pass through the proxy server node, where they are routed to the correct node.
+
+With partition awareness in place, the thin client can directly route queries and operations to the primary nodes that own the data required for the queries. This eliminates the bottleneck, allowing the application to scale more easily.
+
+.. image:: images/partitionawareness02.png
+  :alt: With partition awareness
+
+Partition awareness can be enabled or disabled by setting the `partition_aware` parameter in
+:meth:`pyignite.client.Client.__init__` or :meth:`pyignite.aio_client.AioClient.__init__` to `True` (the default)
+or `False`.
+
+Also, it is recommended to pass a list of address and port pairs of all server nodes
+to :meth:`pyignite.client.Client.connect` or to :meth:`pyignite.aio_client.AioClient.connect`.
+
+For example:
+
+.. code-block:: python3
+
+    from pyignite import Client
+
+    client = Client(
+        partition_aware=True
+    )
+    nodes = [('10.128.0.1', 10800), ('10.128.0.2', 10800),...]
+    with client.connect(nodes):
+        ....
+
+.. code-block:: python3
+
+    from pyignite import AioClient
+
+    client = AioClient(
+        partition_aware=True
+    )
+    nodes = [('10.128.0.1', 10800), ('10.128.0.2', 10800),...]
+    async with client.connect(nodes):
+        ....
\ No newline at end of file
diff --git a/docs/source/pyignite.transaction.rst b/docs/source/pyignite.transaction.rst
index 7c6b016..b0301f4 100644
--- a/docs/source/pyignite.transaction.rst
+++ b/docs/source/pyignite.transaction.rst
@@ -14,7 +14,7 @@
    limitations under the License.
 
 pyignite.transaction module
-=========================
+===========================
 
 .. automodule:: pyignite.transaction
     :members:
diff --git a/examples/transactions.py b/examples/transactions.py
index a0c90ba..ef9b08c 100644
--- a/examples/transactions.py
+++ b/examples/transactions.py
@@ -35,19 +35,21 @@ async def async_example():
         # starting transaction
         key = 1
         async with client.tx_start(
-                isolation=TransactionIsolation.REPEATABLE_READ, concurrency=TransactionConcurrency.PESSIMISTIC
+                isolation=TransactionIsolation.REPEATABLE_READ,
+                concurrency=TransactionConcurrency.PESSIMISTIC
         ) as tx:
             await cache.put(key, 'success')
             await tx.commit()
 
         # key=1 value=success
         val = await cache.get(key)
-        print(f"key=1 value={val}")
+        print(f"key={key} value={val}")
 
         # rollback transaction.
         try:
             async with client.tx_start(
-                    isolation=TransactionIsolation.REPEATABLE_READ, concurrency=TransactionConcurrency.PESSIMISTIC
+                    isolation=TransactionIsolation.REPEATABLE_READ,
+                    concurrency=TransactionConcurrency.PESSIMISTIC
             ):
                 await cache.put(key, 'fail')
                 raise RuntimeError('test')
@@ -56,7 +58,7 @@ async def async_example():
 
         # key=1 value=success
         val = await cache.get(key)
-        print(f"key=1 value={val}")
+        print(f"key={key} value={val}")
 
         # rollback transaction on timeout.
try: @@ -70,7 +72,7 @@ async def async_example(): # key=1 value=success val = await cache.get(key) - print(f"key=1 value={val}") + print(f"key={key} value={val}") # destroy cache await cache.destroy() @@ -85,32 +87,35 @@ def sync_example(): }) # starting transaction + key = 1 with client.tx_start( - isolation=TransactionIsolation.REPEATABLE_READ, concurrency=TransactionConcurrency.PESSIMISTIC + isolation=TransactionIsolation.REPEATABLE_READ, + concurrency=TransactionConcurrency.PESSIMISTIC ) as tx: - cache.put(1, 'success') + cache.put(key, 'success') tx.commit() # key=1 value=success - print(f"key=1 value={cache.get(1)}") + print(f"key={key} value={cache.get(key)}") # rollback transaction. try: with client.tx_start( - isolation=TransactionIsolation.REPEATABLE_READ, concurrency=TransactionConcurrency.PESSIMISTIC + isolation=TransactionIsolation.REPEATABLE_READ, + concurrency=TransactionConcurrency.PESSIMISTIC ): - cache.put(1, 'fail') + cache.put(key, 'fail') raise RuntimeError('test') except RuntimeError: pass # key=1 value=success - print(f"key=1 value={cache.get(1)}") + print(f"key={key} value={cache.get(key)}") # rollback transaction on timeout. try: with client.tx_start(timeout=1.0, label='long-tx') as tx: - cache.put(1, 'fail') + cache.put(key, 'fail') time.sleep(2.0) tx.commit() except CacheError as e: @@ -118,7 +123,7 @@ def sync_example(): print(e) # key=1 value=success - print(f"key=1 value={cache.get(1)}") + print(f"key={key} value={cache.get(key)}") # destroy cache cache.destroy() diff --git a/pyignite/aio_client.py b/pyignite/aio_client.py index 26d243d..2bc850b 100644 --- a/pyignite/aio_client.py +++ b/pyignite/aio_client.py @@ -60,7 +60,7 @@ class AioClient(BaseClient): Asynchronous Client implementation. """ - def __init__(self, compact_footer: bool = None, partition_aware: bool = False, **kwargs): + def __init__(self, compact_footer: bool = None, partition_aware: bool = True, **kwargs): """ Initialize client. @@ -68,13 +68,10 @@ def __init__(self, compact_footer: bool = None, partition_aware: bool = False, * full (False) schema approach when serializing Complex objects. Default is to use the same approach the server is using (None). Apache Ignite binary protocol documentation on this topic: - https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-schema + https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#schema :param partition_aware: (optional) try to calculate the exact data placement from the key before to issue the key operation to the - server node: - https://cwiki.apache.org/confluence/display/IGNITE/IEP-23%3A+Best+Effort+Affinity+for+thin+clients - The feature is in experimental status, so the parameter is `False` - by default. This will be changed later. + server node, `True` by default. """ super().__init__(compact_footer, partition_aware, **kwargs) self._registry_mux = asyncio.Lock() @@ -494,7 +491,7 @@ def tx_start(self, concurrency: TransactionConcurrency = TransactionConcurrency. isolation: TransactionIsolation = TransactionIsolation.REPEATABLE_READ, timeout: Union[int, float] = 0, label: Optional[str] = None) -> 'AioTransaction': """ - Start async thin client transaction. + Start async thin client transaction. 
**Supported only python 3.7+** :param concurrency: (optional) transaction concurrency, see :py:class:`~pyignite.datatypes.transactions.TransactionConcurrency` diff --git a/pyignite/api/__init__.py b/pyignite/api/__init__.py index 7deed8c..19a7036 100644 --- a/pyignite/api/__init__.py +++ b/pyignite/api/__init__.py @@ -17,7 +17,7 @@ This module contains functions, that are (more or less) directly mapped to Apache Ignite binary protocol operations. Read more: -https://apacheignite.readme.io/docs/binary-client-protocol#section-client-operations +https://ignite.apache.org/docs/latest/binary-client-protocol/binary-client-protocol#client-operations When the binary client protocol changes, these functions also change. For stable end user API see :mod:`pyignite.client` module. diff --git a/pyignite/client.py b/pyignite/client.py index b411a2b..f848bcc 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -335,17 +335,10 @@ def __exit__(self, exc_type, exc_val, exc_tb): class Client(BaseClient): """ - This is a main `pyignite` class, that is build upon the - :class:`~pyignite.connection.Connection`. In addition to the attributes, - properties and methods of its parent class, `Client` implements - the following features: - - * cache factory. Cache objects are used for key-value operations, - * Ignite SQL endpoint, - * binary types registration endpoint. + Synchronous Client implementation. """ - def __init__(self, compact_footer: bool = None, partition_aware: bool = False, **kwargs): + def __init__(self, compact_footer: bool = None, partition_aware: bool = True, **kwargs): """ Initialize client. @@ -353,13 +346,10 @@ def __init__(self, compact_footer: bool = None, partition_aware: bool = False, * full (False) schema approach when serializing Complex objects. Default is to use the same approach the server is using (None). Apache Ignite binary protocol documentation on this topic: - https://apacheignite.readme.io/docs/binary-client-protocol-data-format#section-schema + https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#schema :param partition_aware: (optional) try to calculate the exact data placement from the key before to issue the key operation to the - server node: - https://cwiki.apache.org/confluence/display/IGNITE/IEP-23%3A+Best+Effort+Affinity+for+thin+clients - The feature is in experimental status, so the parameter is `False` - by default. This will be changed later. + server node, `True` by default. """ super().__init__(compact_footer, partition_aware, **kwargs) diff --git a/pyignite/connection/aio_connection.py b/pyignite/connection/aio_connection.py index 86993ba..c6ecbc6 100644 --- a/pyignite/connection/aio_connection.py +++ b/pyignite/connection/aio_connection.py @@ -37,7 +37,7 @@ from .bitmask_feature import BitmaskFeature from .connection import BaseConnection -from .handshake import HandshakeRequest, HandshakeResponse, OP_HANDSHAKE +from .handshake import HandshakeRequest, HandshakeResponse from .protocol_context import ProtocolContext from .ssl import create_ssl_context from ..stream.binary_stream import BinaryStreamBase @@ -152,20 +152,23 @@ def __init__(self, client: 'AioClient', host: str, port: int, username: str = No self._transport = None self._loop = asyncio.get_event_loop() self._closed = False + self._transport_closed_fut = None @property def closed(self) -> bool: """ Tells if socket is closed. 
""" return self._closed or not self._transport or self._transport.is_closing() - async def connect(self) -> Union[dict, OrderedDict]: + async def connect(self): """ Connect to the given server node with protocol version fallback. """ + if self.alive: + return self._closed = False - return await self._connect() + await self._connect() - async def _connect(self) -> Union[dict, OrderedDict]: + async def _connect(self): detecting_protocol = False # choose highest version first @@ -192,13 +195,16 @@ async def _connect(self) -> Union[dict, OrderedDict]: self.client.protocol_context.features = features self.uuid = result.get('node_uuid', None) # version-specific (1.4+) self.failed = False - return result def on_connection_lost(self, error, reconnect=False): self.failed = True for _, fut in self._pending_reqs.items(): fut.set_exception(error) self._pending_reqs.clear() + + if self._transport_closed_fut and not self._transport_closed_fut.done(): + self._transport_closed_fut.set_result(None) + if reconnect and not self._closed: self._loop.create_task(self._reconnect()) @@ -221,7 +227,7 @@ async def _connect_version(self) -> Union[dict, OrderedDict]: hs_response = await handshake_fut if hs_response.op_code == 0: - self._close_transport() + await self._close_transport() self._process_handshake_error(hs_response) return hs_response @@ -233,7 +239,7 @@ async def _reconnect(self): if self.alive: return - self._close_transport() + await self._close_transport() # connect and silence the connection errors try: await self._connect() @@ -259,12 +265,20 @@ async def _send(self, query_id, data): async def close(self): self._closed = True - self._close_transport() + await self._close_transport() - def _close_transport(self): + async def _close_transport(self): """ Close connection. """ - if self._transport: + if self._transport and not self._transport.is_closing(): + self._transport_closed_fut = self._loop.create_future() + self._transport.close() self._transport = None + try: + await asyncio.wait_for(self._transport_closed_fut, 1.0) + except asyncio.TimeoutError: + pass + finally: + self._transport_closed_fut = None diff --git a/pyignite/connection/connection.py b/pyignite/connection/connection.py index e8437dc..2e6d6aa 100644 --- a/pyignite/connection/connection.py +++ b/pyignite/connection/connection.py @@ -156,12 +156,9 @@ def __init__(self, client: 'Client', host: str, port: int, timeout: float = None def closed(self) -> bool: return self._socket is None - def connect(self) -> Union[dict, OrderedDict]: + def connect(self): """ Connect to the given server node with protocol version fallback. - - :param host: Ignite server node's host name or IP, - :param port: Ignite server node's port number. 
""" detecting_protocol = False @@ -189,7 +186,6 @@ def connect(self) -> Union[dict, OrderedDict]: self.client.protocol_context.features = features self.uuid = result.get('node_uuid', None) # version-specific (1.4+) self.failed = False - return result def _connect_version(self) -> Union[dict, OrderedDict]: """ diff --git a/pyignite/exceptions.py b/pyignite/exceptions.py index fdf1261..7419512 100644 --- a/pyignite/exceptions.py +++ b/pyignite/exceptions.py @@ -38,7 +38,7 @@ class HandshakeError(SocketError): """ This exception is raised on Ignite binary protocol handshake failure, as defined in - https://apacheignite.readme.io/docs/binary-client-protocol#section-handshake + https://ignite.apache.org/docs/latest/binary-client-protocol/binary-client-protocol#connection-handshake """ def __init__(self, expected_version: Tuple[int, int, int], message: str): diff --git a/tests/common/test_transactions.py b/tests/common/test_transactions.py index f4efba5..57874b6 100644 --- a/tests/common/test_transactions.py +++ b/tests/common/test_transactions.py @@ -131,7 +131,7 @@ async def test_simple_transaction_async(async_client, async_tx_cache, iso_level, assert await async_tx_cache.get(1) == 1 async with async_client.tx_start(isolation=iso_level, concurrency=concurrency) as tx: - async_tx_cache.put(1, 10) + await async_tx_cache.put(1, 10) assert await async_tx_cache.get(1) == 1 From 365b16e17a23bc233c770ac4811913d49a0972e3 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Tue, 15 Jun 2021 10:29:53 +0300 Subject: [PATCH 44/62] Update version for the next release --- pyignite/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyignite/__init__.py b/pyignite/__init__.py index 4b77f68..1b0a9c2 100644 --- a/pyignite/__init__.py +++ b/pyignite/__init__.py @@ -17,4 +17,4 @@ from pyignite.aio_client import AioClient from pyignite.binary import GenericObjectMeta -__version__ = '0.5.0-dev' +__version__ = '0.6.0-dev' From 05413e7368fc8423a85d95efa0d07088ddfb0f62 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Tue, 15 Jun 2021 11:41:51 +0300 Subject: [PATCH 45/62] Add release notes for 0.5.0 - Fixes #43. --- RELEASE_NOTES.txt | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/RELEASE_NOTES.txt b/RELEASE_NOTES.txt index 9fee8ea..9d2ae81 100644 --- a/RELEASE_NOTES.txt +++ b/RELEASE_NOTES.txt @@ -1,6 +1,14 @@ Apache Ignite python thin client ================================ +0.5.0 +-------------------------------- +* Added transaction API support (sync and async versions, async version supports only python 3.7+) +* Added ExpiryPolicy (TTL) support +* Improved performance of asyncio version by reimplementing network code using asyncio transports and protocols +* Enabled partition awareness by default +* Fixed handling collections of binary objects + 0.4.0 -------------------------------- * Added partition awareness support From 92a115cf450a71d811dc1af684b3ba7fa04a98f2 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Wed, 16 Jun 2021 16:32:44 +0300 Subject: [PATCH 46/62] IGNITE-14911 Unify timeouts, add support for datetime.timedelta for expiry_policy - Fixes #44. 
--- docs/async_examples.rst | 6 +-- docs/examples.rst | 6 +-- examples/expiry_policy.py | 9 +++-- examples/transactions.py | 4 +- pyignite/aio_client.py | 8 ++-- pyignite/cache.py | 16 ++++---- pyignite/client.py | 8 ++-- pyignite/datatypes/cache_properties.py | 22 +++++++++-- pyignite/datatypes/expiry_policy.py | 27 +++++++------ pyignite/transaction.py | 46 ++++++++++++++------- tests/common/test_expiry_policy.py | 55 ++++++++++++++++---------- tests/common/test_transactions.py | 23 ++++++++++- 12 files changed, 151 insertions(+), 79 deletions(-) diff --git a/docs/async_examples.rst b/docs/async_examples.rst index af61a75..4ce65ce 100644 --- a/docs/async_examples.rst +++ b/docs/async_examples.rst @@ -63,12 +63,12 @@ in cache settings dictionary on creation. .. literalinclude:: ../examples/expiry_policy.py :language: python :dedent: 12 - :lines: 72-75 + :lines: 73-76 .. literalinclude:: ../examples/expiry_policy.py :language: python :dedent: 12 - :lines: 81-89 + :lines: 82-90 Secondly, expiry policy can be set for all cache operations, which are done under decorator. To create it use :py:meth:`~pyignite.cache.BaseCache.with_expire_policy` @@ -76,7 +76,7 @@ Secondly, expiry policy can be set for all cache operations, which are done unde .. literalinclude:: ../examples/expiry_policy.py :language: python :dedent: 12 - :lines: 96-105 + :lines: 97-106 Transactions ------------ diff --git a/docs/examples.rst b/docs/examples.rst index e01f112..4ca0910 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -97,12 +97,12 @@ in cache settings dictionary on creation. .. literalinclude:: ../examples/expiry_policy.py :language: python :dedent: 12 - :lines: 31-34 + :lines: 32-35 .. literalinclude:: ../examples/expiry_policy.py :language: python :dedent: 12 - :lines: 40-46 + :lines: 41-47 Secondly, expiry policy can be set for all cache operations, which are done under decorator. To create it use :py:meth:`~pyignite.cache.BaseCache.with_expire_policy` @@ -110,7 +110,7 @@ Secondly, expiry policy can be set for all cache operations, which are done unde .. literalinclude:: ../examples/expiry_policy.py :language: python :dedent: 12 - :lines: 53-60 + :lines: 54-61 Scan ==== diff --git a/examples/expiry_policy.py b/examples/expiry_policy.py index 2002da1..3dbe54b 100644 --- a/examples/expiry_policy.py +++ b/examples/expiry_policy.py @@ -14,6 +14,7 @@ # limitations under the License. import asyncio import time +from datetime import timedelta from pyignite import Client, AioClient from pyignite.datatypes import ExpiryPolicy @@ -30,7 +31,7 @@ def main(): try: ttl_cache = client.create_cache({ PROP_NAME: 'test', - PROP_EXPIRY_POLICY: ExpiryPolicy(create=1.0) + PROP_EXPIRY_POLICY: ExpiryPolicy(create=timedelta(seconds=1.0)) }) except NotSupportedByClusterError: print("'ExpiryPolicy' API is not supported by cluster. Finishing...") @@ -50,7 +51,7 @@ def main(): print("Create simple Cache and set TTL through `with_expire_policy`") simple_cache = client.create_cache('test') try: - ttl_cache = simple_cache.with_expire_policy(access=1.0) + ttl_cache = simple_cache.with_expire_policy(access=timedelta(seconds=1.0)) ttl_cache.put(1, 1) time.sleep(0.5) print(f"key = {1}, value = {ttl_cache.get(1)}") @@ -71,7 +72,7 @@ async def async_main(): try: ttl_cache = await client.create_cache({ PROP_NAME: 'test', - PROP_EXPIRY_POLICY: ExpiryPolicy(create=1.0) + PROP_EXPIRY_POLICY: ExpiryPolicy(create=timedelta(seconds=1.0)) }) except NotSupportedByClusterError: print("'ExpiryPolicy' API is not supported by cluster. 
Finishing...") @@ -93,7 +94,7 @@ async def async_main(): print("Create simple Cache and set TTL through `with_expire_policy`") simple_cache = await client.create_cache('test') try: - ttl_cache = simple_cache.with_expire_policy(access=1.0) + ttl_cache = simple_cache.with_expire_policy(access=timedelta(seconds=1.0)) await ttl_cache.put(1, 1) await asyncio.sleep(0.5) value = await ttl_cache.get(1) diff --git a/examples/transactions.py b/examples/transactions.py index ef9b08c..53da05f 100644 --- a/examples/transactions.py +++ b/examples/transactions.py @@ -62,7 +62,7 @@ async def async_example(): # rollback transaction on timeout. try: - async with client.tx_start(timeout=1.0, label='long-tx') as tx: + async with client.tx_start(timeout=1000, label='long-tx') as tx: await cache.put(key, 'fail') await asyncio.sleep(2.0) await tx.commit() @@ -114,7 +114,7 @@ def sync_example(): # rollback transaction on timeout. try: - with client.tx_start(timeout=1.0, label='long-tx') as tx: + with client.tx_start(timeout=1000, label='long-tx') as tx: cache.put(key, 'fail') time.sleep(2.0) tx.commit() diff --git a/pyignite/aio_client.py b/pyignite/aio_client.py index 2bc850b..0bb2b8c 100644 --- a/pyignite/aio_client.py +++ b/pyignite/aio_client.py @@ -489,15 +489,15 @@ def get_cluster(self) -> 'AioCluster': def tx_start(self, concurrency: TransactionConcurrency = TransactionConcurrency.PESSIMISTIC, isolation: TransactionIsolation = TransactionIsolation.REPEATABLE_READ, - timeout: Union[int, float] = 0, label: Optional[str] = None) -> 'AioTransaction': + timeout: int = 0, label: Optional[str] = None) -> 'AioTransaction': """ Start async thin client transaction. **Supported only python 3.7+** :param concurrency: (optional) transaction concurrency, see - :py:class:`~pyignite.datatypes.transactions.TransactionConcurrency` + :py:class:`~pyignite.datatypes.transactions.TransactionConcurrency`, :param isolation: (optional) transaction isolation level, see - :py:class:`~pyignite.datatypes.transactions.TransactionIsolation` - :param timeout: (optional) transaction timeout in seconds if float, in millis if int + :py:class:`~pyignite.datatypes.transactions.TransactionIsolation`, + :param timeout: (optional) transaction timeout in milliseconds, :param label: (optional) transaction label. :return: :py:class:`~pyignite.transaction.AioTransaction` instance. """ diff --git a/pyignite/cache.py b/pyignite/cache.py index 79fa0f5..51f07c9 100644 --- a/pyignite/cache.py +++ b/pyignite/cache.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import datetime from typing import Any, Iterable, Optional, Tuple, Union from .api.tx_api import get_tx_connection @@ -136,16 +136,16 @@ def cache_id(self) -> int: def with_expire_policy( self, expiry_policy: Optional[ExpiryPolicy] = None, - create: Union[int, float] = ExpiryPolicy.UNCHANGED, - update: Union[int, float] = ExpiryPolicy.UNCHANGED, - access: Union[int, float] = ExpiryPolicy.UNCHANGED + create: Union[int, datetime.timedelta] = ExpiryPolicy.UNCHANGED, + update: Union[int, datetime.timedelta] = ExpiryPolicy.UNCHANGED, + access: Union[int, datetime.timedelta] = ExpiryPolicy.UNCHANGED ): """ :param expiry_policy: optional :class:`~pyignite.datatypes.expiry_policy.ExpiryPolicy` - object. If it is set, other params will be ignored. 
- :param create: create TTL in seconds (float) or milliseconds (int), - :param update: Create TTL in seconds (float) or milliseconds (int), - :param access: Create TTL in seconds (float) or milliseconds (int). + object. If it is set, other params will be ignored, + :param create: TTL for create in milliseconds or :py:class:`~time.timedelta`, + :param update: TTL for update in milliseconds or :py:class:`~time.timedelta`, + :param access: TTL for access in milliseconds or :py:class:`~time.timedelta`, :return: cache decorator with expiry policy set. """ if not self.client.protocol_context.is_expiry_policy_supported(): diff --git a/pyignite/client.py b/pyignite/client.py index f848bcc..6a499a3 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -744,15 +744,15 @@ def get_cluster(self) -> 'Cluster': def tx_start(self, concurrency: TransactionConcurrency = TransactionConcurrency.PESSIMISTIC, isolation: TransactionIsolation = TransactionIsolation.REPEATABLE_READ, - timeout: Union[int, float] = 0, label: Optional[str] = None) -> 'Transaction': + timeout: int = 0, label: Optional[str] = None) -> 'Transaction': """ Start thin client transaction. :param concurrency: (optional) transaction concurrency, see - :py:class:`~pyignite.datatypes.transactions.TransactionConcurrency` + :py:class:`~pyignite.datatypes.transactions.TransactionConcurrency`, :param isolation: (optional) transaction isolation level, see - :py:class:`~pyignite.datatypes.transactions.TransactionIsolation` - :param timeout: (optional) transaction timeout in seconds if float, in millis if int + :py:class:`~pyignite.datatypes.transactions.TransactionIsolation`, + :param timeout: (optional) transaction timeout in milliseconds, :param label: (optional) transaction label. :return: :py:class:`~pyignite.transaction.Transaction` instance. """ diff --git a/pyignite/datatypes/cache_properties.py b/pyignite/datatypes/cache_properties.py index 49327a3..0d7f402 100644 --- a/pyignite/datatypes/cache_properties.py +++ b/pyignite/datatypes/cache_properties.py @@ -14,6 +14,8 @@ # limitations under the License. import ctypes +import math +from typing import Union from . 
import ExpiryPolicy from .prop_codes import * @@ -137,6 +139,20 @@ async def from_python_async(cls, stream, value): return cls.from_python(stream, value) +class TimeoutProp(PropBase): + prop_data_class = Long + + @classmethod + def from_python(cls, stream, value: int): + if not isinstance(value, int) or value < 0: + raise ValueError(f'Timeout value should be a positive integer, {value} passed instead') + return super().from_python(stream, value) + + @classmethod + async def from_python_async(cls, stream, value): + return cls.from_python(stream, value) + + class PropName(PropBase): prop_code = PROP_NAME prop_data_class = String @@ -227,9 +243,8 @@ class PropRebalanceDelay(PropBase): prop_data_class = Long -class PropRebalanceTimeout(PropBase): +class PropRebalanceTimeout(TimeoutProp): prop_code = PROP_REBALANCE_TIMEOUT - prop_data_class = Long class PropRebalanceBatchSize(PropBase): @@ -262,9 +277,8 @@ class PropCacheKeyConfiguration(PropBase): prop_data_class = CacheKeyConfiguration -class PropDefaultLockTimeout(PropBase): +class PropDefaultLockTimeout(TimeoutProp): prop_code = PROP_DEFAULT_LOCK_TIMEOUT - prop_data_class = Long class PropMaxConcurrentAsyncOperation(PropBase): diff --git a/pyignite/datatypes/expiry_policy.py b/pyignite/datatypes/expiry_policy.py index d729da5..95e37db 100644 --- a/pyignite/datatypes/expiry_policy.py +++ b/pyignite/datatypes/expiry_policy.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. import ctypes +import math +from datetime import timedelta from io import SEEK_CUR from typing import Union @@ -22,13 +24,16 @@ def _positive(_, attrib, value): + if isinstance(value, timedelta): + value = value.total_seconds() * 1000 + if value < 0 and value not in [ExpiryPolicy.UNCHANGED, ExpiryPolicy.ETERNAL]: raise ValueError(f"'{attrib.name}' value must not be negative") def _write_duration(stream, value): - if isinstance(value, float): - value = int(value * 1000) + if isinstance(value, timedelta): + value = math.floor(value.total_seconds() * 1000) stream.write(value.to_bytes(8, byteorder=PROTOCOL_BYTE_ORDER, signed=True)) @@ -44,17 +49,17 @@ class ExpiryPolicy: #: Set TTL eternal. 
ETERNAL = -1 - #: Set TTL for create in seconds(float) or millis(int) - create = attr.ib(kw_only=True, default=UNCHANGED, - validator=[attr.validators.instance_of((int, float)), _positive]) + #: Set TTL for create in milliseconds or :py:class:`~time.timedelta` + create = attr.ib(kw_only=True, default=UNCHANGED, type=Union[int, timedelta], + validator=[attr.validators.instance_of((int, timedelta)), _positive]) - #: Set TTL for update in seconds(float) or millis(int) - update = attr.ib(kw_only=True, default=UNCHANGED, type=Union[int, float], - validator=[attr.validators.instance_of((int, float)), _positive]) + #: Set TTL for update in milliseconds or :py:class:`~time.timedelta` + update = attr.ib(kw_only=True, default=UNCHANGED, type=Union[int, timedelta], + validator=[attr.validators.instance_of((int, timedelta)), _positive]) - #: Set TTL for access in seconds(float) or millis(int) - access = attr.ib(kw_only=True, default=UNCHANGED, type=Union[int, float], - validator=[attr.validators.instance_of((int, float)), _positive]) + #: Set TTL for access in milliseconds or :py:class:`~time.timedelta` + access = attr.ib(kw_only=True, default=UNCHANGED, type=Union[int, timedelta], + validator=[attr.validators.instance_of((int, timedelta)), _positive]) class _CType(ctypes.LittleEndianStructure): _pack_ = 1 diff --git a/pyignite/transaction.py b/pyignite/transaction.py index 5bafa6b..eb77f8d 100644 --- a/pyignite/transaction.py +++ b/pyignite/transaction.py @@ -13,8 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import math -from typing import Union +from enum import IntEnum +from typing import Union, Type from pyignite.api.tx_api import tx_end, tx_start, tx_end_async, tx_start_async from pyignite.datatypes import TransactionIsolation, TransactionConcurrency @@ -22,21 +22,41 @@ from pyignite.utils import status_to_exception -def _convert_to_millis(timeout: Union[int, float]) -> int: - if isinstance(timeout, float): - return math.floor(timeout * 1000) - return timeout +def _validate_int_enum_param(value: Union[int, IntEnum], cls: Type[IntEnum]): + if value not in cls: + raise ValueError(f'{value} not in {cls}') + return value -class Transaction: +def _validate_timeout(value): + if not isinstance(value, int) or value < 0: + raise ValueError(f'Timeout value should be a positive integer, {value} passed instead') + return value + + +def _validate_label(value): + if value and not isinstance(value, str): + raise ValueError(f'Label should be str, {type(value)} passed instead') + return value + + +class _BaseTransaction: + def __init__(self, client, concurrency=TransactionConcurrency.PESSIMISTIC, + isolation=TransactionIsolation.REPEATABLE_READ, timeout=0, label=None): + self.client = client + self.concurrency = _validate_int_enum_param(concurrency, TransactionConcurrency) + self.isolation = _validate_int_enum_param(isolation, TransactionIsolation) + self.timeout = _validate_timeout(timeout) + self.label, self.closed = _validate_label(label), False + + +class Transaction(_BaseTransaction): """ Thin client transaction. 
""" def __init__(self, client, concurrency=TransactionConcurrency.PESSIMISTIC, isolation=TransactionIsolation.REPEATABLE_READ, timeout=0, label=None): - self.client, self.concurrency = client, concurrency - self.isolation, self.timeout = isolation, _convert_to_millis(timeout) - self.label, self.closed = label, False + super().__init__(client, concurrency, isolation, timeout, label) self.tx_id = self.__start_tx() def commit(self) -> None: @@ -77,15 +97,13 @@ def __end_tx(self, committed): return tx_end(self.tx_id, committed) -class AioTransaction: +class AioTransaction(_BaseTransaction): """ Async thin client transaction. """ def __init__(self, client, concurrency=TransactionConcurrency.PESSIMISTIC, isolation=TransactionIsolation.REPEATABLE_READ, timeout=0, label=None): - self.client, self.concurrency = client, concurrency - self.isolation, self.timeout = isolation, _convert_to_millis(timeout) - self.label, self.closed = label, False + super().__init__(client, concurrency, isolation, timeout, label) def __await__(self): return (yield from self.__aenter__().__await__()) diff --git a/tests/common/test_expiry_policy.py b/tests/common/test_expiry_policy.py index 9dc4152..939a380 100644 --- a/tests/common/test_expiry_policy.py +++ b/tests/common/test_expiry_policy.py @@ -14,6 +14,7 @@ # limitations under the License. import asyncio import time +from datetime import timedelta import pytest @@ -23,11 +24,11 @@ @pytest.mark.skip_if_no_expiry_policy def test_expiry_policy(cache): - ttl, num_retries = 0.6, 10 + ttl, num_retries = timedelta(seconds=0.6), 10 cache_eternal = cache.with_expire_policy(create=ExpiryPolicy.ETERNAL) - cache_created = cache.with_expire_policy(create=0.6) - cache_updated = cache.with_expire_policy(update=0.6) - cache_accessed = cache.with_expire_policy(access=0.6) + cache_created = cache.with_expire_policy(create=ttl) + cache_updated = cache.with_expire_policy(update=ttl) + cache_accessed = cache.with_expire_policy(access=ttl) for _ in range(num_retries): cache.clear() @@ -39,11 +40,11 @@ def test_expiry_policy(cache): cache_updated.put(2, 2) cache_accessed.put(3, 3) - time.sleep(ttl * 2 / 3) + time.sleep(ttl.total_seconds() * 2 / 3) result = [cache.contains_key(k) for k in range(4)] - if time.time() - start >= ttl: + if time.time() - start >= ttl.total_seconds(): continue assert all(result) @@ -55,20 +56,20 @@ def test_expiry_policy(cache): cache_updated.put(2, 3) # Check that update policy works. cache_accessed.get(3) # Check that access policy works. - time.sleep(ttl * 2 / 3) + time.sleep(ttl.total_seconds() * 2 / 3) result = [cache.contains_key(k) for k in range(4)] - if time.time() - start >= ttl: + if time.time() - start >= ttl.total_seconds(): continue assert result == [True, False, True, True] - time.sleep(ttl * 2 / 3) + time.sleep(ttl.total_seconds() * 2 / 3) cache_updated.get(2) # Check that access doesn't matter for updated policy. 
- time.sleep(ttl * 2 / 3) + time.sleep(ttl.total_seconds() * 2 / 3) result = [cache.contains_key(k) for k in range(0, 4)] assert result == [True, False, False, False] @@ -77,11 +78,11 @@ def test_expiry_policy(cache): @pytest.mark.asyncio @pytest.mark.skip_if_no_expiry_policy async def test_expiry_policy_async(async_cache): - ttl, num_retries = 0.6, 10 + ttl, num_retries = timedelta(seconds=0.6), 10 cache_eternal = async_cache.with_expire_policy(create=ExpiryPolicy.ETERNAL) - cache_created = async_cache.with_expire_policy(create=0.6) - cache_updated = async_cache.with_expire_policy(update=0.6) - cache_accessed = async_cache.with_expire_policy(access=0.6) + cache_created = async_cache.with_expire_policy(create=ttl) + cache_updated = async_cache.with_expire_policy(update=ttl) + cache_accessed = async_cache.with_expire_policy(access=ttl) for _ in range(num_retries): await async_cache.clear() @@ -95,11 +96,11 @@ async def test_expiry_policy_async(async_cache): cache_accessed.put(3, 3) ) - await asyncio.sleep(ttl * 2 / 3) + await asyncio.sleep(ttl.total_seconds() * 2 / 3) result = await asyncio.gather(*[async_cache.contains_key(k) for k in range(4)]) - if time.time() - start >= ttl: + if time.time() - start >= ttl.total_seconds(): continue assert all(result) @@ -113,20 +114,20 @@ async def test_expiry_policy_async(async_cache): cache_accessed.get(3) # Check that access policy works. ) - await asyncio.sleep(ttl * 2 / 3) + await asyncio.sleep(ttl.total_seconds() * 2 / 3) result = await asyncio.gather(*[async_cache.contains_key(k) for k in range(4)]) - if time.time() - start >= ttl: + if time.time() - start >= ttl.total_seconds(): continue assert result == [True, False, True, True] - await asyncio.sleep(ttl * 2 / 3) + await asyncio.sleep(ttl.total_seconds() * 2 / 3) await cache_updated.get(2) # Check that access doesn't matter for updated policy. 
- await asyncio.sleep(ttl * 2 / 3) + await asyncio.sleep(ttl.total_seconds() * 2 / 3) result = await asyncio.gather(*[async_cache.contains_key(k) for k in range(4)]) assert result == [True, False, False, False] @@ -169,3 +170,17 @@ async def test_create_cache_with_expiry_policy_async(async_client, expiry_policy assert settings[PROP_EXPIRY_POLICY] == expiry_policy finally: await cache.destroy() + + +@pytest.mark.skip_if_no_expiry_policy +@pytest.mark.parametrize( + 'params', + [ + {'create': timedelta(seconds=-1), 'update': timedelta(seconds=-1), 'delete': timedelta(seconds=-1)}, + {'create': 0.6}, + {'create': -3} + ] +) +def test_expiry_policy_param_validation(params): + with pytest.raises((TypeError, ValueError)): + ExpiryPolicy(**params) diff --git a/tests/common/test_transactions.py b/tests/common/test_transactions.py index 57874b6..e879f60 100644 --- a/tests/common/test_transactions.py +++ b/tests/common/test_transactions.py @@ -25,6 +25,7 @@ from pyignite.datatypes.cache_config import CacheAtomicityMode from pyignite.datatypes.prop_codes import PROP_NAME, PROP_CACHE_ATOMICITY_MODE from pyignite.exceptions import CacheError +from pyignite.transaction import Transaction, AioTransaction @pytest.fixture @@ -137,7 +138,7 @@ async def test_simple_transaction_async(async_client, async_tx_cache, iso_level, def test_transactions_timeout(client, tx_cache): - with client.tx_start(timeout=2.0, label='tx-sync') as tx: + with client.tx_start(timeout=2000, label='tx-sync') as tx: tx_cache.put(1, 1) time.sleep(3.0) with pytest.raises(CacheError) as to_error: @@ -160,7 +161,7 @@ async def update(i, timeout): await tx.commit() - task = asyncio.gather(*[update(i, 2.0) for i in range(20)], return_exceptions=True) + task = asyncio.gather(*[update(i, 2000) for i in range(20)], return_exceptions=True) await asyncio.sleep(5.0) assert task.done() # Check that all transactions completed or rolled-back on timeout for i, ex in enumerate(task.result()): @@ -231,3 +232,21 @@ async def update(i): await asyncio.gather(*[update(i) for i in range(20)], return_exceptions=True) assert await async_tx_cache.get_all(list(range(20))) == {i: f'test-{i}' for i in range(20) if i % 2 == 0} + + +@pytest.mark.parametrize( + "params", + [ + {'isolation': 25}, + {'concurrency': 45}, + {'timeout': 2.0}, + {'timeout': -10}, + {'label': 100500} + ] +) +def test_tx_parameter_validation(params): + with pytest.raises((TypeError, ValueError)): + Transaction(None, **params) + + with pytest.raises((TypeError, ValueError)): + AioTransaction(None, **params) From dd3b280ad7ad9d7a679355f0a32b80fce9cf99d5 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Mon, 21 Jun 2021 12:13:06 +0300 Subject: [PATCH 47/62] Update to latest version of released pyignite in README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8e009de..3f35643 100644 --- a/README.md +++ b/README.md @@ -67,7 +67,7 @@ pip install pyignite To install a specific version: ```bash -pip install pyignite==0.4.0 +pip install pyignite==0.5.0 ``` ## Documentation From 8fc14f874e6cd2f144b4fe3f4029bd46d8defe09 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Wed, 14 Jul 2021 12:59:21 +0300 Subject: [PATCH 48/62] IGNITE-15103 Implement debug and error logging of connections and queries - Fixes #45. 
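A hedged sketch of how an application could surface the logging added here. The `pyignite.connection` logger name follows from the `logging.getLogger('.'.join(__name__.split('.')[:-1]))` call in the connection module changes below; the parallel logger for queries is assumed to be named `pyignite.queries` (defined in pyignite/queries/query.py):

    # Illustrative only: route pyignite's new connection/query logging to the console.
    import logging

    logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s %(message)s')

    # Handshake, reconnect and connection-lost events added in this patch:
    logging.getLogger('pyignite.connection').setLevel(logging.DEBUG)
    # Per-query logging (logger name assumed by analogy):
    logging.getLogger('pyignite.queries').setLevel(logging.DEBUG)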
--- pyignite/connection/aio_connection.py | 32 ++++---- pyignite/connection/connection.py | 71 ++++++++++++++---- pyignite/connection/protocol_context.py | 3 + pyignite/queries/query.py | 99 +++++++++++++++++++------ tests/conftest.py | 10 +++ tests/security/test_auth.py | 41 ++++++++-- tests/security/test_ssl.py | 16 +++- 7 files changed, 208 insertions(+), 64 deletions(-) diff --git a/pyignite/connection/aio_connection.py b/pyignite/connection/aio_connection.py index c6ecbc6..c5fa24d 100644 --- a/pyignite/connection/aio_connection.py +++ b/pyignite/connection/aio_connection.py @@ -33,7 +33,7 @@ from typing import Union from pyignite.constants import PROTOCOLS, PROTOCOL_BYTE_ORDER -from pyignite.exceptions import HandshakeError, SocketError, connection_errors +from pyignite.exceptions import HandshakeError, SocketError, connection_errors, AuthenticationError from .bitmask_feature import BitmaskFeature from .connection import BaseConnection @@ -68,7 +68,7 @@ def data_received(self, data: bytes) -> None: hs_response = self.__parse_handshake(packet, self._conn.client) self._handshake_fut.set_result(hs_response) else: - self._conn.on_message(packet) + self._conn.process_message(packet) self._buffer = self._buffer[packet_sz:len(self._buffer)] def __has_full_response(self): @@ -84,7 +84,7 @@ def __process_connection_error(self, exc): connected = self._handshake_fut.done() if not connected: self._handshake_fut.set_exception(exc) - self._conn.on_connection_lost(exc, connected) + self._conn.process_connection_lost(exc, connected) @staticmethod def __send_handshake(transport, conn): @@ -177,38 +177,41 @@ async def _connect(self): self.client.protocol_context = ProtocolContext(max(PROTOCOLS), BitmaskFeature.all_supported()) try: + self._on_handshake_start() result = await self._connect_version() except HandshakeError as e: if e.expected_version in PROTOCOLS: self.client.protocol_context.version = e.expected_version result = await self._connect_version() else: + self._on_handshake_fail(e) raise e - except connection_errors: + except AuthenticationError as e: + self._on_handshake_fail(e) + raise e + except Exception as e: # restore undefined protocol version if detecting_protocol: self.client.protocol_context = None - raise + self._on_handshake_fail(e) + raise e - # connection is ready for end user - features = BitmaskFeature.from_array(result.get('features', None)) - self.client.protocol_context.features = features - self.uuid = result.get('node_uuid', None) # version-specific (1.4+) - self.failed = False + self._on_handshake_success(result) - def on_connection_lost(self, error, reconnect=False): + def process_connection_lost(self, err, reconnect=False): self.failed = True for _, fut in self._pending_reqs.items(): - fut.set_exception(error) + fut.set_exception(err) self._pending_reqs.clear() if self._transport_closed_fut and not self._transport_closed_fut.done(): self._transport_closed_fut.set_result(None) if reconnect and not self._closed: + self._on_connection_lost(err) self._loop.create_task(self._reconnect()) - def on_message(self, data): + def process_message(self, data): req_id = int.from_bytes(data[4:12], byteorder=PROTOCOL_BYTE_ORDER, signed=True) if req_id in self._pending_reqs: self._pending_reqs[req_id].set_result(data) @@ -227,7 +230,7 @@ async def _connect_version(self) -> Union[dict, OrderedDict]: hs_response = await handshake_fut if hs_response.op_code == 0: - await self._close_transport() + await self.close() self._process_handshake_error(hs_response) return hs_response @@ -281,4 
+284,5 @@ async def _close_transport(self): except asyncio.TimeoutError: pass finally: + self._on_connection_lost(expected=True) self._transport_closed_fut = None diff --git a/pyignite/connection/connection.py b/pyignite/connection/connection.py index 2e6d6aa..ae5587a 100644 --- a/pyignite/connection/connection.py +++ b/pyignite/connection/connection.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging from collections import OrderedDict import socket from typing import Union @@ -28,6 +29,8 @@ CLIENT_STATUS_AUTH_FAILURE = 2000 +logger = logging.getLogger('.'.join(__name__.split('.')[:-1])) + class BaseConnection: def __init__(self, client, host: str = None, port: int = None, username: str = None, password: str = None, @@ -78,21 +81,53 @@ def protocol_context(self): return self.client.protocol_context def _process_handshake_error(self, response): - error_text = f'Handshake error: {response.message}' # if handshake fails for any reason other than protocol mismatch # (i.e. authentication error), server version is 0.0.0 + if response.client_status == CLIENT_STATUS_AUTH_FAILURE: + raise AuthenticationError(response.message) + protocol_version = self.client.protocol_context.version server_version = (response.version_major, response.version_minor, response.version_patch) - + error_text = f'Handshake error: {response.message}' if any(server_version): error_text += f' Server expects binary protocol version ' \ f'{server_version[0]}.{server_version[1]}.{server_version[2]}. ' \ f'Client provides ' \ f'{protocol_version[0]}.{protocol_version[1]}.{protocol_version[2]}.' - elif response.client_status == CLIENT_STATUS_AUTH_FAILURE: - raise AuthenticationError(error_text) raise HandshakeError(server_version, error_text) + def _on_handshake_start(self): + if logger.isEnabledFor(logging.DEBUG): + logger.debug("Connecting to node(address=%s, port=%d) with protocol context %s", + self.host, self.port, self.client.protocol_context) + + def _on_handshake_success(self, result): + features = BitmaskFeature.from_array(result.get('features', None)) + self.client.protocol_context.features = features + self.uuid = result.get('node_uuid', None) # version-specific (1.4+) + self.failed = False + + if logger.isEnabledFor(logging.DEBUG): + logger.debug("Connected to node(address=%s, port=%d, node_uuid=%s) with protocol context %s", + self.host, self.port, self.uuid, self.client.protocol_context) + + def _on_handshake_fail(self, err): + if isinstance(err, AuthenticationError): + logger.error("Authentication failed while connecting to node(address=%s, port=%d): %s", + self.host, self.port, err) + else: + logger.error("Failed to perform handshake, connection to node(address=%s, port=%d) " + "with protocol context %s failed: %s", + self.host, self.port, self.client.protocol_context, err, exc_info=True) + + def _on_connection_lost(self, err=None, expected=False): + if expected and logger.isEnabledFor(logging.DEBUG): + logger.debug("Connection closed to node(address=%s, port=%d, node_uuid=%s)", + self.host, self.port, self.uuid) + else: + logger.info("Connection lost to node(address=%s, port=%d, node_uuid=%s): %s", + self.host, self.port, self.uuid, err) + class Connection(BaseConnection): """ @@ -168,24 +203,26 @@ def connect(self): self.client.protocol_context = ProtocolContext(max(PROTOCOLS), BitmaskFeature.all_supported()) try: + self._on_handshake_start() result = self._connect_version() except HandshakeError as e: if e.expected_version 
in PROTOCOLS: self.client.protocol_context.version = e.expected_version result = self._connect_version() else: + self._on_handshake_fail(e) raise e - except connection_errors: + except AuthenticationError as e: + self._on_handshake_fail(e) + raise e + except Exception as e: # restore undefined protocol version if detecting_protocol: self.client.protocol_context = None - raise + self._on_handshake_fail(e) + raise e - # connection is ready for end user - features = BitmaskFeature.from_array(result.get('features', None)) - self.client.protocol_context.features = features - self.uuid = result.get('node_uuid', None) # version-specific (1.4+) - self.failed = False + self._on_handshake_success(result) def _connect_version(self) -> Union[dict, OrderedDict]: """ @@ -258,11 +295,12 @@ def send(self, data: Union[bytes, bytearray], flags=None, reconnect=True): try: self._socket.sendall(data, **kwargs) - except connection_errors: + except connection_errors as e: self.failed = True if reconnect: + self._on_connection_lost(e) self.reconnect() - raise + raise e def recv(self, flags=None, reconnect=True) -> bytearray: """ @@ -287,11 +325,12 @@ def recv(self, flags=None, reconnect=True) -> bytearray: if bytes_received == 0: raise SocketError('Connection broken.') bytes_total_received += bytes_received - except connection_errors: + except connection_errors as e: self.failed = True if reconnect: + self._on_connection_lost(e) self.reconnect() - raise + raise e if bytes_total_received < 4: continue @@ -325,5 +364,5 @@ def close(self): self._socket.close() except connection_errors: pass - + self._on_connection_lost(expected=True) self._socket = None diff --git a/pyignite/connection/protocol_context.py b/pyignite/connection/protocol_context.py index 0f43aa4..58f509e 100644 --- a/pyignite/connection/protocol_context.py +++ b/pyignite/connection/protocol_context.py @@ -37,6 +37,9 @@ def __eq__(self, other): self.version == other.version and \ self.features == other.features + def __str__(self): + return f'ProtocolContext(version={self._version}, features={self._features})' + def _ensure_consistency(self): if not self.is_feature_flags_supported(): self._features = None diff --git a/pyignite/queries/query.py b/pyignite/queries/query.py index 4bcab9f..89c354e 100644 --- a/pyignite/queries/query.py +++ b/pyignite/queries/query.py @@ -14,6 +14,9 @@ # limitations under the License. 
import ctypes +import inspect +import logging +import time from io import SEEK_CUR import attr @@ -21,9 +24,12 @@ from pyignite.api.result import APIResult from pyignite.connection import Connection, AioConnection from pyignite.constants import MAX_LONG, RHF_TOPOLOGY_CHANGED +from pyignite.queries import op_codes from pyignite.queries.response import Response from pyignite.stream import AioBinaryStream, BinaryStream, READ_BACKWARD +logger = logging.getLogger('.'.join(__name__.split('.')[:-1])) + def query_perform(query_struct, conn, post_process_fun=None, **kwargs): async def _async_internal(): @@ -54,6 +60,18 @@ def _get_query_id(): return _QUERY_COUNTER +_OP_CODES = {code: name for name, code in inspect.getmembers(op_codes) if name.startswith('OP_')} + + +def _get_op_code_name(code): + global _OP_CODES + return _OP_CODES.get(code) + + +def _sec_to_millis(secs): + return int(secs * 1000) + + @attr.s class Query: op_code = attr.ib(type=int) @@ -61,6 +79,7 @@ class Query: query_id = attr.ib(type=int) response_type = attr.ib(type=type(Response), default=Response) _query_c_type = None + _start_ts = 0.0 @query_id.default def _set_query_id(self): @@ -134,22 +153,28 @@ def perform( :return: instance of :class:`~pyignite.api.result.APIResult` with raw value (may undergo further processing in API functions). """ - with BinaryStream(conn.client) as stream: - self.from_python(stream, query_params) - response_data = conn.request(stream.getvalue()) + try: + self._on_query_started(conn) - response_struct = self.response_type(protocol_context=conn.protocol_context, - following=response_config, **kwargs) + with BinaryStream(conn.client) as stream: + self.from_python(stream, query_params) + response_data = conn.request(stream.getvalue()) - with BinaryStream(conn.client, response_data) as stream: - response_ctype = response_struct.parse(stream) - response = stream.read_ctype(response_ctype, direction=READ_BACKWARD) + response_struct = self.response_type(protocol_context=conn.protocol_context, + following=response_config, **kwargs) - result = self.__post_process_response(conn, response_struct, response) + with BinaryStream(conn.client, response_data) as stream: + response_ctype = response_struct.parse(stream) + response = stream.read_ctype(response_ctype, direction=READ_BACKWARD) - if result.status == 0: - result.value = response_struct.to_python(response) - return result + result = self.__post_process_response(conn, response_struct, response) + if result.status == 0: + result.value = response_struct.to_python(response) + self._on_query_finished(conn, result=result) + return result + except Exception as e: + self._on_query_finished(conn, err=e) + raise e async def perform_async( self, conn: AioConnection, query_params: dict = None, @@ -166,22 +191,28 @@ async def perform_async( :return: instance of :class:`~pyignite.api.result.APIResult` with raw value (may undergo further processing in API functions). 
""" - with AioBinaryStream(conn.client) as stream: - await self.from_python_async(stream, query_params) - data = await conn.request(self.query_id, stream.getvalue()) + try: + self._on_query_started(conn) - response_struct = self.response_type(protocol_context=conn.protocol_context, - following=response_config, **kwargs) + with AioBinaryStream(conn.client) as stream: + await self.from_python_async(stream, query_params) + data = await conn.request(self.query_id, stream.getvalue()) - with AioBinaryStream(conn.client, data) as stream: - response_ctype = await response_struct.parse_async(stream) - response = stream.read_ctype(response_ctype, direction=READ_BACKWARD) + response_struct = self.response_type(protocol_context=conn.protocol_context, + following=response_config, **kwargs) - result = self.__post_process_response(conn, response_struct, response) + with AioBinaryStream(conn.client, data) as stream: + response_ctype = await response_struct.parse_async(stream) + response = stream.read_ctype(response_ctype, direction=READ_BACKWARD) - if result.status == 0: - result.value = await response_struct.to_python_async(response) - return result + result = self.__post_process_response(conn, response_struct, response) + if result.status == 0: + result.value = await response_struct.to_python_async(response) + self._on_query_finished(conn, result=result) + return result + except Exception as e: + self._on_query_finished(conn, err=e) + raise e @staticmethod def __post_process_response(conn, response_struct, response): @@ -196,6 +227,26 @@ def __post_process_response(conn, response_struct, response): # build result return APIResult(response) + def _on_query_started(self, conn): + if logger.isEnabledFor(logging.DEBUG): + self._start_ts = time.monotonic() + logger.debug("Start query(query_id=%d, op_type=%s, host=%s, port=%d, node_id=%s)", + self.query_id, _get_op_code_name(self.op_code), conn.host, conn.port, conn.uuid) + + def _on_query_finished(self, conn, result=None, err=None): + if logger.isEnabledFor(logging.DEBUG): + dur_ms = _sec_to_millis(time.monotonic() - self._start_ts) + if result and result.status != 0: + err = result.message + if err: + logger.debug("Failed to perform query(query_id=%d, op_type=%s, host=%s, port=%d, node_id=%s) " + "in %.3f ms: %s", self.query_id, _get_op_code_name(self.op_code), + conn.host, conn.port, conn.uuid, dur_ms, err) + else: + logger.debug("Finished query(query_id=%d, op_type=%s, host=%s, port=%d, node_id=%s) " + "successfully in %.3f ms", self.query_id, _get_op_code_name(self.op_code), + conn.host, conn.port, conn.uuid, dur_ms) + class ConfigQuery(Query): """ diff --git a/tests/conftest.py b/tests/conftest.py index 1c65356..70995a2 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -13,9 +13,19 @@ # See the License for the specific language governing permissions and # limitations under the License. import asyncio +import logging +import sys import pytest +logger = logging.getLogger('pyignite') +logger.setLevel(logging.DEBUG) +handler = logging.StreamHandler(stream=sys.stdout) +handler.setFormatter( + logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s') +) +logger.addHandler(handler) + @pytest.fixture(autouse=True) def run_examples(request): diff --git a/tests/security/test_auth.py b/tests/security/test_auth.py index b02f224..3586c91 100644 --- a/tests/security/test_auth.py +++ b/tests/security/test_auth.py @@ -12,6 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +import logging +import re + import pytest from pyignite import Client, AioClient @@ -39,19 +42,32 @@ def cleanup(): clear_ignite_work_dir() -def test_auth_success(with_ssl, ssl_params): +def test_auth_success(with_ssl, ssl_params, caplog): ssl_params['use_ssl'] = with_ssl client = Client(username=DEFAULT_IGNITE_USERNAME, password=DEFAULT_IGNITE_PASSWORD, **ssl_params) - with client.connect("127.0.0.1", 10801): - assert all(node.alive for node in client._nodes) + with caplog.at_level(logger='pyignite', level=logging.DEBUG): + with client.connect("127.0.0.1", 10801): + assert all(node.alive for node in client._nodes) + + __assert_successful_connect_log(caplog) @pytest.mark.asyncio -async def test_auth_success_async(with_ssl, ssl_params): +async def test_auth_success_async(with_ssl, ssl_params, caplog): ssl_params['use_ssl'] = with_ssl client = AioClient(username=DEFAULT_IGNITE_USERNAME, password=DEFAULT_IGNITE_PASSWORD, **ssl_params) - async with client.connect("127.0.0.1", 10801): - assert all(node.alive for node in client._nodes) + with caplog.at_level(logger='pyignite', level=logging.DEBUG): + async with client.connect("127.0.0.1", 10801): + assert all(node.alive for node in client._nodes) + + __assert_successful_connect_log(caplog) + + +def __assert_successful_connect_log(caplog): + assert any(re.match(r'Connecting to node\(address=127.0.0.1,\s+port=10801', r.message) for r in caplog.records) + assert any(re.match(r'Connected to node\(address=127.0.0.1,\s+port=10801', r.message) for r in caplog.records) + assert any(re.match(r'Connection closed to node\(address=127.0.0.1,\s+port=10801', r.message) + for r in caplog.records) auth_failed_params = [ @@ -65,7 +81,7 @@ async def test_auth_success_async(with_ssl, ssl_params): 'username, password', auth_failed_params ) -def test_auth_failed(username, password, with_ssl, ssl_params): +def test_auth_failed(username, password, with_ssl, ssl_params, caplog): ssl_params['use_ssl'] = with_ssl with pytest.raises(AuthenticationError): @@ -73,16 +89,25 @@ def test_auth_failed(username, password, with_ssl, ssl_params): with client.connect("127.0.0.1", 10801): pass + __assert_auth_failed_log(caplog) + @pytest.mark.parametrize( 'username, password', auth_failed_params ) @pytest.mark.asyncio -async def test_auth_failed_async(username, password, with_ssl, ssl_params): +async def test_auth_failed_async(username, password, with_ssl, ssl_params, caplog): ssl_params['use_ssl'] = with_ssl with pytest.raises(AuthenticationError): client = AioClient(username=username, password=password, **ssl_params) async with client.connect("127.0.0.1", 10801): pass + + __assert_auth_failed_log(caplog) + + +def __assert_auth_failed_log(caplog): + pattern = r'Authentication failed while connecting to node\(address=127.0.0.1,\s+port=10801' + assert any(re.match(pattern, r.message) and r.levelname == logging.ERROR for r in caplog.records) diff --git a/tests/security/test_ssl.py b/tests/security/test_ssl.py index 7736864..2cbed4b 100644 --- a/tests/security/test_ssl.py +++ b/tests/security/test_ssl.py @@ -12,6 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import logging +import re + import pytest from pyignite import Client, AioClient @@ -72,17 +75,26 @@ async def inner_async(): @pytest.mark.parametrize('invalid_ssl_params', invalid_params) -def test_connection_error_with_incorrect_config(invalid_ssl_params): +def test_connection_error_with_incorrect_config(invalid_ssl_params, caplog): with pytest.raises(ReconnectError): client = Client(**invalid_ssl_params) with client.connect([("127.0.0.1", 10801)]): pass + __assert_handshake_failed_log(caplog) + @pytest.mark.parametrize('invalid_ssl_params', invalid_params) @pytest.mark.asyncio -async def test_connection_error_with_incorrect_config_async(invalid_ssl_params): +async def test_connection_error_with_incorrect_config_async(invalid_ssl_params, caplog): with pytest.raises(ReconnectError): client = AioClient(**invalid_ssl_params) async with client.connect([("127.0.0.1", 10801)]): pass + + __assert_handshake_failed_log(caplog) + + +def __assert_handshake_failed_log(caplog): + pattern = r'Failed to perform handshake, connection to node\(address=127.0.0.1,\s+port=10801.*failed:' + assert any(re.match(pattern, r.message) and r.levelname == logging.ERROR for r in caplog.records) From 82f29e202ea4526b49721993e6ea356640ef66e6 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Tue, 20 Jul 2021 08:24:13 +0300 Subject: [PATCH 49/62] IGNITE-15102 Implement event handling and monitoring for python thin client - Fixes #46. --- docs/modules.rst | 1 + .../pyignite.connection.protocol_context.rst | 20 + docs/source/pyignite.connection.rst | 6 + docs/source/pyignite.monitoring.rst | 21 + docs/source/pyignite.rst | 1 + pyignite/aio_client.py | 17 +- pyignite/client.py | 24 +- pyignite/connection/aio_connection.py | 2 +- pyignite/connection/connection.py | 44 +- pyignite/connection/protocol_context.py | 3 + pyignite/monitoring.py | 457 ++++++++++++++++++ pyignite/queries/query.py | 27 +- pyignite/stream/aio_cluster.py | 53 -- tests/affinity/test_affinity.py | 6 +- .../affinity/test_affinity_request_routing.py | 152 +++--- tests/common/test_query_listener.py | 127 +++++ tests/custom/test_connection_events.py | 129 +++++ tests/security/conftest.py | 24 + tests/security/test_auth.py | 77 ++- tests/security/test_ssl.py | 29 +- 20 files changed, 1047 insertions(+), 173 deletions(-) create mode 100644 docs/source/pyignite.connection.protocol_context.rst create mode 100644 docs/source/pyignite.monitoring.rst create mode 100644 pyignite/monitoring.py delete mode 100644 pyignite/stream/aio_cluster.py create mode 100644 tests/common/test_query_listener.py create mode 100644 tests/custom/test_connection_events.py diff --git a/docs/modules.rst b/docs/modules.rst index 0cce570..bdeec8e 100644 --- a/docs/modules.rst +++ b/docs/modules.rst @@ -31,3 +31,4 @@ of `pyignite`, intended for end users. datatypes/parsers datatypes/cache_props Exceptions + Monitoring and handling events diff --git a/docs/source/pyignite.connection.protocol_context.rst b/docs/source/pyignite.connection.protocol_context.rst new file mode 100644 index 0000000..a5298ba --- /dev/null +++ b/docs/source/pyignite.connection.protocol_context.rst @@ -0,0 +1,20 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. 
You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.connection.protocol_context package +=========================== + +.. automodule:: pyignite.connection.protocol_context + :members: \ No newline at end of file diff --git a/docs/source/pyignite.connection.rst b/docs/source/pyignite.connection.rst index 90c59db..29c2e57 100644 --- a/docs/source/pyignite.connection.rst +++ b/docs/source/pyignite.connection.rst @@ -20,3 +20,9 @@ pyignite.connection package :members: :undoc-members: :show-inheritance: + +Submodules +---------- + +.. toctree:: + pyignite.connection.protocol_context \ No newline at end of file diff --git a/docs/source/pyignite.monitoring.rst b/docs/source/pyignite.monitoring.rst new file mode 100644 index 0000000..98b137d --- /dev/null +++ b/docs/source/pyignite.monitoring.rst @@ -0,0 +1,21 @@ +.. Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +pyignite.monitoring module +=========================== + +.. automodule:: pyignite.monitoring + :members: + :member-order: bysource diff --git a/docs/source/pyignite.rst b/docs/source/pyignite.rst index 2e52500..7a0744c 100644 --- a/docs/source/pyignite.rst +++ b/docs/source/pyignite.rst @@ -44,4 +44,5 @@ Submodules pyignite.transaction pyignite.cursors pyignite.exceptions + pyignite.monitoring diff --git a/pyignite/aio_client.py b/pyignite/aio_client.py index 0bb2b8c..083c964 100644 --- a/pyignite/aio_client.py +++ b/pyignite/aio_client.py @@ -16,7 +16,7 @@ import random import sys from itertools import chain -from typing import Iterable, Type, Union, Any, Dict, Optional +from typing import Iterable, Type, Union, Any, Dict, Optional, Sequence from .aio_cluster import AioCluster from .api import cache_get_node_partitions_async @@ -60,7 +60,8 @@ class AioClient(BaseClient): Asynchronous Client implementation. """ - def __init__(self, compact_footer: bool = None, partition_aware: bool = True, **kwargs): + def __init__(self, compact_footer: bool = None, partition_aware: bool = True, + event_listeners: Optional[Sequence] = None, **kwargs): """ Initialize client. @@ -71,9 +72,10 @@ def __init__(self, compact_footer: bool = None, partition_aware: bool = True, ** https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#schema :param partition_aware: (optional) try to calculate the exact data placement from the key before to issue the key operation to the - server node, `True` by default. 
+ server node, `True` by default, + :param event_listeners: (optional) event listeners. """ - super().__init__(compact_footer, partition_aware, **kwargs) + super().__init__(compact_footer, partition_aware, event_listeners, **kwargs) self._registry_mux = asyncio.Lock() self._affinity_query_mux = asyncio.Lock() @@ -99,9 +101,8 @@ async def _connect(self, nodes): # do not try to open more nodes self._current_node = i - except connection_errors: - conn.failed = True + pass self._nodes.append(conn) @@ -301,7 +302,7 @@ async def _get_affinity(self, conn: 'AioConnection', caches: Iterable[int]) -> D """ for _ in range(AFFINITY_RETRIES or 1): result = await cache_get_node_partitions_async(conn, caches) - if result.status == 0 and result.value['partition_mapping']: + if result.status == 0: break await asyncio.sleep(AFFINITY_DELAY) @@ -341,7 +342,7 @@ async def get_best_node( asyncio.ensure_future( asyncio.gather( - *[conn.reconnect() for conn in self._nodes if not conn.alive], + *[node.reconnect() for node in self._nodes if not node.alive], return_exceptions=True ) ) diff --git a/pyignite/client.py b/pyignite/client.py index 6a499a3..e3dd71b 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -44,7 +44,7 @@ import random import re from itertools import chain -from typing import Iterable, Type, Union, Any, Dict, Optional +from typing import Iterable, Type, Union, Any, Dict, Optional, Sequence from .api import cache_get_node_partitions from .api.binary import get_binary_type, put_binary_type @@ -66,6 +66,7 @@ get_field_by_id, unsigned ) from .binary import GenericObjectMeta +from .monitoring import _EventListeners __all__ = ['Client'] @@ -76,7 +77,8 @@ class BaseClient: _identifier = re.compile(r'[^0-9a-zA-Z_.+$]', re.UNICODE) _ident_start = re.compile(r'^[^a-zA-Z_]+', re.UNICODE) - def __init__(self, compact_footer: bool = None, partition_aware: bool = False, **kwargs): + def __init__(self, compact_footer: bool = None, partition_aware: bool = False, + event_listeners: Optional[Sequence] = None, **kwargs): self._compact_footer = compact_footer self._partition_aware = partition_aware self._connection_args = kwargs @@ -87,6 +89,7 @@ def __init__(self, compact_footer: bool = None, partition_aware: bool = False, * self.affinity_version = (0, 0) self._affinity = {'version': self.affinity_version, 'partition_mapping': defaultdict(dict)} self._protocol_context = None + self._event_listeners = _EventListeners(event_listeners) @property def protocol_context(self): @@ -338,7 +341,8 @@ class Client(BaseClient): Synchronous Client implementation. """ - def __init__(self, compact_footer: bool = None, partition_aware: bool = True, **kwargs): + def __init__(self, compact_footer: bool = None, partition_aware: bool = True, + event_listeners: Optional[Sequence] = None, **kwargs): """ Initialize client. @@ -349,9 +353,10 @@ def __init__(self, compact_footer: bool = None, partition_aware: bool = True, ** https://ignite.apache.org/docs/latest/binary-client-protocol/data-format#schema :param partition_aware: (optional) try to calculate the exact data placement from the key before to issue the key operation to the - server node, `True` by default. + server node, `True` by default, + :param event_listeners: (optional) event listeners. 
""" - super().__init__(compact_footer, partition_aware, **kwargs) + super().__init__(compact_footer, partition_aware, event_listeners, **kwargs) def connect(self, *args): """ @@ -382,7 +387,6 @@ def _connect(self, nodes): self._current_node = i except connection_errors: - conn.failed = True if self.partition_aware: # schedule the reconnection conn.reconnect() @@ -565,7 +569,7 @@ def _get_affinity(self, conn: 'Connection', caches: Iterable[int]) -> Dict: """ for _ in range(AFFINITY_RETRIES or 1): result = cache_get_node_partitions(conn, caches) - if result.status == 0 and result.value['partition_mapping']: + if result.status == 0: break time.sleep(AFFINITY_DELAY) @@ -608,9 +612,9 @@ def get_best_node( self._update_affinity(full_affinity) - for conn in self._nodes: - if not conn.alive: - conn.reconnect() + for node in self._nodes: + if not node.alive: + node.reconnect() c_id = cache.cache_id if isinstance(cache, BaseCache) else cache_id(cache) parts = self._cache_partition_mapping(c_id).get('number_of_partitions') diff --git a/pyignite/connection/aio_connection.py b/pyignite/connection/aio_connection.py index c5fa24d..89de49d 100644 --- a/pyignite/connection/aio_connection.py +++ b/pyignite/connection/aio_connection.py @@ -190,10 +190,10 @@ async def _connect(self): self._on_handshake_fail(e) raise e except Exception as e: + self._on_handshake_fail(e) # restore undefined protocol version if detecting_protocol: self.client.protocol_context = None - self._on_handshake_fail(e) raise e self._on_handshake_success(result) diff --git a/pyignite/connection/connection.py b/pyignite/connection/connection.py index ae5587a..2b9970a 100644 --- a/pyignite/connection/connection.py +++ b/pyignite/connection/connection.py @@ -99,7 +99,9 @@ def _process_handshake_error(self, response): def _on_handshake_start(self): if logger.isEnabledFor(logging.DEBUG): logger.debug("Connecting to node(address=%s, port=%d) with protocol context %s", - self.host, self.port, self.client.protocol_context) + self.host, self.port, self.protocol_context) + if self._enabled_connection_listener: + self._connection_listener.publish_handshake_start(self.host, self.port, self.protocol_context) def _on_handshake_success(self, result): features = BitmaskFeature.from_array(result.get('features', None)) @@ -109,24 +111,45 @@ def _on_handshake_success(self, result): if logger.isEnabledFor(logging.DEBUG): logger.debug("Connected to node(address=%s, port=%d, node_uuid=%s) with protocol context %s", - self.host, self.port, self.uuid, self.client.protocol_context) + self.host, self.port, self.uuid, self.protocol_context) + if self._enabled_connection_listener: + self._connection_listener.publish_handshake_success(self.host, self.port, self.protocol_context, self.uuid) def _on_handshake_fail(self, err): + self.failed = True + if isinstance(err, AuthenticationError): logger.error("Authentication failed while connecting to node(address=%s, port=%d): %s", self.host, self.port, err) + if self._enabled_connection_listener: + self._connection_listener.publish_authentication_fail(self.host, self.port, self.protocol_context, err) else: logger.error("Failed to perform handshake, connection to node(address=%s, port=%d) " "with protocol context %s failed: %s", - self.host, self.port, self.client.protocol_context, err, exc_info=True) + self.host, self.port, self.protocol_context, err, exc_info=True) + if self._enabled_connection_listener: + self._connection_listener.publish_handshake_fail(self.host, self.port, self.protocol_context, err) def 
_on_connection_lost(self, err=None, expected=False): - if expected and logger.isEnabledFor(logging.DEBUG): - logger.debug("Connection closed to node(address=%s, port=%d, node_uuid=%s)", - self.host, self.port, self.uuid) + if expected: + if logger.isEnabledFor(logging.DEBUG): + logger.debug("Connection closed to node(address=%s, port=%d, node_uuid=%s)", + self.host, self.port, self.uuid) + if self._enabled_connection_listener: + self._connection_listener.publish_connection_closed(self.host, self.port, self.uuid) else: logger.info("Connection lost to node(address=%s, port=%d, node_uuid=%s): %s", self.host, self.port, self.uuid, err) + if self._enabled_connection_listener: + self._connection_listener.publish_connection_lost(self.host, self.port, self.uuid, err) + + @property + def _enabled_connection_listener(self): + return self.client._event_listeners and self.client._event_listeners.enabled_connection_listener + + @property + def _connection_listener(self): + return self.client._event_listeners class Connection(BaseConnection): @@ -216,10 +239,10 @@ def connect(self): self._on_handshake_fail(e) raise e except Exception as e: + self._on_handshake_fail(e) # restore undefined protocol version if detecting_protocol: self.client.protocol_context = None - self._on_handshake_fail(e) raise e self._on_handshake_success(result) @@ -260,7 +283,7 @@ def reconnect(self): if self.alive: return - self.close() + self.close(on_reconnect=True) # connect and silence the connection errors try: @@ -352,7 +375,7 @@ def recv(self, flags=None, reconnect=True) -> bytearray: return data - def close(self): + def close(self, on_reconnect=False): """ Try to mark socket closed, then unlink it. This is recommended but not required, since sockets are automatically closed when @@ -364,5 +387,6 @@ def close(self): self._socket.close() except connection_errors: pass - self._on_connection_lost(expected=True) + if not on_reconnect and not self.failed: + self._on_connection_lost(expected=True) self._socket = None diff --git a/pyignite/connection/protocol_context.py b/pyignite/connection/protocol_context.py index 58f509e..ba6d9e4 100644 --- a/pyignite/connection/protocol_context.py +++ b/pyignite/connection/protocol_context.py @@ -44,6 +44,9 @@ def _ensure_consistency(self): if not self.is_feature_flags_supported(): self._features = None + def copy(self): + return ProtocolContext(self.version, self.features) + @property def version(self): return getattr(self, '_version', None) diff --git a/pyignite/monitoring.py b/pyignite/monitoring.py new file mode 100644 index 0000000..9bbfd20 --- /dev/null +++ b/pyignite/monitoring.py @@ -0,0 +1,457 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" Tools to monitor client's events. 
+ +For example, a simple query logger might be implemented like this:: + + import logging + + from pyignite import monitoring + + class QueryLogger(monitoring.QueryEventListener): + + def on_query_start(self, event): + logging.info(f"Query {event.op_name} with query id " + f"{event.query_id} started on server " + f"{event.host}:{event.port}") + + def on_query_fail(self, event): + logging.info(f"Query {event.op_name} with query id " + f"{event.query_id} on server " + f"{event.host}:{event.port} " + f"failed in {event.duration}ms " + f"with error {event.error_msg}") + + def on_query_success(self, event): + logging.info(f"Query {event.op_name} with query id " + f"{event.query_id} on server " \ + f"{event.host}:{event.port} " \ + f"succeeded in {event.duration}ms") + +:class:`~ConnectionEventListener` is also available. + +Event listeners can be registered by passing parameter to :class:`~pyignite.client.Client` or +:class:`~pyignite.aio_client.AioClient` constructor:: + + client = Client(event_listeners=[QueryLogger()]) + with client.connect('127.0.0.1', 10800): + .... + +.. note:: Events are delivered **synchronously**. Application threads block + waiting for event handlers. Care must be taken to ensure that your event handlers are efficient + enough to not adversely affect overall application performance. + +.. note:: Debug logging is also available, standard ``logging`` is used. Just set ``DEBUG`` level to + *pyignite* logger. +| +| +""" +from typing import Optional, Sequence + + +class _BaseEvent: + def __init__(self, **kwargs): + if kwargs: + for k, v in kwargs.items(): + object.__setattr__(self, k, v) + + def __setattr__(self, name, value): + raise TypeError(f'{self.__class__.__name__} is immutable') + + def __repr__(self): + pass + + +class _ConnectionEvent(_BaseEvent): + __slots__ = ('host', 'port') + host: str + port: int + + def __init__(self, host, port, **kwargs): + super().__init__(host=host, port=port, **kwargs) + + +class _HandshakeEvent(_ConnectionEvent): + __slots__ = ('protocol_context',) + protocol_context: Optional['ProtocolContext'] + + def __init__(self, host, port, protocol_context=None, **kwargs): + super().__init__(host, port, protocol_context=protocol_context.copy() if protocol_context else None, **kwargs) + + def __repr__(self): + return f"{self.__class__.__name__}(host={self.host}, port={self.port}, " \ + f"protocol_context={self.protocol_context})" + + +class HandshakeStartEvent(_HandshakeEvent): + """ + Published when a handshake started. + + :ivar host: Address of the node to connect, + :ivar port: Port number of the node to connect, + :ivar protocol_context: Client's protocol context. + """ + def __init__(self, host, port, protocol_context=None, **kwargs): + """ + This class is not supposed to be constructed by user. + """ + super().__init__(host, port, protocol_context, **kwargs) + + +class HandshakeFailedEvent(_HandshakeEvent): + """ + Published when a handshake failed. + + :ivar host: Address of the node to connect, + :ivar port: Port number of the node to connect, + :ivar protocol_context: Client's protocol context, + :ivar error_msg: Error message. + """ + __slots__ = ('error_msg',) + error_msg: str + + def __init__(self, host, port, protocol_context=None, err=None, **kwargs): + """ + This class is not supposed to be constructed by user. 
+ """ + super().__init__(host, port, protocol_context, error_msg=repr(err) if err else '', **kwargs) + + def __repr__(self): + return f"{self.__class__.__name__}(host={self.host}, port={self.port}, " \ + f"protocol_context={self.protocol_context}, error_msg={self.error_msg})" + + +class AuthenticationFailedEvent(HandshakeFailedEvent): + """ + Published when an authentication is failed. + + :ivar host: Address of the node to connect, + :ivar port: Port number of the node to connect, + :ivar protocol_context: Client protocol context, + :ivar error_msg: Error message. + """ + pass + + +class HandshakeSuccessEvent(_HandshakeEvent): + """ + Published when a handshake succeeded. + + :ivar host: Address of the node to connect, + :ivar port: Port number of the node to connect, + :ivar protocol_context: Client's protocol context, + :ivar node_uuid: Node's uuid, string. + """ + __slots__ = ('node_uuid',) + node_uuid: str + + def __init__(self, host, port, protocol_context, node_uuid, **kwargs): + """ + This class is not supposed to be constructed by user. + """ + super().__init__(host, port, protocol_context, node_uuid=str(node_uuid) if node_uuid else '', **kwargs) + + def __repr__(self): + return f"{self.__class__.__name__}(host={self.host}, port={self.port}, " \ + f"node_uuid={self.node_uuid}, protocol_context={self.protocol_context})" + + +class ConnectionClosedEvent(_ConnectionEvent): + """ + Published when a connection to the node is expectedly closed. + + :ivar host: Address of node to connect, + :ivar port: Port number of node to connect, + :ivar node_uuid: Node uuid, string. + """ + __slots__ = ('node_uuid',) + node_uuid: str + + def __init__(self, host, port, node_uuid, **kwargs): + """ + This class is not supposed to be constructed by user. + """ + super().__init__(host, port, node_uuid=str(node_uuid) if node_uuid else '', **kwargs) + + def __repr__(self): + return f"{self.__class__.__name__}(host={self.host}, port={self.port}, node_uuid={self.node_uuid})" + + +class ConnectionLostEvent(ConnectionClosedEvent): + """ + Published when a connection to the node is lost. + + :ivar host: Address of the node to connect, + :ivar port: Port number of the node to connect, + :ivar node_uuid: Node's uuid, string, + :ivar error_msg: Error message. + """ + __slots__ = ('error_msg',) + node_uuid: str + error_msg: str + + def __init__(self, host, port, node_uuid, err, **kwargs): + """ + This class is not supposed to be constructed by user. + """ + super().__init__(host, port, node_uuid, error_msg=repr(err) if err else '', **kwargs) + + def __repr__(self): + return f"{self.__class__.__name__}(host={self.host}, port={self.port}, " \ + f"node_uuid={self.node_uuid}, error_msg={self.error_msg})" + + +class _EventListener: + pass + + +class ConnectionEventListener(_EventListener): + """ + Base class for connection event listeners. + """ + def on_handshake_start(self, event: HandshakeStartEvent): + """ + Handle handshake start event. + + :param event: Instance of :class:`HandshakeStartEvent`. + """ + pass + + def on_handshake_success(self, event: HandshakeSuccessEvent): + """ + Handle handshake success event. + + :param event: Instance of :class:`HandshakeSuccessEvent`. + """ + pass + + def on_handshake_fail(self, event: HandshakeFailedEvent): + """ + Handle handshake failed event. + + :param event: Instance of :class:`HandshakeFailedEvent`. + """ + pass + + def on_authentication_fail(self, event: AuthenticationFailedEvent): + """ + Handle authentication failed event. 
+ + :param event: Instance of :class:`AuthenticationFailedEvent`. + """ + pass + + def on_connection_closed(self, event: ConnectionClosedEvent): + """ + Handle connection closed event. + + :param event: Instance of :class:`ConnectionClosedEvent`. + """ + pass + + def on_connection_lost(self, event: ConnectionLostEvent): + """ + Handle connection lost event. + + :param event: Instance of :class:`ConnectionLostEvent`. + """ + pass + + +class _QueryEvent(_BaseEvent): + __slots__ = ('host', 'port', 'node_uuid', 'query_id', 'op_code', 'op_name') + host: str + port: int + node_uuid: str + query_id: int + op_code: int + op_name: str + + def __init__(self, host, port, node_uuid, query_id, op_code, op_name, **kwargs): + """ + This class is not supposed to be constructed by user. + """ + super().__init__(host=host, port=port, node_uuid=str(node_uuid) if node_uuid else '', + query_id=query_id, op_code=op_code, op_name=op_name, **kwargs) + + def __repr__(self): + return f"{self.__class__.__name__}(host={self.host}, port={self.port}, " \ + f"node_uuid={self.node_uuid}, query_id={self.query_id}, " \ + f"op_code={self.op_code}, op_name={self.op_name})" + + +class QueryStartEvent(_QueryEvent): + """ + Published when a client's query started. + + :ivar host: Address of the node on which the query is executed, + :ivar port: Port number of the node on which the query is executed, + :ivar node_uuid: Node's uuid, string, + :ivar query_id: Query's id, + :ivar op_code: Operation's id, + :ivar op_name: Operation's name. + """ + pass + + +class QuerySuccessEvent(_QueryEvent): + """ + Published when a client's query finished successfully. + + :ivar host: Address of the node on which the query is executed, + :ivar port: Port number of the node on which the query is executed, + :ivar node_uuid: Node's uuid, string, + :ivar query_id: Query's id, + :ivar op_code: Operation's id, + :ivar op_name: Operation's name, + :ivar duration: Query's duration in milliseconds. + """ + __slots__ = ('duration', ) + duration: int + + def __init__(self, host, port, node_uuid, query_id, op_code, op_name, duration, **kwargs): + super().__init__(host, port, node_uuid, query_id, op_code, op_name, duration=duration, **kwargs) + + def __repr__(self): + return f"{self.__class__.__name__}(host={self.host}, port={self.port}, " \ + f"node_uuid={self.node_uuid}, query_id={self.query_id}, " \ + f"op_code={self.op_code}, op_name={self.op_name}, duration={self.duration})" + + +class QueryFailEvent(_QueryEvent): + """ + Published when a client's query failed. + + :ivar host: Address of the node on which the query is executed, + :ivar port: Port number of the node on which the query is executed, + :ivar node_uuid: Node's uuid, string, + :ivar query_id: Query's id, + :ivar op_code: Operation's id, + :ivar op_name: Operation's name, + :ivar duration: Query's duration in milliseconds, + :ivar error_msg: Error message. 
+ """ + __slots__ = ('duration', 'err_msg') + duration: int + err_msg: str + + def __init__(self, host, port, node_uuid, query_id, op_code, op_name, duration, err, **kwargs): + super().__init__(host, port, node_uuid, query_id, op_code, op_name, duration=duration, + err_msg=repr(err) if err else '', **kwargs) + + def __repr__(self): + return f"{self.__class__.__name__}(host={self.host}, port={self.port}, " \ + f"node_uuid={self.node_uuid}, query_id={self.query_id}, op_code={self.op_code}, " \ + f"op_name={self.op_name}, duration={self.duration}, err_msg={self.err_msg})" + + +class QueryEventListener(_EventListener): + """ + Base class for query event listeners. + """ + def on_query_start(self, event: QueryStartEvent): + """ + Handle query start event. + + :param event: Instance of :class:`QueryStartEvent`. + """ + pass + + def on_query_success(self, event: QuerySuccessEvent): + """ + Handle query success event. + + :param event: Instance of :class:`QuerySuccessEvent`. + """ + pass + + def on_query_fail(self, event: QueryFailEvent): + """ + Handle query fail event. + + :param event: Instance of :class:`QueryFailEvent`. + """ + pass + + +class _EventListeners: + def __init__(self, listeners: Optional[Sequence]): + self.__connection_listeners = [] + self.__query_listeners = [] + if listeners: + for listener in listeners: + if isinstance(listener, ConnectionEventListener): + self.__connection_listeners.append(listener) + elif isinstance(listener, QueryEventListener): + self.__query_listeners.append(listener) + + @property + def enabled_connection_listener(self): + return bool(self.__connection_listeners) + + @property + def enabled_query_listener(self): + return bool(self.__query_listeners) + + def publish_handshake_start(self, host, port, protocol_context): + evt = HandshakeStartEvent(host, port, protocol_context) + self.__publish_connection_events(lambda listener: listener.on_handshake_start(evt)) + + def publish_handshake_success(self, host, port, protocol_context, node_uuid): + evt = HandshakeSuccessEvent(host, port, protocol_context, node_uuid) + self.__publish_connection_events(lambda listener: listener.on_handshake_success(evt)) + + def publish_handshake_fail(self, host, port, protocol_context, err): + evt = HandshakeFailedEvent(host, port, protocol_context, err) + self.__publish_connection_events(lambda listener: listener.on_handshake_fail(evt)) + + def publish_authentication_fail(self, host, port, protocol_context, err): + evt = AuthenticationFailedEvent(host, port, protocol_context, err) + self.__publish_connection_events(lambda listener: listener.on_authentication_fail(evt)) + + def publish_connection_closed(self, host, port, node_uuid): + evt = ConnectionClosedEvent(host, port, node_uuid) + self.__publish_connection_events(lambda listener: listener.on_connection_closed(evt)) + + def publish_connection_lost(self, host, port, node_uuid, err): + evt = ConnectionLostEvent(host, port, node_uuid, err) + self.__publish_connection_events(lambda listener: listener.on_connection_lost(evt)) + + def publish_query_start(self, host, port, node_uuid, query_id, op_code, op_name): + evt = QueryStartEvent(host, port, node_uuid, query_id, op_code, op_name) + self.__publish_query_events(lambda listener: listener.on_query_start(evt)) + + def publish_query_success(self, host, port, node_uuid, query_id, op_code, op_name, duration): + evt = QuerySuccessEvent(host, port, node_uuid, query_id, op_code, op_name, duration) + self.__publish_query_events(lambda listener: listener.on_query_success(evt)) + + def 
publish_query_fail(self, host, port, node_uuid, query_id, op_code, op_name, duration, err): + evt = QueryFailEvent(host, port, node_uuid, query_id, op_code, op_name, duration, err) + self.__publish_query_events(lambda listener: listener.on_query_fail(evt)) + + def __publish_connection_events(self, callback): + try: + for listener in self.__connection_listeners: + callback(listener) + except: # noqa: 13 + pass + + def __publish_query_events(self, callback): + try: + for listener in self.__query_listeners: + callback(listener) + except: # noqa: 13 + pass diff --git a/pyignite/queries/query.py b/pyignite/queries/query.py index 89c354e..c141b26 100644 --- a/pyignite/queries/query.py +++ b/pyignite/queries/query.py @@ -227,12 +227,25 @@ def __post_process_response(conn, response_struct, response): # build result return APIResult(response) + @staticmethod + def _enabled_query_listener(conn): + client = conn.client + return client._event_listeners and client._event_listeners.enabled_query_listener + + @staticmethod + def _event_listener(conn): + return conn.client._event_listeners + def _on_query_started(self, conn): + self._start_ts = time.monotonic() if logger.isEnabledFor(logging.DEBUG): - self._start_ts = time.monotonic() logger.debug("Start query(query_id=%d, op_type=%s, host=%s, port=%d, node_id=%s)", self.query_id, _get_op_code_name(self.op_code), conn.host, conn.port, conn.uuid) + if self._enabled_query_listener(conn): + self._event_listener(conn).publish_query_start(conn.host, conn.port, conn.uuid, self.query_id, + self.op_code, _get_op_code_name(self.op_code)) + def _on_query_finished(self, conn, result=None, err=None): if logger.isEnabledFor(logging.DEBUG): dur_ms = _sec_to_millis(time.monotonic() - self._start_ts) @@ -240,12 +253,20 @@ def _on_query_finished(self, conn, result=None, err=None): err = result.message if err: logger.debug("Failed to perform query(query_id=%d, op_type=%s, host=%s, port=%d, node_id=%s) " - "in %.3f ms: %s", self.query_id, _get_op_code_name(self.op_code), + "in %d ms: %s", self.query_id, _get_op_code_name(self.op_code), conn.host, conn.port, conn.uuid, dur_ms, err) + if self._enabled_query_listener(conn): + self._event_listener(conn).publish_query_fail(conn.host, conn.port, conn.uuid, self.query_id, + self.op_code, _get_op_code_name(self.op_code), + dur_ms, err) else: logger.debug("Finished query(query_id=%d, op_type=%s, host=%s, port=%d, node_id=%s) " - "successfully in %.3f ms", self.query_id, _get_op_code_name(self.op_code), + "successfully in %d ms", self.query_id, _get_op_code_name(self.op_code), conn.host, conn.port, conn.uuid, dur_ms) + if self._enabled_query_listener(conn): + self._event_listener(conn).publish_query_success(conn.host, conn.port, conn.uuid, self.query_id, + self.op_code, _get_op_code_name(self.op_code), + dur_ms) class ConfigQuery(Query): diff --git a/pyignite/stream/aio_cluster.py b/pyignite/stream/aio_cluster.py deleted file mode 100644 index 8a2f98e..0000000 --- a/pyignite/stream/aio_cluster.py +++ /dev/null @@ -1,53 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This module contains `AioCluster` that lets you get info and change state of the -whole cluster. -""" -from pyignite import AioClient -from pyignite.api.cluster import cluster_get_state_async, cluster_set_state_async - - -class AioCluster: - """ - Ignite cluster abstraction. Users should never use this class directly, - but construct its instances with - :py:meth:`~pyignite.aio_client.AioClient.get_cluster` method instead. - """ - - def __init__(self, client: 'AioClient'): - self._client = client - - async def get_state(self): - """ - Gets current cluster state. - - :return: Current cluster state. This is one of ClusterState.INACTIVE, - ClusterState.ACTIVE or ClusterState.ACTIVE_READ_ONLY. - """ - return await cluster_get_state_async(await self._client.random_node()) - - async def set_state(self, state): - """ - Changes current cluster state to the given. - - Note: Deactivation clears in-memory caches (without persistence) - including the system caches. - - :param state: New cluster state. This is one of ClusterState.INACTIVE, - ClusterState.ACTIVE or ClusterState.ACTIVE_READ_ONLY. - """ - return await cluster_set_state_async(await self._client.random_node(), state) diff --git a/tests/affinity/test_affinity.py b/tests/affinity/test_affinity.py index 3097991..c9a6b60 100644 --- a/tests/affinity/test_affinity.py +++ b/tests/affinity/test_affinity.py @@ -36,7 +36,6 @@ def test_get_node_partitions(client, caches): cache_ids = [cache.cache_id for cache in caches] - __wait_for_ready_affinity(client, cache_ids) mappings = __get_mappings(client, cache_ids) __check_mappings(mappings, cache_ids) @@ -44,7 +43,6 @@ def test_get_node_partitions(client, caches): @pytest.mark.asyncio async def test_get_node_partitions_async(async_client, async_caches): cache_ids = [cache.cache_id for cache in async_caches] - await __wait_for_ready_affinity(async_client, cache_ids) mappings = await __get_mappings(async_client, cache_ids) __check_mappings(mappings, cache_ids) @@ -157,6 +155,7 @@ def inner(): caches = [] try: caches = generate_caches() + __wait_for_ready_affinity(client, [cache.cache_id for cache in caches]) yield caches finally: for cache in caches: @@ -166,6 +165,7 @@ async def inner_async(): caches = [] try: caches = await generate_caches() + await __wait_for_ready_affinity(client, [cache.cache_id for cache in caches]) yield caches finally: await asyncio.gather(*[cache.destroy() for cache in caches]) @@ -180,6 +180,7 @@ def cache(client): PROP_CACHE_MODE: CacheMode.PARTITIONED, }) try: + __wait_for_ready_affinity(client, [cache.cache_id]) yield cache finally: cache.destroy() @@ -192,6 +193,7 @@ async def async_cache(async_client): PROP_CACHE_MODE: CacheMode.PARTITIONED, }) try: + await __wait_for_ready_affinity(async_client, [cache.cache_id]) yield cache finally: await cache.destroy() diff --git a/tests/affinity/test_affinity_request_routing.py b/tests/affinity/test_affinity_request_routing.py index 0d0ec24..b73eff3 100644 --- a/tests/affinity/test_affinity_request_routing.py +++ b/tests/affinity/test_affinity_request_routing.py @@ -22,11 +22,10 @@ from pyignite import GenericObjectMeta, 
AioClient, Client from pyignite.aio_cache import AioCache -from pyignite.connection import Connection, AioConnection -from pyignite.constants import PROTOCOL_BYTE_ORDER from pyignite.datatypes import String, LongObject from pyignite.datatypes.cache_config import CacheMode from pyignite.datatypes.prop_codes import PROP_NAME, PROP_BACKUPS_NUMBER, PROP_CACHE_KEY_CONFIGURATION, PROP_CACHE_MODE +from pyignite.monitoring import QueryEventListener from tests.util import wait_for_condition, wait_for_condition_async, start_ignite, kill_process_tree try: @@ -35,41 +34,37 @@ from async_generator import asynccontextmanager requests = deque() -old_send = Connection.send -old_send_async = AioConnection._send -def patched_send(self, *args, **kwargs): - """Patched send function that push to queue idx of server to which request is routed.""" - buf = args[0] - if buf and len(buf) >= 6: - op_code = int.from_bytes(buf[4:6], byteorder=PROTOCOL_BYTE_ORDER) - # Filter only caches operation. - if 1000 <= op_code < 1100: - requests.append(self.port % 100) - return old_send(self, *args, **kwargs) +class QueryRouteListener(QueryEventListener): + def on_query_start(self, event): + if 1000 <= event.op_code < 1100: + requests.append(event.port % 100) -async def patched_send_async(self, *args, **kwargs): - """Patched send function that push to queue idx of server to which request is routed.""" - buf = args[1] - if buf and len(buf) >= 6: - op_code = int.from_bytes(buf[4:6], byteorder=PROTOCOL_BYTE_ORDER) - # Filter only caches operation. - if 1000 <= op_code < 1100: - requests.append(self.port % 100) - return await old_send_async(self, *args, **kwargs) +client_connection_string = [('127.0.0.1', 10800 + idx) for idx in range(1, 5)] -def setup_function(): - requests.clear() - Connection.send = patched_send - AioConnection._send = patched_send_async +@pytest.fixture +def client(): + client = Client(partition_aware=True, event_listeners=[QueryRouteListener()]) + try: + client.connect(client_connection_string) + yield client + finally: + requests.clear() + client.close() -def teardown_function(): - Connection.send = old_send - AioConnection.send = old_send_async +@pytest.fixture +async def async_client(event_loop): + client = AioClient(partition_aware=True, event_listeners=[QueryRouteListener()]) + try: + await client.connect(client_connection_string) + yield client + finally: + requests.clear() + await client.close() def wait_for_affinity_distribution(cache, key, node_idx, timeout=30): @@ -112,7 +107,8 @@ async def check_grid_idx(): @pytest.mark.parametrize("key,grid_idx", [(1, 1), (2, 2), (3, 3), (4, 1), (5, 1), (6, 2), (11, 1), (13, 1), (19, 1)]) @pytest.mark.parametrize("backups", [0, 1, 2, 3]) -def test_cache_operation_on_primitive_key_routes_request_to_primary_node(request, key, grid_idx, backups, client): +def test_cache_operation_on_primitive_key_routes_request_to_primary_node(request, key, grid_idx, backups, + client): cache = client.get_or_create_cache({ PROP_NAME: request.node.name + str(backups), PROP_BACKUPS_NUMBER: backups, @@ -210,47 +206,24 @@ class AffinityTestType1( assert requests.pop() == grid_idx -client_routed_connection_string = [('127.0.0.1', 10800 + idx) for idx in range(1, 5)] - - @pytest.fixture -def client_routed(): - client = Client(partition_aware=True) - try: - client.connect(client_routed_connection_string) - yield client - finally: - client.close() - - -@pytest.fixture -def client_routed_cache(client_routed, request): - yield client_routed.get_or_create_cache(request.node.name) +def 
client_cache(client, request): + yield client.get_or_create_cache(request.node.name) @pytest.fixture -async def async_client_routed(event_loop): - client = AioClient(partition_aware=True) - try: - await client.connect(client_routed_connection_string) - yield client - finally: - await client.close() - - -@pytest.fixture -async def async_client_routed_cache(async_client_routed, request): - cache = await async_client_routed.get_or_create_cache(request.node.name) +async def async_client_cache(async_client, request): + cache = await async_client.get_or_create_cache(request.node.name) yield cache -def test_cache_operation_routed_to_new_cluster_node(client_routed_cache): - __perform_cache_operation_routed_to_new_node(client_routed_cache) +def test_cache_operation_routed_to_new_cluster_node(client_cache): + __perform_cache_operation_routed_to_new_node(client_cache) @pytest.mark.asyncio -async def test_cache_operation_routed_to_new_cluster_node_async(async_client_routed_cache): - await __perform_cache_operation_routed_to_new_node(async_client_routed_cache) +async def test_cache_operation_routed_to_new_cluster_node_async(async_client_cache): + await __perform_cache_operation_routed_to_new_node(async_client_cache) def __perform_cache_operation_routed_to_new_node(cache): @@ -328,6 +301,55 @@ async def test_replicated_cache_operation_routed_to_random_node_async(async_repl await verify_random_node(async_replicated_cache) +def test_replicated_cache_operation_not_routed_to_failed_node(replicated_cache): + srv = start_ignite(idx=4) + try: + while True: + replicated_cache.put(1, 1) + + if requests.pop() == 4: + break + + kill_process_tree(srv.pid) + + num_failures = 0 + for i in range(100): + # Request may fail one time, because query can be requested before affinity update or connection + # lost will be detected. + try: + replicated_cache.put(1, 1) + except: # noqa 13 + num_failures += 1 + assert num_failures <= 1, "Expected no more than 1 failure." + finally: + kill_process_tree(srv.pid) + + +@pytest.mark.asyncio +async def test_replicated_cache_operation_not_routed_to_failed_node_async(async_replicated_cache): + srv = start_ignite(idx=4) + try: + while True: + await async_replicated_cache.put(1, 1) + + if requests.pop() == 4: + break + + kill_process_tree(srv.pid) + + num_failures = 0 + for i in range(100): + # Request may fail one time, because query can be requested before affinity update or connection + # lost will be detected. + try: + await async_replicated_cache.put(1, 1) + except: # noqa 13 + num_failures += 1 + assert num_failures <= 1, "Expected no more than 1 failure." 
+ finally: + kill_process_tree(srv.pid) + + def verify_random_node(cache): key = 1 @@ -423,8 +445,8 @@ async def test_new_registered_cache_affinity_async(async_client): assert requests.pop() == 3 -def test_all_registered_cache_updated_on_new_server(client_routed): - with create_caches(client_routed) as caches: +def test_all_registered_cache_updated_on_new_server(client): + with create_caches(client) as caches: key = 12 test_cache = random.choice(caches) wait_for_affinity_distribution(test_cache, key, 3) @@ -444,8 +466,8 @@ def test_all_registered_cache_updated_on_new_server(client_routed): @pytest.mark.asyncio -async def test_all_registered_cache_updated_on_new_server_async(async_client_routed): - async with create_caches_async(async_client_routed) as caches: +async def test_all_registered_cache_updated_on_new_server_async(async_client): + async with create_caches_async(async_client) as caches: key = 12 test_cache = random.choice(caches) await wait_for_affinity_distribution_async(test_cache, key, 3) diff --git a/tests/common/test_query_listener.py b/tests/common/test_query_listener.py new file mode 100644 index 0000000..afff542 --- /dev/null +++ b/tests/common/test_query_listener.py @@ -0,0 +1,127 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import pytest + +from pyignite import Client, AioClient +from pyignite.exceptions import CacheError +from pyignite.monitoring import QueryEventListener, QueryStartEvent, QueryFailEvent, QuerySuccessEvent +from pyignite.queries.op_codes import OP_CACHE_PUT, OP_CACHE_PARTITIONS, OP_CLUSTER_GET_STATE + +events = [] + + +class QueryRouteListener(QueryEventListener): + def on_query_start(self, event): + if event.op_code != OP_CACHE_PARTITIONS: + events.append(event) + + def on_query_fail(self, event): + if event.op_code != OP_CACHE_PARTITIONS: + events.append(event) + + def on_query_success(self, event): + if event.op_code != OP_CACHE_PARTITIONS: + events.append(event) + + +@pytest.fixture +def client(): + client = Client(event_listeners=[QueryRouteListener()]) + try: + client.connect('127.0.0.1', 10801) + yield client + finally: + client.close() + events.clear() + + +@pytest.fixture +async def async_client(event_loop): + client = AioClient(event_listeners=[QueryRouteListener()]) + try: + await client.connect('127.0.0.1', 10801) + yield client + finally: + await client.close() + events.clear() + + +def test_query_fail_events(request, client): + with pytest.raises(CacheError): + cache = client.get_cache(request.node.name) + cache.put(1, 1) + + __assert_fail_events(client) + + +@pytest.mark.asyncio +async def test_query_fail_events_async(request, async_client): + with pytest.raises(CacheError): + cache = await async_client.get_cache(request.node.name) + await cache.put(1, 1) + + __assert_fail_events(async_client) + + +def __assert_fail_events(client): + assert len(events) == 2 + conn = client._nodes[0] + for ev in events: + if isinstance(ev, QueryStartEvent): + assert ev.op_code == OP_CACHE_PUT + assert ev.op_name == 'OP_CACHE_PUT' + assert ev.host == conn.host + assert ev.port == conn.port + assert ev.node_uuid == str(conn.uuid if conn.uuid else '') + + if isinstance(ev, QueryFailEvent): + assert ev.op_code == OP_CACHE_PUT + assert ev.op_name == 'OP_CACHE_PUT' + assert ev.host == conn.host + assert ev.port == conn.port + assert ev.node_uuid == str(conn.uuid if conn.uuid else '') + assert 'Cache does not exist' in ev.err_msg + assert ev.duration > 0 + + +def test_query_success_events(client): + client.get_cluster().get_state() + __assert_success_events(client) + + +@pytest.mark.asyncio +async def test_query_success_events_async(async_client): + await async_client.get_cluster().get_state() + __assert_success_events(async_client) + + +def __assert_success_events(client): + assert len(events) == 2 + conn = client._nodes[0] + for ev in events: + if isinstance(ev, QueryStartEvent): + assert ev.op_code == OP_CLUSTER_GET_STATE + assert ev.op_name == 'OP_CLUSTER_GET_STATE' + assert ev.host == conn.host + assert ev.port == conn.port + assert ev.node_uuid == str(conn.uuid if conn.uuid else '') + + if isinstance(ev, QuerySuccessEvent): + assert ev.op_code == OP_CLUSTER_GET_STATE + assert ev.op_name == 'OP_CLUSTER_GET_STATE' + assert ev.host == conn.host + assert ev.port == conn.port + assert ev.node_uuid == str(conn.uuid if conn.uuid else '') + assert ev.duration > 0 diff --git a/tests/custom/test_connection_events.py b/tests/custom/test_connection_events.py new file mode 100644 index 0000000..bee9395 --- /dev/null +++ b/tests/custom/test_connection_events.py @@ -0,0 +1,129 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import random + +import pytest + +from pyignite import Client, AioClient +from pyignite.monitoring import ConnectionEventListener, ConnectionLostEvent, ConnectionClosedEvent, \ + HandshakeSuccessEvent, HandshakeFailedEvent, HandshakeStartEvent + +from tests.util import start_ignite_gen, kill_process_tree + + +@pytest.fixture(autouse=True) +def server1(): + yield from start_ignite_gen(idx=1) + + +@pytest.fixture(autouse=True) +def server2(): + yield from start_ignite_gen(idx=2) + + +events = [] + + +def teardown_function(): + events.clear() + + +class RecordingConnectionEventListener(ConnectionEventListener): + def on_handshake_start(self, event): + events.append(event) + + def on_handshake_success(self, event): + events.append(event) + + def on_handshake_fail(self, event): + events.append(event) + + def on_authentication_fail(self, event): + events.append(event) + + def on_connection_closed(self, event): + events.append(event) + + def on_connection_lost(self, event): + events.append(event) + + +def test_events(request, server2): + client = Client(event_listeners=[RecordingConnectionEventListener()]) + with client.connect([('127.0.0.1', 10800 + idx) for idx in range(1, 3)]): + protocol_context = client.protocol_context + nodes = {conn.port: conn for conn in client._nodes} + cache = client.get_or_create_cache(request.node.name) + kill_process_tree(server2.pid) + + while True: + try: + cache.put(random.randint(0, 1000), 1) + except: # noqa 13 + pass + + if any(isinstance(e, ConnectionLostEvent) for e in events): + break + + __assert_events(nodes, protocol_context) + + +@pytest.mark.asyncio +async def test_events_async(request, server2): + client = AioClient(event_listeners=[RecordingConnectionEventListener()]) + async with client.connect([('127.0.0.1', 10800 + idx) for idx in range(1, 3)]): + protocol_context = client.protocol_context + nodes = {conn.port: conn for conn in client._nodes} + cache = await client.get_or_create_cache(request.node.name) + kill_process_tree(server2.pid) + + while True: + try: + await cache.put(random.randint(0, 1000), 1) + except: # noqa 13 + pass + + if any(isinstance(e, ConnectionLostEvent) for e in events): + break + + __assert_events(nodes, protocol_context) + + +def __assert_events(nodes, protocol_context): + assert len([e for e in events if isinstance(e, ConnectionLostEvent)]) == 1 + # ConnectionLostEvent is a subclass of ConnectionClosedEvent + assert len([e for e in events if type(e) == ConnectionClosedEvent]) == 1 + assert len([e for e in events if isinstance(e, HandshakeSuccessEvent)]) == 2 + + for ev in events: + assert ev.host == '127.0.0.1' + if isinstance(ev, ConnectionLostEvent): + assert ev.port == 10802 + assert ev.node_uuid == str(nodes[ev.port].uuid) + assert ev.error_msg + elif isinstance(ev, HandshakeStartEvent): + assert ev.protocol_context == protocol_context + assert ev.port in {10801, 10802} + elif isinstance(ev, HandshakeFailedEvent): + assert ev.port == 10802 + assert 
ev.protocol_context == protocol_context + assert ev.error_msg + elif isinstance(ev, HandshakeSuccessEvent): + assert ev.port in {10801, 10802} + assert ev.node_uuid == str(nodes[ev.port].uuid) + assert ev.protocol_context == protocol_context + elif isinstance(ev, ConnectionClosedEvent): + assert ev.port == 10801 + assert ev.node_uuid == str(nodes[ev.port].uuid) diff --git a/tests/security/conftest.py b/tests/security/conftest.py index d5de5a1..8845c31 100644 --- a/tests/security/conftest.py +++ b/tests/security/conftest.py @@ -16,6 +16,7 @@ import pytest +from pyignite import monitoring from tests.util import get_test_dir @@ -47,3 +48,26 @@ def __create_ssl_param(with_password=False): 'ssl_certfile': cert, 'ssl_ca_certfile': cert } + + +class AccumulatingConnectionListener(monitoring.ConnectionEventListener): + def __init__(self): + self.events = [] + + def on_handshake_start(self, event): + self.events.append(event) + + def on_handshake_success(self, event): + self.events.append(event) + + def on_handshake_fail(self, event): + self.events.append(event) + + def on_authentication_fail(self, event): + self.events.append(event) + + def on_connection_closed(self, event): + self.events.append(event) + + def on_connection_lost(self, event): + self.events.append(event) diff --git a/tests/security/test_auth.py b/tests/security/test_auth.py index 3586c91..503cf88 100644 --- a/tests/security/test_auth.py +++ b/tests/security/test_auth.py @@ -18,7 +18,11 @@ import pytest from pyignite import Client, AioClient +from pyignite.monitoring import ( + HandshakeStartEvent, HandshakeSuccessEvent, ConnectionClosedEvent, AuthenticationFailedEvent +) from pyignite.exceptions import AuthenticationError +from tests.security.conftest import AccumulatingConnectionListener from tests.util import start_ignite_gen, clear_ignite_work_dir DEFAULT_IGNITE_USERNAME = 'ignite' @@ -44,32 +48,58 @@ def cleanup(): def test_auth_success(with_ssl, ssl_params, caplog): ssl_params['use_ssl'] = with_ssl - client = Client(username=DEFAULT_IGNITE_USERNAME, password=DEFAULT_IGNITE_PASSWORD, **ssl_params) + listener = AccumulatingConnectionListener() + client = Client(username=DEFAULT_IGNITE_USERNAME, password=DEFAULT_IGNITE_PASSWORD, + event_listeners=[listener], **ssl_params) with caplog.at_level(logger='pyignite', level=logging.DEBUG): with client.connect("127.0.0.1", 10801): assert all(node.alive for node in client._nodes) + conn = client._nodes[0] - __assert_successful_connect_log(caplog) + __assert_successful_connect_log(conn, caplog) + __assert_successful_connect_events(conn, listener) @pytest.mark.asyncio async def test_auth_success_async(with_ssl, ssl_params, caplog): ssl_params['use_ssl'] = with_ssl - client = AioClient(username=DEFAULT_IGNITE_USERNAME, password=DEFAULT_IGNITE_PASSWORD, **ssl_params) + listener = AccumulatingConnectionListener() + client = AioClient(username=DEFAULT_IGNITE_USERNAME, password=DEFAULT_IGNITE_PASSWORD, + event_listeners=[listener], **ssl_params) with caplog.at_level(logger='pyignite', level=logging.DEBUG): async with client.connect("127.0.0.1", 10801): assert all(node.alive for node in client._nodes) + conn = client._nodes[0] - __assert_successful_connect_log(caplog) + __assert_successful_connect_log(conn, caplog) + __assert_successful_connect_events(conn, listener) -def __assert_successful_connect_log(caplog): - assert any(re.match(r'Connecting to node\(address=127.0.0.1,\s+port=10801', r.message) for r in caplog.records) - assert any(re.match(r'Connected to 
node\(address=127.0.0.1,\s+port=10801', r.message) for r in caplog.records) - assert any(re.match(r'Connection closed to node\(address=127.0.0.1,\s+port=10801', r.message) +def __assert_successful_connect_log(conn, caplog): + assert any(re.match(rf'Connecting to node\(address={conn.host},\s+port={conn.port}', r.message) + for r in caplog.records) + assert any(re.match(rf'Connected to node\(address={conn.host},\s+port={conn.port}', r.message) + for r in caplog.records) + assert any(re.match(rf'Connection closed to node\(address={conn.host},\s+port={conn.port}', r.message) for r in caplog.records) +def __assert_successful_connect_events(conn, listener): + event_classes = (HandshakeStartEvent, HandshakeSuccessEvent, ConnectionClosedEvent) + + for cls in event_classes: + any(isinstance(ev, cls) for ev in listener.events) + + for ev in listener.events: + if isinstance(ev, event_classes): + assert ev.host == conn.host + assert ev.port == conn.port + if isinstance(ev, (HandshakeSuccessEvent, ConnectionClosedEvent)): + assert ev.node_uuid == str(conn.uuid if conn.uuid else '') + if isinstance(ev, HandshakeSuccessEvent): + assert ev.protocol_context + + auth_failed_params = [ [DEFAULT_IGNITE_USERNAME, None], ['invalid_user', 'invalid_password'], @@ -83,13 +113,15 @@ def __assert_successful_connect_log(caplog): ) def test_auth_failed(username, password, with_ssl, ssl_params, caplog): ssl_params['use_ssl'] = with_ssl - + listener = AccumulatingConnectionListener() with pytest.raises(AuthenticationError): - client = Client(username=username, password=password, **ssl_params) + client = Client(username=username, password=password, + event_listeners=[listener], **ssl_params) with client.connect("127.0.0.1", 10801): pass - __assert_auth_failed_log(caplog) + __assert_auth_failed_log(caplog) + __assert_auth_failed_listener(listener) @pytest.mark.parametrize( @@ -99,15 +131,30 @@ def test_auth_failed(username, password, with_ssl, ssl_params, caplog): @pytest.mark.asyncio async def test_auth_failed_async(username, password, with_ssl, ssl_params, caplog): ssl_params['use_ssl'] = with_ssl - + listener = AccumulatingConnectionListener() with pytest.raises(AuthenticationError): - client = AioClient(username=username, password=password, **ssl_params) + client = AioClient(username=username, password=password, + event_listeners=[listener], **ssl_params) async with client.connect("127.0.0.1", 10801): pass - __assert_auth_failed_log(caplog) + __assert_auth_failed_log(caplog) + __assert_auth_failed_listener(listener) def __assert_auth_failed_log(caplog): pattern = r'Authentication failed while connecting to node\(address=127.0.0.1,\s+port=10801' - assert any(re.match(pattern, r.message) and r.levelname == logging.ERROR for r in caplog.records) + assert any(re.match(pattern, r.message) and r.levelname == logging.getLevelName(logging.ERROR) + for r in caplog.records) + + +def __assert_auth_failed_listener(listener): + found = False + for ev in listener.events: + if isinstance(ev, AuthenticationFailedEvent): + found = True + assert ev.host == '127.0.0.1' + assert ev.port == 10801 + assert ev.protocol_context + assert 'AuthenticationError' in ev.error_msg + assert found diff --git a/tests/security/test_ssl.py b/tests/security/test_ssl.py index 2cbed4b..ed0808b 100644 --- a/tests/security/test_ssl.py +++ b/tests/security/test_ssl.py @@ -17,8 +17,9 @@ import pytest -from pyignite import Client, AioClient +from pyignite import Client, AioClient, monitoring from pyignite.exceptions import ReconnectError +from 
tests.security.conftest import AccumulatingConnectionListener from tests.util import start_ignite_gen, get_or_create_cache, get_or_create_cache_async @@ -76,25 +77,41 @@ async def inner_async(): @pytest.mark.parametrize('invalid_ssl_params', invalid_params) def test_connection_error_with_incorrect_config(invalid_ssl_params, caplog): + listener = AccumulatingConnectionListener() with pytest.raises(ReconnectError): - client = Client(**invalid_ssl_params) + client = Client(event_listeners=[listener], **invalid_ssl_params) with client.connect([("127.0.0.1", 10801)]): pass - __assert_handshake_failed_log(caplog) + __assert_handshake_failed_log(caplog) + __assert_handshake_failed_listener(listener) @pytest.mark.parametrize('invalid_ssl_params', invalid_params) @pytest.mark.asyncio async def test_connection_error_with_incorrect_config_async(invalid_ssl_params, caplog): + listener = AccumulatingConnectionListener() with pytest.raises(ReconnectError): - client = AioClient(**invalid_ssl_params) + client = AioClient(event_listeners=[listener], **invalid_ssl_params) async with client.connect([("127.0.0.1", 10801)]): pass - __assert_handshake_failed_log(caplog) + __assert_handshake_failed_log(caplog) + __assert_handshake_failed_listener(listener) def __assert_handshake_failed_log(caplog): pattern = r'Failed to perform handshake, connection to node\(address=127.0.0.1,\s+port=10801.*failed:' - assert any(re.match(pattern, r.message) and r.levelname == logging.ERROR for r in caplog.records) + assert any(re.match(pattern, r.message) and r.levelname == logging.getLevelName(logging.ERROR) + for r in caplog.records) + + +def __assert_handshake_failed_listener(listener): + found = False + for ev in listener.events: + if isinstance(ev, monitoring.HandshakeFailedEvent): + found = True + assert ev.host == '127.0.0.1' + assert ev.port == 10801 + assert ev.error_msg + assert found From de07126cc4af51a04c12f6033609755a92da6d53 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Thu, 22 Jul 2021 09:18:47 +0300 Subject: [PATCH 50/62] IGNITE-15118 Implement handshake timeout - Fixes #47. --- .travis.yml | 2 +- examples/transactions.py | 6 + pyignite/aio_client.py | 35 +++- pyignite/client.py | 39 ++++- pyignite/connection/aio_connection.py | 48 +++--- pyignite/connection/connection.py | 59 ++++--- pyignite/connection/protocol_context.py | 3 + pyignite/transaction.py | 2 +- tests/common/test_query_listener.py | 18 +- tests/conftest.py | 5 - tests/custom/test_cluster.py | 2 +- tests/custom/test_connection_events.py | 31 ++-- tests/custom/test_handshake_timeout.py | 212 ++++++++++++++++++++++++ tests/security/test_auth.py | 3 +- tox.ini | 7 + 15 files changed, 394 insertions(+), 78 deletions(-) create mode 100644 tests/custom/test_handshake_timeout.py diff --git a/.travis.yml b/.travis.yml index 74909b8..2cd3e2b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -51,4 +51,4 @@ jobs: env: TOXENV=py39 install: pip install tox -script: tox \ No newline at end of file +script: tox diff --git a/examples/transactions.py b/examples/transactions.py index 53da05f..53e9c30 100644 --- a/examples/transactions.py +++ b/examples/transactions.py @@ -130,6 +130,12 @@ def sync_example(): if __name__ == '__main__': + client = Client() + with client.connect('127.0.0.1', 10800): + if not client.protocol_context.is_transactions_supported(): + print("'Transactions' API is not supported by cluster. 
Finishing...") + exit(0) + print("Starting sync example") sync_example() diff --git a/pyignite/aio_client.py b/pyignite/aio_client.py index 083c964..b6ded74 100644 --- a/pyignite/aio_client.py +++ b/pyignite/aio_client.py @@ -65,6 +65,9 @@ def __init__(self, compact_footer: bool = None, partition_aware: bool = True, """ Initialize client. + For the use of the SSL-related parameters see + https://docs.python.org/3/library/ssl.html#ssl-certificates. + :param compact_footer: (optional) use compact (True, recommended) or full (False) schema approach when serializing Complex objects. Default is to use the same approach the server is using (None). @@ -73,7 +76,37 @@ def __init__(self, compact_footer: bool = None, partition_aware: bool = True, :param partition_aware: (optional) try to calculate the exact data placement from the key before to issue the key operation to the server node, `True` by default, - :param event_listeners: (optional) event listeners. + :param event_listeners: (optional) event listeners, + :param handshake_timeout: (optional) sets timeout (in seconds) for performing handshake (connection) + with node. Default is 10.0 seconds, + :param use_ssl: (optional) set to True if Ignite server uses SSL + on its binary connector. Defaults to use SSL when username + and password has been supplied, not to use SSL otherwise, + :param ssl_version: (optional) SSL version constant from standard + `ssl` module. Defaults to TLS v1.2, + :param ssl_ciphers: (optional) ciphers to use. If not provided, + `ssl` default ciphers are used, + :param ssl_cert_reqs: (optional) determines how the remote side + certificate is treated: + + * `ssl.CERT_NONE` − remote certificate is ignored (default), + * `ssl.CERT_OPTIONAL` − remote certificate will be validated, + if provided, + * `ssl.CERT_REQUIRED` − valid remote certificate is required, + + :param ssl_keyfile: (optional) a path to SSL key file to identify + local (client) party, + :param ssl_keyfile_password: (optional) password for SSL key file, + can be provided when key file is encrypted to prevent OpenSSL + password prompt, + :param ssl_certfile: (optional) a path to ssl certificate file + to identify local (client) party, + :param ssl_ca_certfile: (optional) a path to a trusted certificate + or a certificate chain. Required to check the validity of the remote + (server-side) certificate, + :param username: (optional) user name to authenticate to Ignite + cluster, + :param password: (optional) password to authenticate to Ignite cluster. """ super().__init__(compact_footer, partition_aware, event_listeners, **kwargs) self._registry_mux = asyncio.Lock() diff --git a/pyignite/client.py b/pyignite/client.py index e3dd71b..397c52e 100644 --- a/pyignite/client.py +++ b/pyignite/client.py @@ -346,6 +346,9 @@ def __init__(self, compact_footer: bool = None, partition_aware: bool = True, """ Initialize client. + For the use of the SSL-related parameters see + https://docs.python.org/3/library/ssl.html#ssl-certificates. + :param compact_footer: (optional) use compact (True, recommended) or full (False) schema approach when serializing Complex objects. Default is to use the same approach the server is using (None). @@ -354,7 +357,41 @@ def __init__(self, compact_footer: bool = None, partition_aware: bool = True, :param partition_aware: (optional) try to calculate the exact data placement from the key before to issue the key operation to the server node, `True` by default, - :param event_listeners: (optional) event listeners. 
+ :param event_listeners: (optional) event listeners, + :param timeout: (optional) sets timeout (in seconds) for each socket + operation including `connect`. 0 means non-blocking mode, which is + virtually guaranteed to fail. Can accept integer or float value. + Default is None (blocking mode), + :param handshake_timeout: (optional) sets timeout (in seconds) for performing handshake (connection) + with node. Default is 10.0 seconds, + :param use_ssl: (optional) set to True if Ignite server uses SSL + on its binary connector. Defaults to use SSL when username + and password has been supplied, not to use SSL otherwise, + :param ssl_version: (optional) SSL version constant from standard + `ssl` module. Defaults to TLS v1.2, + :param ssl_ciphers: (optional) ciphers to use. If not provided, + `ssl` default ciphers are used, + :param ssl_cert_reqs: (optional) determines how the remote side + certificate is treated: + + * `ssl.CERT_NONE` − remote certificate is ignored (default), + * `ssl.CERT_OPTIONAL` − remote certificate will be validated, + if provided, + * `ssl.CERT_REQUIRED` − valid remote certificate is required, + + :param ssl_keyfile: (optional) a path to SSL key file to identify + local (client) party, + :param ssl_keyfile_password: (optional) password for SSL key file, + can be provided when key file is encrypted to prevent OpenSSL + password prompt, + :param ssl_certfile: (optional) a path to ssl certificate file + to identify local (client) party, + :param ssl_ca_certfile: (optional) a path to a trusted certificate + or a certificate chain. Required to check the validity of the remote + (server-side) certificate, + :param username: (optional) user name to authenticate to Ignite + cluster, + :param password: (optional) password to authenticate to Ignite cluster. """ super().__init__(compact_footer, partition_aware, event_listeners, **kwargs) diff --git a/pyignite/connection/aio_connection.py b/pyignite/connection/aio_connection.py index 89de49d..4d13d6e 100644 --- a/pyignite/connection/aio_connection.py +++ b/pyignite/connection/aio_connection.py @@ -118,11 +118,13 @@ def __init__(self, client: 'AioClient', host: str, port: int, username: str = No :param client: Ignite client object, :param host: Ignite server node's host name or IP, :param port: Ignite server node's port number, + :param handshake_timeout: (optional) sets timeout (in seconds) for performing handshake (connection) + with node. Default is 10.0 seconds, :param use_ssl: (optional) set to True if Ignite server uses SSL on its binary connector. Defaults to use SSL when username and password has been supplied, not to use SSL otherwise, :param ssl_version: (optional) SSL version constant from standard - `ssl` module. Defaults to TLS v1.1, as in Ignite 2.5, + `ssl` module. Defaults to TLS v1.2, :param ssl_ciphers: (optional) ciphers to use. 
If not provided, `ssl` default ciphers are used, :param ssl_cert_reqs: (optional) determines how the remote side @@ -165,7 +167,6 @@ async def connect(self): """ if self.alive: return - self._closed = False await self._connect() async def _connect(self): @@ -176,27 +177,28 @@ async def _connect(self): detecting_protocol = True self.client.protocol_context = ProtocolContext(max(PROTOCOLS), BitmaskFeature.all_supported()) - try: - self._on_handshake_start() - result = await self._connect_version() - except HandshakeError as e: - if e.expected_version in PROTOCOLS: - self.client.protocol_context.version = e.expected_version + while True: + try: + self._on_handshake_start() result = await self._connect_version() - else: + self._on_handshake_success(result) + return + except HandshakeError as e: + if e.expected_version in PROTOCOLS: + self.client.protocol_context.version = e.expected_version + continue + else: + self._on_handshake_fail(e) + raise e + except AuthenticationError as e: self._on_handshake_fail(e) raise e - except AuthenticationError as e: - self._on_handshake_fail(e) - raise e - except Exception as e: - self._on_handshake_fail(e) - # restore undefined protocol version - if detecting_protocol: - self.client.protocol_context = None - raise e - - self._on_handshake_success(result) + except Exception as e: + self._on_handshake_fail(e) + # restore undefined protocol version + if detecting_protocol: + self.client.protocol_context = None + raise e def process_connection_lost(self, err, reconnect=False): self.failed = True @@ -225,9 +227,13 @@ async def _connect_version(self) -> Union[dict, OrderedDict]: ssl_context = create_ssl_context(self.ssl_params) handshake_fut = self._loop.create_future() + self._closed = False self._transport, _ = await self._loop.create_connection(lambda: BaseProtocol(self, handshake_fut), host=self.host, port=self.port, ssl=ssl_context) - hs_response = await handshake_fut + try: + hs_response = await asyncio.wait_for(handshake_fut, self.handshake_timeout) + except asyncio.TimeoutError: + raise ConnectionError('timed out') if hs_response.op_code == 0: await self.close() diff --git a/pyignite/connection/connection.py b/pyignite/connection/connection.py index 2b9970a..98ba7e0 100644 --- a/pyignite/connection/connection.py +++ b/pyignite/connection/connection.py @@ -19,7 +19,7 @@ from typing import Union from pyignite.constants import PROTOCOLS, IGNITE_DEFAULT_HOST, IGNITE_DEFAULT_PORT, PROTOCOL_BYTE_ORDER -from pyignite.exceptions import HandshakeError, SocketError, connection_errors, AuthenticationError +from pyignite.exceptions import HandshakeError, SocketError, connection_errors, AuthenticationError, ParameterError from .bitmask_feature import BitmaskFeature from .handshake import HandshakeRequest, HandshakeResponse @@ -34,14 +34,18 @@ class BaseConnection: def __init__(self, client, host: str = None, port: int = None, username: str = None, password: str = None, - **ssl_params): + handshake_timeout: float = 10.0, **ssl_params): self.client = client + self.handshake_timeout = handshake_timeout self.host = host if host else IGNITE_DEFAULT_HOST self.port = port if port else IGNITE_DEFAULT_PORT self.username = username self.password = password self.uuid = None + if handshake_timeout <= 0.0: + raise ParameterError("handshake_timeout should be positive") + check_ssl_params(ssl_params) if self.username and self.password and 'use_ssl' not in ssl_params: @@ -162,8 +166,9 @@ class Connection(BaseConnection): * binary protocol connector. 
Encapsulates handshake and failover reconnection. """ - def __init__(self, client: 'Client', host: str, port: int, timeout: float = None, - username: str = None, password: str = None, **ssl_params): + def __init__(self, client: 'Client', host: str, port: int, username: str = None, password: str = None, + timeout: float = None, handshake_timeout: float = 10.0, + **ssl_params): """ Initialize connection. @@ -177,11 +182,13 @@ def __init__(self, client: 'Client', host: str, port: int, timeout: float = None operation including `connect`. 0 means non-blocking mode, which is virtually guaranteed to fail. Can accept integer or float value. Default is None (blocking mode), + :param handshake_timeout: (optional) sets timeout (in seconds) for performing handshake (connection) + with node. Default is 10.0. :param use_ssl: (optional) set to True if Ignite server uses SSL on its binary connector. Defaults to use SSL when username and password has been supplied, not to use SSL otherwise, :param ssl_version: (optional) SSL version constant from standard - `ssl` module. Defaults to TLS v1.1, as in Ignite 2.5, + `ssl` module. Defaults to TLS v1.2, :param ssl_ciphers: (optional) ciphers to use. If not provided, `ssl` default ciphers are used, :param ssl_cert_reqs: (optional) determines how the remote side @@ -206,7 +213,7 @@ def __init__(self, client: 'Client', host: str, port: int, timeout: float = None cluster, :param password: (optional) password to authenticate to Ignite cluster. """ - super().__init__(client, host, port, username, password, **ssl_params) + super().__init__(client, host, port, username, password, handshake_timeout, **ssl_params) self.timeout = timeout self._socket = None @@ -225,27 +232,29 @@ def connect(self): detecting_protocol = True self.client.protocol_context = ProtocolContext(max(PROTOCOLS), BitmaskFeature.all_supported()) - try: - self._on_handshake_start() - result = self._connect_version() - except HandshakeError as e: - if e.expected_version in PROTOCOLS: - self.client.protocol_context.version = e.expected_version + while True: + try: + self._on_handshake_start() result = self._connect_version() - else: + self._socket.settimeout(self.timeout) + self._on_handshake_success(result) + return + except HandshakeError as e: + if e.expected_version in PROTOCOLS: + self.client.protocol_context.version = e.expected_version + continue + else: + self._on_handshake_fail(e) + raise e + except AuthenticationError as e: self._on_handshake_fail(e) raise e - except AuthenticationError as e: - self._on_handshake_fail(e) - raise e - except Exception as e: - self._on_handshake_fail(e) - # restore undefined protocol version - if detecting_protocol: - self.client.protocol_context = None - raise e - - self._on_handshake_success(result) + except Exception as e: + self._on_handshake_fail(e) + # restore undefined protocol version + if detecting_protocol: + self.client.protocol_context = None + raise e def _connect_version(self) -> Union[dict, OrderedDict]: """ @@ -254,7 +263,7 @@ def _connect_version(self) -> Union[dict, OrderedDict]: """ self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self._socket.settimeout(self.timeout) + self._socket.settimeout(self.handshake_timeout) self._socket = wrap(self._socket, self.ssl_params) self._socket.connect((self.host, self.port)) diff --git a/pyignite/connection/protocol_context.py b/pyignite/connection/protocol_context.py index ba6d9e4..f60d45b 100644 --- a/pyignite/connection/protocol_context.py +++ b/pyignite/connection/protocol_context.py @@ 
-40,6 +40,9 @@ def __eq__(self, other): def __str__(self): return f'ProtocolContext(version={self._version}, features={self._features})' + def __repr__(self): + return self.__str__() + def _ensure_consistency(self): if not self.is_feature_flags_supported(): self._features = None diff --git a/pyignite/transaction.py b/pyignite/transaction.py index eb77f8d..3003eb6 100644 --- a/pyignite/transaction.py +++ b/pyignite/transaction.py @@ -23,7 +23,7 @@ def _validate_int_enum_param(value: Union[int, IntEnum], cls: Type[IntEnum]): - if value not in cls: + if value not in set(v.value for v in cls): # Use this trick to disable warning on python 3.7 raise ValueError(f'{value} not in {cls}') return value diff --git a/tests/common/test_query_listener.py b/tests/common/test_query_listener.py index afff542..8310117 100644 --- a/tests/common/test_query_listener.py +++ b/tests/common/test_query_listener.py @@ -17,7 +17,7 @@ from pyignite import Client, AioClient from pyignite.exceptions import CacheError from pyignite.monitoring import QueryEventListener, QueryStartEvent, QueryFailEvent, QuerySuccessEvent -from pyignite.queries.op_codes import OP_CACHE_PUT, OP_CACHE_PARTITIONS, OP_CLUSTER_GET_STATE +from pyignite.queries.op_codes import OP_CACHE_PUT, OP_CACHE_PARTITIONS, OP_CACHE_GET_NAMES events = [] @@ -93,17 +93,17 @@ def __assert_fail_events(client): assert ev.port == conn.port assert ev.node_uuid == str(conn.uuid if conn.uuid else '') assert 'Cache does not exist' in ev.err_msg - assert ev.duration > 0 + assert ev.duration >= 0 def test_query_success_events(client): - client.get_cluster().get_state() + client.get_cache_names() __assert_success_events(client) @pytest.mark.asyncio async def test_query_success_events_async(async_client): - await async_client.get_cluster().get_state() + await async_client.get_cache_names() __assert_success_events(async_client) @@ -112,16 +112,16 @@ def __assert_success_events(client): conn = client._nodes[0] for ev in events: if isinstance(ev, QueryStartEvent): - assert ev.op_code == OP_CLUSTER_GET_STATE - assert ev.op_name == 'OP_CLUSTER_GET_STATE' + assert ev.op_code == OP_CACHE_GET_NAMES + assert ev.op_name == 'OP_CACHE_GET_NAMES' assert ev.host == conn.host assert ev.port == conn.port assert ev.node_uuid == str(conn.uuid if conn.uuid else '') if isinstance(ev, QuerySuccessEvent): - assert ev.op_code == OP_CLUSTER_GET_STATE - assert ev.op_name == 'OP_CLUSTER_GET_STATE' + assert ev.op_code == OP_CACHE_GET_NAMES + assert ev.op_name == 'OP_CACHE_GET_NAMES' assert ev.host == conn.host assert ev.port == conn.port assert ev.node_uuid == str(conn.uuid if conn.uuid else '') - assert ev.duration > 0 + assert ev.duration >= 0 diff --git a/tests/conftest.py b/tests/conftest.py index 70995a2..6f92f0c 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -20,11 +20,6 @@ logger = logging.getLogger('pyignite') logger.setLevel(logging.DEBUG) -handler = logging.StreamHandler(stream=sys.stdout) -handler.setFormatter( - logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s') -) -logger.addHandler(handler) @pytest.fixture(autouse=True) diff --git a/tests/custom/test_cluster.py b/tests/custom/test_cluster.py index e94853a..ae83ecd 100644 --- a/tests/custom/test_cluster.py +++ b/tests/custom/test_cluster.py @@ -49,7 +49,7 @@ def cluster_api_supported(request, server1): client = Client() with client.connect('127.0.0.1', 10801): if not client.protocol_context.is_cluster_api_supported(): - pytest.skip(f'skipped {request.node.name}, ExpiryPolicy APIis not supported.') + 
pytest.skip(f'skipped {request.node.name}, Cluster API is not supported.') def test_cluster_set_active(with_persistence): diff --git a/tests/custom/test_connection_events.py b/tests/custom/test_connection_events.py index bee9395..f49ad61 100644 --- a/tests/custom/test_connection_events.py +++ b/tests/custom/test_connection_events.py @@ -12,11 +12,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import random - import pytest from pyignite import Client, AioClient +from pyignite.datatypes.cache_config import CacheMode +from pyignite.datatypes.prop_codes import PROP_NAME, PROP_CACHE_MODE from pyignite.monitoring import ConnectionEventListener, ConnectionLostEvent, ConnectionClosedEvent, \ HandshakeSuccessEvent, HandshakeFailedEvent, HandshakeStartEvent @@ -65,12 +65,16 @@ def test_events(request, server2): with client.connect([('127.0.0.1', 10800 + idx) for idx in range(1, 3)]): protocol_context = client.protocol_context nodes = {conn.port: conn for conn in client._nodes} - cache = client.get_or_create_cache(request.node.name) + cache = client.get_or_create_cache({ + PROP_NAME: request.node.name, + PROP_CACHE_MODE: CacheMode.REPLICATED, + }) + kill_process_tree(server2.pid) - while True: + for _ in range(0, 100): try: - cache.put(random.randint(0, 1000), 1) + cache.put(1, 1) except: # noqa 13 pass @@ -86,12 +90,15 @@ async def test_events_async(request, server2): async with client.connect([('127.0.0.1', 10800 + idx) for idx in range(1, 3)]): protocol_context = client.protocol_context nodes = {conn.port: conn for conn in client._nodes} - cache = await client.get_or_create_cache(request.node.name) + cache = await client.get_or_create_cache({ + PROP_NAME: request.node.name, + PROP_CACHE_MODE: CacheMode.REPLICATED, + }) kill_process_tree(server2.pid) - while True: + for _ in range(0, 100): try: - await cache.put(random.randint(0, 1000), 1) + await cache.put(1, 1) except: # noqa 13 pass @@ -104,7 +111,7 @@ async def test_events_async(request, server2): def __assert_events(nodes, protocol_context): assert len([e for e in events if isinstance(e, ConnectionLostEvent)]) == 1 # ConnectionLostEvent is a subclass of ConnectionClosedEvent - assert len([e for e in events if type(e) == ConnectionClosedEvent]) == 1 + assert 1 <= len([e for e in events if type(e) == ConnectionClosedEvent and e.node_uuid]) <= 2 assert len([e for e in events if isinstance(e, HandshakeSuccessEvent)]) == 2 for ev in events: @@ -114,7 +121,6 @@ def __assert_events(nodes, protocol_context): assert ev.node_uuid == str(nodes[ev.port].uuid) assert ev.error_msg elif isinstance(ev, HandshakeStartEvent): - assert ev.protocol_context == protocol_context assert ev.port in {10801, 10802} elif isinstance(ev, HandshakeFailedEvent): assert ev.port == 10802 @@ -125,5 +131,6 @@ def __assert_events(nodes, protocol_context): assert ev.node_uuid == str(nodes[ev.port].uuid) assert ev.protocol_context == protocol_context elif isinstance(ev, ConnectionClosedEvent): - assert ev.port == 10801 - assert ev.node_uuid == str(nodes[ev.port].uuid) + assert ev.port in {10801, 10802} + if ev.node_uuid: # Possible if protocol negotiation occurred. 
+ assert ev.node_uuid == str(nodes[ev.port].uuid) diff --git a/tests/custom/test_handshake_timeout.py b/tests/custom/test_handshake_timeout.py new file mode 100644 index 0000000..bae184d --- /dev/null +++ b/tests/custom/test_handshake_timeout.py @@ -0,0 +1,212 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import logging +import socket +import struct +import time +from concurrent.futures import ThreadPoolExecutor + +import pytest + +from pyignite import Client, AioClient +from pyignite import monitoring +from pyignite.exceptions import ReconnectError, ParameterError +from pyignite.monitoring import HandshakeFailedEvent + +logger = logging.getLogger('fake_ignite') +logger.setLevel(logging.DEBUG) + +DEFAULT_HOST = '127.0.0.1' +DEFAULT_PORT = 10800 + + +class FakeIgniteProtocol(asyncio.Protocol): + def __init__(self, server): + self._transport = None + self._server = server + self._buf = bytearray() + self._done_handshake = False + + def connection_made(self, transport): + sock = transport.get_extra_info('socket') + if sock is not None: + logger.debug('Connecting from %s', sock) + self._server.add_client(transport) + self._transport = transport + + def _handshake_response(self, error=True): + if error: + return struct.pack(' (1, 3, 0): + response = self._handshake_response(True) + logger.debug(f'Writing handshake response {response}') + self._transport.write(response) + self._transport.close() + else: + response = self._handshake_response(False) + logger.debug(f'Writing handshake response {response}') + self._transport.write(response) + self._done_handshake = True + self._buf = bytearray() + + +class FakeIgniteServer: + def __init__(self, do_handshake=False): + self.clients = [] + self.server = None + self.do_handshake = do_handshake + self.loop = asyncio.get_event_loop() + + async def start(self): + self.server = await self.loop.create_server(lambda: FakeIgniteProtocol(self), DEFAULT_HOST, DEFAULT_PORT) + + def add_client(self, client): + self.clients.append(client) + + async def close(self): + for client in self.clients: + client.close() + + if self.server: + self.server.close() + await self.server.wait_closed() + + +class HandshakeTimeoutListener(monitoring.ConnectionEventListener): + def __init__(self): + self.events = [] + + def on_handshake_fail(self, event: HandshakeFailedEvent): + self.events.append(event) + + +@pytest.fixture +async def server(): + server = FakeIgniteServer() + try: + await server.start() + yield server + finally: + await server.close() + + +@pytest.fixture +async def server_with_handshake(): + server = FakeIgniteServer(do_handshake=True) + try: + await server.start() + yield server + finally: + await server.close() + + +@pytest.mark.asyncio +async def test_handshake_timeout(server, event_loop): + def 
sync_client_connect(): + hs_to_listener = HandshakeTimeoutListener() + client = Client(handshake_timeout=3.0, event_listeners=[hs_to_listener]) + start = time.monotonic() + try: + client.connect(DEFAULT_HOST, DEFAULT_PORT) + except Exception as e: + return time.monotonic() - start, hs_to_listener.events, e + return time.monotonic() - start, hs_to_listener.events, None + + duration, events, err = await event_loop.run_in_executor(ThreadPoolExecutor(), sync_client_connect) + + assert isinstance(err, ReconnectError) + assert 3.0 <= duration < 4.0 + assert len(events) > 0 + for ev in events: + assert isinstance(ev, HandshakeFailedEvent) + assert 'timed out' in ev.error_msg + + +@pytest.mark.asyncio +async def test_handshake_timeout_async(server): + hs_to_listener = HandshakeTimeoutListener() + client = AioClient(handshake_timeout=3.0, event_listeners=[hs_to_listener]) + with pytest.raises(ReconnectError): + start = time.monotonic() + await client.connect(DEFAULT_HOST, DEFAULT_PORT) + + assert 3.0 <= time.monotonic() - start < 4.0 + assert len(hs_to_listener.events) > 0 + for ev in hs_to_listener.events: + assert isinstance(ev, HandshakeFailedEvent) + assert 'timed out' in ev.error_msg + + +@pytest.mark.asyncio +async def test_socket_timeout_applied_sync(server_with_handshake, event_loop): + def sync_client_connect(): + hs_to_listener = HandshakeTimeoutListener() + client = Client(timeout=5.0, handshake_timeout=3.0, event_listeners=[hs_to_listener]) + start = time.monotonic() + try: + client.connect(DEFAULT_HOST, DEFAULT_PORT) + assert all(n.alive for n in client._nodes) + client.get_cache_names() + except Exception as e: + return time.monotonic() - start, hs_to_listener.events, e + return time.monotonic() - start, hs_to_listener.events, None + + duration, events, err = await event_loop.run_in_executor(ThreadPoolExecutor(), sync_client_connect) + + assert isinstance(err, socket.timeout) + assert 5.0 <= duration < 6.0 + assert len(events) == 0 + + +@pytest.mark.asyncio +async def test_handshake_timeout_not_affected_for_others_requests_async(server_with_handshake): + hs_to_listener = HandshakeTimeoutListener() + client = AioClient(handshake_timeout=3.0, event_listeners=[hs_to_listener]) + with pytest.raises(asyncio.TimeoutError): + await client.connect(DEFAULT_HOST, DEFAULT_PORT) + assert all(n.alive for n in client._nodes) + await asyncio.wait_for(client.get_cache_names(), 5.0) + + +@pytest.mark.parametrize( + 'handshake_timeout', + [0.0, -10.0, -0.01] +) +@pytest.mark.asyncio +async def test_handshake_timeout_param_validation(handshake_timeout): + with pytest.raises(ParameterError): + await AioClient(handshake_timeout=handshake_timeout).connect(DEFAULT_HOST, DEFAULT_PORT) + + with pytest.raises(ParameterError): + Client(handshake_timeout=handshake_timeout).connect(DEFAULT_HOST, DEFAULT_PORT) diff --git a/tests/security/test_auth.py b/tests/security/test_auth.py index 503cf88..83ac780 100644 --- a/tests/security/test_auth.py +++ b/tests/security/test_auth.py @@ -95,7 +95,8 @@ def __assert_successful_connect_events(conn, listener): assert ev.host == conn.host assert ev.port == conn.port if isinstance(ev, (HandshakeSuccessEvent, ConnectionClosedEvent)): - assert ev.node_uuid == str(conn.uuid if conn.uuid else '') + if ev.node_uuid: + assert ev.node_uuid == str(conn.uuid) if isinstance(ev, HandshakeSuccessEvent): assert ev.protocol_context diff --git a/tox.ini b/tox.ini index 90153da..964b748 100644 --- a/tox.ini +++ b/tox.ini @@ -17,6 +17,13 @@ skipsdist = True envlist = codestyle,py{36,37,38,39} 
+[pytest] +log_format = %(asctime)s %(name)s %(levelname)s %(message)s +log_date_format = %Y-%m-%d %H:%M:%S +# Uncomment if you want verbose logging for all tests (for failed it will be printed anyway). +# log_cli = True +# log_cli_level = DEBUG + [flake8] max-line-length=120 ignore = F401,F403,F405,F821 From 05dc38e64dc736caab90d365d30f1fe49248a199 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Thu, 22 Jul 2021 09:29:40 +0300 Subject: [PATCH 51/62] Add release notes for 0.5.1 --- README.md | 2 +- RELEASE_NOTES.txt | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 3f35643..2a936d6 100644 --- a/README.md +++ b/README.md @@ -67,7 +67,7 @@ pip install pyignite To install a specific version: ```bash -pip install pyignite==0.5.0 +pip install pyignite==0.5.1 ``` ## Documentation diff --git a/RELEASE_NOTES.txt b/RELEASE_NOTES.txt index 9d2ae81..c6cbd41 100644 --- a/RELEASE_NOTES.txt +++ b/RELEASE_NOTES.txt @@ -1,6 +1,15 @@ Apache Ignite python thin client ================================ +0.5.1 +-------------------------------- +* Added logging of connection and queries events +* Added event listeners to connection events and query events +* Added client's side handshake timeout +* Fixed excessive deprecation warnings on python 3.7 +* Fixed request to failed node when querying replicated cache +* Fixed excessive partition mapping requests + 0.5.0 -------------------------------- * Added transaction API support (sync and async versions, async version supports only python 3.7+) From be18440ea2d81a053ad8a8d031bf1652f0b8e92a Mon Sep 17 00:00:00 2001 From: Bojidar Marinov Date: Fri, 6 Aug 2021 13:43:52 +0300 Subject: [PATCH 52/62] IGNITE-15266 Fix nested object arrays deserialization - Fixes #48. Signed-off-by: Ivan Daschinsky --- pyignite/utils.py | 3 ++- tests/common/test_datatypes.py | 35 ++++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/pyignite/utils.py b/pyignite/utils.py index 427cceb..5fcbd38 100644 --- a/pyignite/utils.py +++ b/pyignite/utils.py @@ -66,7 +66,8 @@ def is_hinted(value): """ Check if a value is a tuple of data item and its type hint. 
""" - return isinstance(value, tuple) and len(value) == 2 and issubclass(value[1], IgniteDataType) + return isinstance(value, tuple) and len(value) == 2 and inspect.isclass(value[1]) and \ + issubclass(value[1], IgniteDataType) def int_overflow(value: int) -> int: diff --git a/tests/common/test_datatypes.py b/tests/common/test_datatypes.py index ebbafb6..3a0ee51 100644 --- a/tests/common/test_datatypes.py +++ b/tests/common/test_datatypes.py @@ -166,6 +166,41 @@ async def test_put_get_data_async(async_cache, value, value_hint): assert await async_cache.get('my_key') == value +nested_array_objects_params = [ + [ + (ObjectArrayObject.OBJECT, [ + ((ObjectArrayObject.OBJECT, [ + 'test', 1, Value(1, 'test'), + ((ObjectArrayObject.OBJECT, ['test', 1, Value(1, 'test')]), ObjectArrayObject) + ]), ObjectArrayObject) + ]), + (ObjectArrayObject.OBJECT, [ + (ObjectArrayObject.OBJECT, ['test', 1, Value(1, 'test'), + (ObjectArrayObject.OBJECT, ['test', 1, Value(1, 'test')])]) + ]) + ], +] + + +@pytest.mark.parametrize( + 'hinted_value, value', + nested_array_objects_params +) +def test_put_get_nested_array_objects(cache, hinted_value, value): + cache.put('my_key', hinted_value, value_hint=ObjectArrayObject) + assert cache.get('my_key') == value + + +@pytest.mark.parametrize( + 'hinted_value, value', + nested_array_objects_params +) +@pytest.mark.asyncio +async def test_put_get_nested_array_objects_async(async_cache, hinted_value, value): + await async_cache.put('my_key', hinted_value, value_hint=ObjectArrayObject) + assert await async_cache.get('my_key') == value + + bytearray_params = [ ([1, 2, 3, 5], ByteArrayObject), ((7, 8, 13, 18), ByteArrayObject), From ef8687e03f4335ff58501447a3c0c0a3c9142c44 Mon Sep 17 00:00:00 2001 From: Nikolay Date: Mon, 6 Sep 2021 18:20:54 +0300 Subject: [PATCH 53/62] Fix tests local run with tests/__init__.py added (#49) --- tests/__init__.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 tests/__init__.py diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..03803a9 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,14 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. From 3bf1cc1ad9e56a3b74a9abbb8a586495afb40169 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Thu, 9 Sep 2021 12:26:25 +0300 Subject: [PATCH 54/62] IGNITE-15479 Fix incorrect partial read from socket in sync client - Fixes #50. 
--- pyignite/connection/connection.py | 31 +++++++++++------------ tests/common/test_sync_socket.py | 42 +++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+), 16 deletions(-) create mode 100644 tests/common/test_sync_socket.py diff --git a/pyignite/connection/connection.py b/pyignite/connection/connection.py index 98ba7e0..3d86f01 100644 --- a/pyignite/connection/connection.py +++ b/pyignite/connection/connection.py @@ -156,6 +156,9 @@ def _connection_listener(self): return self.client._event_listeners +DEFAULT_INITIAL_BUF_SIZE = 1024 + + class Connection(BaseConnection): """ This is a `pyignite` class, that represents a connection to Ignite @@ -348,15 +351,15 @@ def recv(self, flags=None, reconnect=True) -> bytearray: if flags is not None: kwargs['flags'] = flags - data = bytearray(1024) + data = bytearray(DEFAULT_INITIAL_BUF_SIZE) buffer = memoryview(data) - bytes_total_received, bytes_to_receive = 0, 0 + total_rcvd, packet_len = 0, 0 while True: try: - bytes_received = self._socket.recv_into(buffer, len(buffer), **kwargs) - if bytes_received == 0: + bytes_rcvd = self._socket.recv_into(buffer, len(buffer), **kwargs) + if bytes_rcvd == 0: raise SocketError('Connection broken.') - bytes_total_received += bytes_received + total_rcvd += bytes_rcvd except connection_errors as e: self.failed = True if reconnect: @@ -364,23 +367,19 @@ def recv(self, flags=None, reconnect=True) -> bytearray: self.reconnect() raise e - if bytes_total_received < 4: - continue - elif bytes_to_receive == 0: - response_len = int.from_bytes(data[0:4], PROTOCOL_BYTE_ORDER) - bytes_to_receive = response_len - - if response_len + 4 > len(data): + if packet_len == 0 and total_rcvd > 4: + packet_len = int.from_bytes(data[0:4], PROTOCOL_BYTE_ORDER, signed=True) + 4 + if packet_len > len(data): buffer.release() - data.extend(bytearray(response_len + 4 - len(data))) - buffer = memoryview(data)[bytes_total_received:] + data.extend(bytearray(packet_len - len(data))) + buffer = memoryview(data)[total_rcvd:] continue - if bytes_total_received >= bytes_to_receive: + if 0 < packet_len <= total_rcvd: buffer.release() break - buffer = buffer[bytes_received:] + buffer = buffer[bytes_rcvd:] return data diff --git a/tests/common/test_sync_socket.py b/tests/common/test_sync_socket.py new file mode 100644 index 0000000..cd41809 --- /dev/null +++ b/tests/common/test_sync_socket.py @@ -0,0 +1,42 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import secrets +import socket +import unittest.mock as mock + +import pytest + +from pyignite import Client +from tests.util import get_or_create_cache + +old_recv_into = socket.socket.recv_into + + +def patched_recv_into_factory(buf_len): + def patched_recv_into(self, buffer, nbytes, **kwargs): + return old_recv_into(self, buffer, min(nbytes, buf_len) if buf_len else nbytes, **kwargs) + return patched_recv_into + + +@pytest.mark.parametrize('buf_len', [0, 1, 4, 16, 32, 64, 128, 256, 512, 1024]) +def test_get_large_value(buf_len): + with mock.patch.object(socket.socket, 'recv_into', new=patched_recv_into_factory(buf_len)): + c = Client() + with c.connect("127.0.0.1", 10801): + with get_or_create_cache(c, 'test') as cache: + value = secrets.token_hex((1 << 16) + 1) + cache.put(1, value) + assert value == cache.get(1) From abbff023a9641eac214998989fa9c3ce862e208f Mon Sep 17 00:00:00 2001 From: Ivan Daschinskiy Date: Mon, 21 Feb 2022 14:52:28 +0300 Subject: [PATCH 55/62] IGNITE-16599 Fix tests by setting strict version of MarkupSafe (#51) --- .travis.yml | 2 +- requirements/tests.txt | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 2cd3e2b..a52c5b2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,7 +23,7 @@ addons: env: global: - - IGNITE_VERSION=2.9.1 + - IGNITE_VERSION=2.12.0 - IGNITE_HOME=/opt/ignite before_install: diff --git a/requirements/tests.txt b/requirements/tests.txt index 38a8e9e..a84975e 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -7,4 +7,5 @@ pytest-asyncio==0.14.0 teamcity-messages==1.28 psutil==5.8.0 jinja2==2.11.3 +markupsafe==2.0.1 flake8==3.8.4 From a1a920e6722360f298405f8d0cf2b22d35f8019a Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Thu, 9 Sep 2021 13:26:24 +0300 Subject: [PATCH 56/62] Add release notes for 0.5.2 (cherry picked from commit 1222f29abca4c44a8a2f23e413eafb6acd332e76) --- RELEASE_NOTES.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/RELEASE_NOTES.txt b/RELEASE_NOTES.txt index c6cbd41..a67624a 100644 --- a/RELEASE_NOTES.txt +++ b/RELEASE_NOTES.txt @@ -1,6 +1,11 @@ Apache Ignite python thin client ================================ +0.5.2 +-------------------------------- +* Fixed incorrect partial read from socket in sync client +* Fixed nested object arrays deserialization + 0.5.1 -------------------------------- * Added logging of connection and queries events From ac12197224794bc62d1be35718408632d7afdbd2 Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Thu, 26 May 2022 05:42:02 -0700 Subject: [PATCH 57/62] IGNITE-17023 Remove Python 3.6, add 3.10. 
This closes #52 --- .gitignore | 1 + .travis.yml | 8 ++++---- README.md | 6 +++--- docs/readme.rst | 4 ++-- docs/source/pyignite.connection.protocol_context.rst | 2 +- examples/docker-compose.yml | 8 +++----- examples/readme.md | 6 +++++- pyignite/datatypes/standard.py | 4 ++-- pyignite/monitoring.py | 2 -- requirements/docs.txt | 2 +- requirements/tests.txt | 2 +- scripts/BuildWheels.ps1 | 2 +- scripts/build_wheels.sh | 2 +- scripts/create_distr.sh | 4 ++-- scripts/create_sdist.sh | 2 +- setup.py | 4 ++-- tox.ini | 2 +- 17 files changed, 31 insertions(+), 30 deletions(-) diff --git a/.gitignore b/.gitignore index 14ec495..7576fcd 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ junit*.xml pyignite.egg-info ignite-log-* __pycache__ +venv diff --git a/.travis.yml b/.travis.yml index a52c5b2..45f26f6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,7 +23,7 @@ addons: env: global: - - IGNITE_VERSION=2.12.0 + - IGNITE_VERSION=2.13.0 - IGNITE_HOME=/opt/ignite before_install: @@ -34,9 +34,6 @@ before_install: jobs: include: - - python: '3.6' - arch: amd64 - env: TOXENV=py36 - python: '3.7' arch: amd64 env: TOXENV=py37 @@ -49,6 +46,9 @@ jobs: - python: '3.9' arch: amd64 env: TOXENV=py39 + - python: '3.10' + arch: amd64 + env: TOXENV=py310 install: pip install tox script: tox diff --git a/README.md b/README.md index 2a936d6..be5fa7b 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ Apache Ignite thin (binary protocol) client, written in Python 3. ## Prerequisites -- Python 3.6 or above (3.6, 3.7, 3.8 and 3.9 are tested), +- Python 3.7 or above (3.7, 3.8, 3.9 and 3.10 are tested), - Access to Apache Ignite node, local or remote. The current thin client version was tested on Apache Ignite 2.10 (binary client protocol 1.7.0). @@ -45,12 +45,12 @@ There is an optional C extension to speedup some computational intensive tasks. ***NB!* Docker is required.** -- On Windows MSVC 14.x required, and it should be in path, also python versions 3.6, 3.7, 3.8 and 3.9 both for x86 and +- On Windows MSVC 14.x required, and it should be in path, also python versions 3.7, 3.8, 3.9 and 3.10 both for x86 and x86-64 should be installed. You can disable some of these versions but you'd need to edit script for that. - For building `wheels` for Windows, invoke script `.\scripts\BuildWheels.ps1` using PowerShell. Just make sure that your execution policy allows execution of scripts in your environment. - Ready wheels for `x86` and `x86-64` for different python versions (3.6, 3.7, 3.8 and 3.9) will be + Ready wheels for `x86` and `x86-64` for different python versions (3.7, 3.8, 3.9 and 3.10) will be located in `distr` directory. ### Updating from older version diff --git a/docs/readme.rst b/docs/readme.rst index 5fc76a7..17eb4b5 100644 --- a/docs/readme.rst +++ b/docs/readme.rst @@ -35,9 +35,9 @@ through a raw TCP socket. Prerequisites ------------- -- *Python 3.6* or above (3.6, 3.7, 3.8 and 3.9 are tested), +- *Python 3.7* or above (3.7, 3.8, 3.9 and 3.10 are tested), - Access to *Apache Ignite* node, local or remote. The current thin client - version was tested on *Apache Ignite 2.10.0* (binary client protocol 1.7.0). + version was tested on *Apache Ignite 2.13.0* (binary client protocol 1.7.0). 
Installation ------------ diff --git a/docs/source/pyignite.connection.protocol_context.rst b/docs/source/pyignite.connection.protocol_context.rst index a5298ba..1ec3c81 100644 --- a/docs/source/pyignite.connection.protocol_context.rst +++ b/docs/source/pyignite.connection.protocol_context.rst @@ -14,7 +14,7 @@ limitations under the License. pyignite.connection.protocol_context package -=========================== +============================================ .. automodule:: pyignite.connection.protocol_context :members: \ No newline at end of file diff --git a/examples/docker-compose.yml b/examples/docker-compose.yml index 76c91b3..e2dd178 100644 --- a/examples/docker-compose.yml +++ b/examples/docker-compose.yml @@ -13,24 +13,22 @@ # See the License for the specific language governing permissions and # limitations under the License. +version: "3" services: ignite_0: image: apacheignite/ignite:latest ports: - 10800:10800 restart: always - network_mode: host ignite_1: image: apacheignite/ignite:latest ports: - - 10800:10801 + - 10801:10800 restart: always - network_mode: host ignite_2: image: apacheignite/ignite:latest ports: - - 10800:10802 + - 10802:10800 restart: always - network_mode: host diff --git a/examples/readme.md b/examples/readme.md index 8fd4848..ebc6b7b 100644 --- a/examples/readme.md +++ b/examples/readme.md @@ -2,12 +2,16 @@ This directory contains the following example files: +- `async_key_value` - asynchronous key-value operations, +- `async_sql` - asynchronous SQL operations, - `binary_basics.py` − basic operations with Complex objects, -- `binary_types.py` - read SQL table as a key-value cache, - `create_binary.py` − create SQL row with key-value operation, +- `expiry_policy.py` - the expiration policy for caches for synchronous and asynchronous operations is demonstrated, - `failover.py` − fail-over connection to Ignite cluster, - `get_and_put.py` − basic key-value operations, +- `get_and_put_complex.py` − key-value operations with different value and key types, - `migrate_binary.py` − work with Complex object schemas, +- `read_binary.py` − creates caches and fills them with data through SQL queries, demonstrates working with binary objects, - `scans.py` − cache scan operation, - `sql.py` − use Ignite SQL, - `type_hints.py` − type hints. diff --git a/pyignite/datatypes/standard.py b/pyignite/datatypes/standard.py index 9173daa..9357e8f 100644 --- a/pyignite/datatypes/standard.py +++ b/pyignite/datatypes/standard.py @@ -18,7 +18,7 @@ import decimal from io import SEEK_CUR from math import ceil -from typing import Tuple +from typing import Tuple, Union import uuid from pyignite.constants import * @@ -365,7 +365,7 @@ def build_c_type(cls): return cls._object_c_type @classmethod - def from_python_not_null(cls, stream, value: [date, datetime], **kwargs): + def from_python_not_null(cls, stream, value: Union[date, datetime], **kwargs): if type(value) is date: value = datetime.combine(value, time()) data_type = cls.build_c_type() diff --git a/pyignite/monitoring.py b/pyignite/monitoring.py index 9bbfd20..997a5f8 100644 --- a/pyignite/monitoring.py +++ b/pyignite/monitoring.py @@ -56,8 +56,6 @@ def on_query_success(self, event): .. note:: Debug logging is also available, standard ``logging`` is used. Just set ``DEBUG`` level to *pyignite* logger. 
-| -| """ from typing import Optional, Sequence diff --git a/requirements/docs.txt b/requirements/docs.txt index 962f07f..d088fff 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -3,5 +3,5 @@ -r install.txt wheel==0.36.2 -Sphinx==1.7.5 +Sphinx==4.3.2 sphinxcontrib-fulltoc==1.2.0 \ No newline at end of file diff --git a/requirements/tests.txt b/requirements/tests.txt index a84975e..7262fe9 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -6,6 +6,6 @@ pytest-cov==2.11.1 pytest-asyncio==0.14.0 teamcity-messages==1.28 psutil==5.8.0 -jinja2==2.11.3 +jinja2==3.0.3 markupsafe==2.0.1 flake8==3.8.4 diff --git a/scripts/BuildWheels.ps1 b/scripts/BuildWheels.ps1 index cf7424e..9098d58 100644 --- a/scripts/BuildWheels.ps1 +++ b/scripts/BuildWheels.ps1 @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -$PyVers="36","37","38","39" +$PyVers="37","38","39","310" [System.Collections.ArrayList]$PyVersFull = $PyVers foreach ($Ver in $PyVers) diff --git a/scripts/build_wheels.sh b/scripts/build_wheels.sh index cf5f760..b30c3b7 100755 --- a/scripts/build_wheels.sh +++ b/scripts/build_wheels.sh @@ -27,7 +27,7 @@ function repair_wheel { # Compile wheels for PYBIN in /opt/python/*/bin; do - if [[ $PYBIN =~ ^(.*)cp3[6789](.*)$ ]]; then + if [[ $PYBIN =~ ^(.*)cp3[7891](.*)$ ]]; then "${PYBIN}/pip" wheel /pyignite/ --no-deps -w /wheels fi done diff --git a/scripts/create_distr.sh b/scripts/create_distr.sh index 5732aba..b86ac1e 100755 --- a/scripts/create_distr.sh +++ b/scripts/create_distr.sh @@ -16,7 +16,7 @@ DISTR_DIR="$(pwd)/distr/" SRC_DIR="$(pwd)" -DEFAULT_DOCKER_IMAGE="quay.io/pypa/manylinux1_x86_64" +DEFAULT_DOCKER_IMAGE="quay.io/pypa/manylinux2010_x86_64" usage() { cat < Date: Mon, 6 Jun 2022 11:25:46 -0700 Subject: [PATCH 58/62] IGNITE-17054 Fix examples and documentation (#53) --- docs/async_examples.rst | 48 ++--- docs/examples.rst | 90 ++++----- examples/async_key_value.py | 19 +- examples/async_sql.py | 248 ++++-------------------- examples/binary_basics.py | 14 +- examples/create_binary.py | 23 +-- examples/expiry_policy.py | 27 ++- examples/get_and_put_complex.py | 8 +- examples/helpers/converters.py | 5 + examples/helpers/sql_helper.py | 193 ++++++++++++++++++ examples/migrate_binary.py | 103 +++++----- examples/read_binary.py | 334 +++++++++----------------------- examples/scans.py | 4 +- examples/sql.py | 266 ++++--------------------- examples/transactions.py | 11 +- examples/type_hints.py | 3 +- 16 files changed, 550 insertions(+), 846 deletions(-) create mode 100644 examples/helpers/converters.py create mode 100644 examples/helpers/sql_helper.py diff --git a/docs/async_examples.rst b/docs/async_examples.rst index 4ce65ce..644fcfe 100644 --- a/docs/async_examples.rst +++ b/docs/async_examples.rst @@ -32,14 +32,14 @@ Firstly, import dependencies. .. literalinclude:: ../examples/async_key_value.py :language: python - :lines: 18 + :lines: 19 Let's connect to cluster and perform key-value queries. .. literalinclude:: ../examples/async_key_value.py :language: python :dedent: 4 - :lines: 23-38 + :lines: 23-47 Scan ==== @@ -49,7 +49,7 @@ that yields the resulting rows. .. literalinclude:: ../examples/async_key_value.py :language: python :dedent: 8 - :lines: 39-50 + :lines: 49-60 ExpiryPolicy ============ @@ -63,12 +63,12 @@ in cache settings dictionary on creation. .. literalinclude:: ../examples/expiry_policy.py :language: python :dedent: 12 - :lines: 73-76 + :lines: 74-77 .. 
literalinclude:: ../examples/expiry_policy.py :language: python :dedent: 12 - :lines: 82-90 + :lines: 83-91 Secondly, expiry policy can be set for all cache operations, which are done under decorator. To create it use :py:meth:`~pyignite.cache.BaseCache.with_expire_policy` @@ -76,7 +76,7 @@ Secondly, expiry policy can be set for all cache operations, which are done unde .. literalinclude:: ../examples/expiry_policy.py :language: python :dedent: 12 - :lines: 97-106 + :lines: 98-107 Transactions ------------ @@ -132,41 +132,44 @@ First let us establish a connection. .. literalinclude:: ../examples/async_sql.py :language: python :dedent: 4 - :lines: 197-198 + :lines: 24-25 Then create tables. Begin with `Country` table, than proceed with related tables `City` and `CountryLanguage`. -.. literalinclude:: ../examples/async_sql.py +.. literalinclude:: ../examples/helpers/sql_helper.py :language: python - :lines: 25-42, 51-59, 67-74 + :dedent: 4 + :lines: 27-43, 53-60, 68-74 .. literalinclude:: ../examples/async_sql.py :language: python :dedent: 4 - :lines: 199-205 + :lines: 27-32 Create indexes. -.. literalinclude:: ../examples/async_sql.py +.. literalinclude:: ../examples/helpers/sql_helper.py :language: python - :lines: 60-62, 75-77 + :dedent: 4 + :lines: 62, 76 .. literalinclude:: ../examples/async_sql.py :language: python :dedent: 8 - :lines: 207-209 + :lines: 35-36 Fill tables with data. -.. literalinclude:: ../examples/async_sql.py +.. literalinclude:: ../examples/helpers/sql_helper.py :language: python - :lines: 43-50, 63-66, 78-81 + :dedent: 4 + :lines: 45-51, 64-66, 78-80 .. literalinclude:: ../examples/async_sql.py :language: python :dedent: 8 - :lines: 212-223 + :lines: 39-49 Now let us answer some questions. @@ -176,7 +179,7 @@ What are the 10 largest cities in our data sample (population-wise)? .. literalinclude:: ../examples/async_sql.py :language: python :dedent: 8 - :lines: 225-243 + :lines: 52-66 The :py:meth:`~pyignite.aio_client.AioClient.sql` method returns :py:class:`~pyignite.cursors.AioSqlFieldsCursor`, that yields the resulting rows. @@ -193,7 +196,7 @@ of :py:class:`~pyignite.cursors.AioSqlFieldsCursor` .. literalinclude:: ../examples/async_sql.py :language: python :dedent: 8 - :lines: 246-271 + :lines: 69-95 Display all the information about a given city ============================================== @@ -201,18 +204,19 @@ Display all the information about a given city .. literalinclude:: ../examples/async_sql.py :language: python :dedent: 8 - :lines: 273-288 + :lines: 98-110 Finally, delete the tables used in this example with the following queries: -.. literalinclude:: ../examples/async_sql.py +.. literalinclude:: ../examples/helpers/sql_helper.py :language: python - :lines: 83 + :dedent: 4 + :lines: 82 .. literalinclude:: ../examples/async_sql.py :language: python :dedent: 8 - :lines: 290-297 + :lines: 113-115 diff --git a/docs/examples.rst b/docs/examples.rst index 4ca0910..8f40b91 100644 --- a/docs/examples.rst +++ b/docs/examples.rst @@ -71,7 +71,7 @@ File: `type_hints.py`_ .. literalinclude:: ../examples/type_hints.py :language: python :dedent: 4 - :lines: 24-48 + :lines: 23-47 As a rule of thumb: @@ -97,12 +97,12 @@ in cache settings dictionary on creation. .. literalinclude:: ../examples/expiry_policy.py :language: python :dedent: 12 - :lines: 32-35 + :lines: 33-36 .. 
literalinclude:: ../examples/expiry_policy.py :language: python :dedent: 12 - :lines: 41-47 + :lines: 42-48 Secondly, expiry policy can be set for all cache operations, which are done under decorator. To create it use :py:meth:`~pyignite.cache.BaseCache.with_expire_policy` @@ -110,7 +110,7 @@ Secondly, expiry policy can be set for all cache operations, which are done unde .. literalinclude:: ../examples/expiry_policy.py :language: python :dedent: 12 - :lines: 54-61 + :lines: 55-62 Scan ==== @@ -124,7 +124,7 @@ Let us put some data in cache. .. literalinclude:: ../examples/scans.py :language: python :dedent: 4 - :lines: 20-29 + :lines: 22-31 :py:meth:`~pyignite.cache.Cache.scan` returns a cursor, that yields two-tuples of key and value. You can iterate through the generated pairs @@ -133,14 +133,14 @@ in a safe manner: .. literalinclude:: ../examples/scans.py :language: python :dedent: 4 - :lines: 31-39 + :lines: 33-41 Or, alternatively, you can convert the cursor to dictionary in one go: .. literalinclude:: ../examples/scans.py :language: python :dedent: 4 - :lines: 41-50 + :lines: 43-52 But be cautious: if the cache contains a large set of data, the dictionary may consume too much memory! @@ -158,7 +158,7 @@ each of the collection type. Second comes the data value. .. literalinclude:: ../examples/get_and_put_complex.py :language: python - :lines: 19 + :lines: 17 Map === @@ -175,7 +175,7 @@ Since CPython 3.6 all dictionaries became de facto ordered. You can always use .. literalinclude:: ../examples/get_and_put_complex.py :language: python :dedent: 4 - :lines: 26-38 + :lines: 22-36 Collection ========== @@ -192,7 +192,7 @@ and you always get `list` back. .. literalinclude:: ../examples/get_and_put_complex.py :language: python :dedent: 4 - :lines: 40-54 + :lines: 38-52 Object array ============ @@ -204,7 +204,7 @@ contents. But it still can be used for interoperability with Java. .. literalinclude:: ../examples/get_and_put_complex.py :language: python :dedent: 4 - :lines: 56-65 + :lines: 54-63 Transactions @@ -265,41 +265,44 @@ First let us establish a connection. .. literalinclude:: ../examples/sql.py :language: python - :lines: 195-196 + :lines: 20-21 Then create tables. Begin with `Country` table, than proceed with related tables `City` and `CountryLanguage`. -.. literalinclude:: ../examples/sql.py +.. literalinclude:: ../examples/helpers/sql_helper.py :language: python - :lines: 25-42, 51-59, 67-74 + :dedent: 4 + :lines: 27-43, 53-60, 68-74 .. literalinclude:: ../examples/sql.py :language: python :dedent: 4 - :lines: 199-204 + :lines: 23-28 Create indexes. -.. literalinclude:: ../examples/sql.py +.. literalinclude:: ../examples/helpers/sql_helper.py :language: python - :lines: 60-62, 75-77 + :dedent: 4 + :lines: 62, 76 .. literalinclude:: ../examples/sql.py :language: python :dedent: 4 - :lines: 207-208 + :lines: 31-32 Fill tables with data. -.. literalinclude:: ../examples/sql.py +.. literalinclude:: ../examples/helpers/sql_helper.py :language: python - :lines: 43-50, 63-66, 78-81 + :dedent: 4 + :lines: 45-51, 64-66, 78-80 .. literalinclude:: ../examples/sql.py :language: python :dedent: 4 - :lines: 211-218 + :lines: 35-42 Data samples are taken from `PyIgnite GitHub repository`_. @@ -311,7 +314,7 @@ What are the 10 largest cities in our data sample (population-wise)? .. literalinclude:: ../examples/sql.py :language: python :dedent: 4 - :lines: 24, 221-238 + :lines: 45-59 The :py:meth:`~pyignite.client.Client.sql` method returns a generator, that yields the resulting rows. 
@@ -327,7 +330,7 @@ column names as a first yield. You can access field names with Python built-in .. literalinclude:: ../examples/sql.py :language: python :dedent: 4 - :lines: 241-266 + :lines: 62-88 Display all the information about a given city ============================================== @@ -335,18 +338,18 @@ Display all the information about a given city .. literalinclude:: ../examples/sql.py :language: python :dedent: 4 - :lines: 268-283 + :lines: 92-103 Finally, delete the tables used in this example with the following queries: -.. literalinclude:: ../examples/sql.py +.. literalinclude:: ../examples/helpers/sql_helper.py :language: python - :lines: 82-83 + :lines: 82 .. literalinclude:: ../examples/sql.py :language: python :dedent: 4 - :lines: 285-291 + :lines: 106-107 .. _complex_object_usage: @@ -389,7 +392,7 @@ automatically when reading Complex objects. .. literalinclude:: ../examples/binary_basics.py :language: python :dedent: 4 - :lines: 32-34, 39-42, 48-49 + :lines: 36-38, 40-43, 45-46 Here you can see how :class:`~pyignite.binary.GenericObjectMeta` uses `attrs`_ package internally for creating nice `__init__()` and `__repr__()` @@ -416,14 +419,14 @@ Anyway, you can reuse the autogenerated dataclass for subsequent writes: .. literalinclude:: ../examples/binary_basics.py :language: python :dedent: 4 - :lines: 52, 33-37 + :lines: 50, 32-34 :class:`~pyignite.binary.GenericObjectMeta` can also be used directly for creating custom classes: .. literalinclude:: ../examples/binary_basics.py :language: python - :lines: 18-27 + :lines: 20-25 Note how the `Person` class is defined. `schema` is a :class:`~pyignite.binary.GenericObjectMeta` metaclass parameter. @@ -443,7 +446,7 @@ register said class explicitly with your client: .. literalinclude:: ../examples/binary_basics.py :language: python :dedent: 4 - :lines: 50 + :lines: 48 Now, when we dealt with the basics of `pyignite` implementation of Complex Objects, let us move on to more elaborate examples. @@ -465,7 +468,7 @@ Let us do it again and examine the Ignite storage afterwards. .. literalinclude:: ../examples/read_binary.py :language: python :dedent: 4 - :lines: 222-229 + :lines: 49-51 We can see that Ignite created a cache for each of our tables. The caches are conveniently named using ‘`SQL__`’ pattern. @@ -476,7 +479,7 @@ using a :py:attr:`~pyignite.cache.Cache.settings` property. .. literalinclude:: ../examples/read_binary.py :language: python :dedent: 4 - :lines: 231-251 + :lines: 53-103 The values of `value_type_name` and `key_type_name` are names of the binary types. The `City` table's key fields are stored using `key_type_name` type, @@ -489,7 +492,7 @@ functions and verify the correctness of the result. .. literalinclude:: ../examples/read_binary.py :language: python :dedent: 4 - :lines: 253-267 + :lines: 106-115 What we see is a tuple of key and value, extracted from the cache. Both key and value are represent Complex objects. The dataclass names are the same @@ -525,28 +528,27 @@ These are the necessary steps to perform the task. .. literalinclude:: ../examples/create_binary.py :language: python :dedent: 4 - :lines: 24-63 + :lines: 31-69 2. Define Complex object data class. .. literalinclude:: ../examples/create_binary.py :language: python - :dedent: 4 - :lines: 64-75 + :lines: 21-26 3. Insert row. .. literalinclude:: ../examples/create_binary.py :language: python :dedent: 4 - :lines: 76-80 + :lines: 71-75 Now let us make sure that our cache really can be used with SQL functions. .. 
literalinclude:: ../examples/create_binary.py :language: python :dedent: 4 - :lines: 82-87 + :lines: 77-82 Note, however, that the cache we create can not be dropped with DDL command. It should be deleted as any other key-value cache. @@ -554,7 +556,7 @@ It should be deleted as any other key-value cache. .. literalinclude:: ../examples/create_binary.py :language: python :dedent: 4 - :lines: 89-96 + :lines: 84-91 Migrate ======= @@ -574,7 +576,7 @@ First get the vouchers' cache. .. literalinclude:: ../examples/migrate_binary.py :language: python :dedent: 4 - :lines: 111 + :lines: 109 If you do not store the schema of the Complex object in code, you can obtain it as a dataclass property with @@ -583,20 +585,20 @@ it as a dataclass property with .. literalinclude:: ../examples/migrate_binary.py :language: python :dedent: 4 - :lines: 116-120 + :lines: 115-119 Let us modify the schema and create a new Complex object class with an updated schema. .. literalinclude:: ../examples/migrate_binary.py :language: python - :lines: 122-138 + :lines: 121-137 Now migrate the data from the old schema to the new one. .. literalinclude:: ../examples/migrate_binary.py :language: python - :lines: 141-190 + :lines: 140-190 At this moment all the fields, defined in both of our schemas, can be available in the resulting binary object, depending on which schema was used diff --git a/examples/async_key_value.py b/examples/async_key_value.py index 76dac34..7379874 100644 --- a/examples/async_key_value.py +++ b/examples/async_key_value.py @@ -14,6 +14,7 @@ # limitations under the License. import asyncio +from pprint import pprint from pyignite import AioClient @@ -32,9 +33,18 @@ async def main(): # Key-value queries. print(await cache.get('key_10')) - print(await cache.get_all([f'key_{i}' for i in range(0, 10)])) # value_10 - # {'key_3': 'value_3', 'key_2': 'value_2', 'key_1': 'value_1','....} + pprint(await cache.get_all([f'key_{i}' for i in range(0, 10)])) + # {'key_0': 'value_0', + # 'key_1': 'value_1', + # 'key_2': 'value_2', + # 'key_3': 'value_3', + # 'key_4': 'value_4', + # 'key_5': 'value_5', + # 'key_6': 'value_6', + # 'key_7': 'value_7', + # 'key_8': 'value_8', + # 'key_9': 'value_9'} # Scan query. async with cache.scan() as cursor: @@ -52,5 +62,6 @@ async def main(): # Clean up. await cache.destroy() -loop = asyncio.get_event_loop() -loop.run_until_complete(main()) + +if __name__ == '__main__': + asyncio.run(main()) diff --git a/examples/async_sql.py b/examples/async_sql.py index ffd2939..d8de9f6 100644 --- a/examples/async_sql.py +++ b/examples/async_sql.py @@ -14,220 +14,43 @@ # limitations under the License. 
import asyncio -from decimal import Decimal +from helpers.sql_helper import TableNames, Query, TestData from pyignite import AioClient -COUNTRY_TABLE_NAME = 'Country' -CITY_TABLE_NAME = 'City' -LANGUAGE_TABLE_NAME = 'CountryLanguage' - -COUNTRY_CREATE_TABLE_QUERY = '''CREATE TABLE Country ( - Code CHAR(3) PRIMARY KEY, - Name CHAR(52), - Continent CHAR(50), - Region CHAR(26), - SurfaceArea DECIMAL(10,2), - IndepYear SMALLINT(6), - Population INT(11), - LifeExpectancy DECIMAL(3,1), - GNP DECIMAL(10,2), - GNPOld DECIMAL(10,2), - LocalName CHAR(45), - GovernmentForm CHAR(45), - HeadOfState CHAR(60), - Capital INT(11), - Code2 CHAR(2) -)''' - -COUNTRY_INSERT_QUERY = '''INSERT INTO Country( - Code, Name, Continent, Region, - SurfaceArea, IndepYear, Population, - LifeExpectancy, GNP, GNPOld, - LocalName, GovernmentForm, HeadOfState, - Capital, Code2 -) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''' - -CITY_CREATE_TABLE_QUERY = '''CREATE TABLE City ( - ID INT(11), - Name CHAR(35), - CountryCode CHAR(3), - District CHAR(20), - Population INT(11), - PRIMARY KEY (ID, CountryCode) -) WITH "affinityKey=CountryCode"''' - -CITY_CREATE_INDEX = ''' -CREATE INDEX idx_country_code ON city (CountryCode)''' - -CITY_INSERT_QUERY = '''INSERT INTO City( - ID, Name, CountryCode, District, Population -) VALUES (?, ?, ?, ?, ?)''' - -LANGUAGE_CREATE_TABLE_QUERY = '''CREATE TABLE CountryLanguage ( - CountryCode CHAR(3), - Language CHAR(30), - IsOfficial BOOLEAN, - Percentage DECIMAL(4,1), - PRIMARY KEY (CountryCode, Language) -) WITH "affinityKey=CountryCode"''' - -LANGUAGE_CREATE_INDEX = ''' -CREATE INDEX idx_lang_country_code ON CountryLanguage (CountryCode)''' - -LANGUAGE_INSERT_QUERY = '''INSERT INTO CountryLanguage( - CountryCode, Language, IsOfficial, Percentage -) VALUES (?, ?, ?, ?)''' - -DROP_TABLE_QUERY = '''DROP TABLE {} IF EXISTS''' - -COUNTRY_DATA = [ - [ - 'USA', 'United States', 'North America', 'North America', - Decimal('9363520.00'), 1776, 278357000, - Decimal('77.1'), Decimal('8510700.00'), Decimal('8110900.00'), - 'United States', 'Federal Republic', 'George W. 
Bush', - 3813, 'US', - ], - [ - 'IND', 'India', 'Asia', 'Southern and Central Asia', - Decimal('3287263.00'), 1947, 1013662000, - Decimal('62.5'), Decimal('447114.00'), Decimal('430572.00'), - 'Bharat/India', 'Federal Republic', 'Kocheril Raman Narayanan', - 1109, 'IN', - ], - [ - 'CHN', 'China', 'Asia', 'Eastern Asia', - Decimal('9572900.00'), -1523, 1277558000, - Decimal('71.4'), Decimal('982268.00'), Decimal('917719.00'), - 'Zhongquo', 'PeoplesRepublic', 'Jiang Zemin', - 1891, 'CN', - ], -] - -CITY_DATA = [ - [3793, 'New York', 'USA', 'New York', 8008278], - [3794, 'Los Angeles', 'USA', 'California', 3694820], - [3795, 'Chicago', 'USA', 'Illinois', 2896016], - [3796, 'Houston', 'USA', 'Texas', 1953631], - [3797, 'Philadelphia', 'USA', 'Pennsylvania', 1517550], - [3798, 'Phoenix', 'USA', 'Arizona', 1321045], - [3799, 'San Diego', 'USA', 'California', 1223400], - [3800, 'Dallas', 'USA', 'Texas', 1188580], - [3801, 'San Antonio', 'USA', 'Texas', 1144646], - [3802, 'Detroit', 'USA', 'Michigan', 951270], - [3803, 'San Jose', 'USA', 'California', 894943], - [3804, 'Indianapolis', 'USA', 'Indiana', 791926], - [3805, 'San Francisco', 'USA', 'California', 776733], - [1024, 'Mumbai (Bombay)', 'IND', 'Maharashtra', 10500000], - [1025, 'Delhi', 'IND', 'Delhi', 7206704], - [1026, 'Calcutta [Kolkata]', 'IND', 'West Bengali', 4399819], - [1027, 'Chennai (Madras)', 'IND', 'Tamil Nadu', 3841396], - [1028, 'Hyderabad', 'IND', 'Andhra Pradesh', 2964638], - [1029, 'Ahmedabad', 'IND', 'Gujarat', 2876710], - [1030, 'Bangalore', 'IND', 'Karnataka', 2660088], - [1031, 'Kanpur', 'IND', 'Uttar Pradesh', 1874409], - [1032, 'Nagpur', 'IND', 'Maharashtra', 1624752], - [1033, 'Lucknow', 'IND', 'Uttar Pradesh', 1619115], - [1034, 'Pune', 'IND', 'Maharashtra', 1566651], - [1035, 'Surat', 'IND', 'Gujarat', 1498817], - [1036, 'Jaipur', 'IND', 'Rajasthan', 1458483], - [1890, 'Shanghai', 'CHN', 'Shanghai', 9696300], - [1891, 'Peking', 'CHN', 'Peking', 7472000], - [1892, 'Chongqing', 'CHN', 'Chongqing', 6351600], - [1893, 'Tianjin', 'CHN', 'Tianjin', 5286800], - [1894, 'Wuhan', 'CHN', 'Hubei', 4344600], - [1895, 'Harbin', 'CHN', 'Heilongjiang', 4289800], - [1896, 'Shenyang', 'CHN', 'Liaoning', 4265200], - [1897, 'Kanton [Guangzhou]', 'CHN', 'Guangdong', 4256300], - [1898, 'Chengdu', 'CHN', 'Sichuan', 3361500], - [1899, 'Nanking [Nanjing]', 'CHN', 'Jiangsu', 2870300], - [1900, 'Changchun', 'CHN', 'Jilin', 2812000], - [1901, 'Xi´an', 'CHN', 'Shaanxi', 2761400], - [1902, 'Dalian', 'CHN', 'Liaoning', 2697000], - [1903, 'Qingdao', 'CHN', 'Shandong', 2596000], - [1904, 'Jinan', 'CHN', 'Shandong', 2278100], - [1905, 'Hangzhou', 'CHN', 'Zhejiang', 2190500], - [1906, 'Zhengzhou', 'CHN', 'Henan', 2107200], -] - -LANGUAGE_DATA = [ - ['USA', 'Chinese', False, Decimal('0.6')], - ['USA', 'English', True, Decimal('86.2')], - ['USA', 'French', False, Decimal('0.7')], - ['USA', 'German', False, Decimal('0.7')], - ['USA', 'Italian', False, Decimal('0.6')], - ['USA', 'Japanese', False, Decimal('0.2')], - ['USA', 'Korean', False, Decimal('0.3')], - ['USA', 'Polish', False, Decimal('0.3')], - ['USA', 'Portuguese', False, Decimal('0.2')], - ['USA', 'Spanish', False, Decimal('7.5')], - ['USA', 'Tagalog', False, Decimal('0.4')], - ['USA', 'Vietnamese', False, Decimal('0.2')], - ['IND', 'Asami', False, Decimal('1.5')], - ['IND', 'Bengali', False, Decimal('8.2')], - ['IND', 'Gujarati', False, Decimal('4.8')], - ['IND', 'Hindi', True, Decimal('39.9')], - ['IND', 'Kannada', False, Decimal('3.9')], - ['IND', 'Malajalam', False, Decimal('3.6')], - 
['IND', 'Marathi', False, Decimal('7.4')], - ['IND', 'Orija', False, Decimal('3.3')], - ['IND', 'Punjabi', False, Decimal('2.8')], - ['IND', 'Tamil', False, Decimal('6.3')], - ['IND', 'Telugu', False, Decimal('7.8')], - ['IND', 'Urdu', False, Decimal('5.1')], - ['CHN', 'Chinese', True, Decimal('92.0')], - ['CHN', 'Dong', False, Decimal('0.2')], - ['CHN', 'Hui', False, Decimal('0.8')], - ['CHN', 'Mantšu', False, Decimal('0.9')], - ['CHN', 'Miao', False, Decimal('0.7')], - ['CHN', 'Mongolian', False, Decimal('0.4')], - ['CHN', 'Puyi', False, Decimal('0.2')], - ['CHN', 'Tibetan', False, Decimal('0.4')], - ['CHN', 'Tujia', False, Decimal('0.5')], - ['CHN', 'Uighur', False, Decimal('0.6')], - ['CHN', 'Yi', False, Decimal('0.6')], - ['CHN', 'Zhuang', False, Decimal('1.4')], -] - - async def main(): # establish connection client = AioClient() async with client.connect('127.0.0.1', 10800): # create tables for query in [ - COUNTRY_CREATE_TABLE_QUERY, - CITY_CREATE_TABLE_QUERY, - LANGUAGE_CREATE_TABLE_QUERY, + Query.COUNTRY_CREATE_TABLE, + Query.CITY_CREATE_TABLE, + Query.LANGUAGE_CREATE_TABLE, ]: await client.sql(query) # create indices - for query in [CITY_CREATE_INDEX, LANGUAGE_CREATE_INDEX]: + for query in [Query.CITY_CREATE_INDEX, Query.LANGUAGE_CREATE_INDEX]: await client.sql(query) # load data concurrently. await asyncio.gather(*[ - client.sql(COUNTRY_INSERT_QUERY, query_args=row) for row in COUNTRY_DATA + client.sql(Query.COUNTRY_INSERT, query_args=row) for row in TestData.COUNTRY ]) await asyncio.gather(*[ - client.sql(CITY_INSERT_QUERY, query_args=row) for row in CITY_DATA + client.sql(Query.CITY_INSERT, query_args=row) for row in TestData.CITY ]) await asyncio.gather(*[ - client.sql(LANGUAGE_INSERT_QUERY, query_args=row) for row in LANGUAGE_DATA + client.sql(Query.LANGUAGE_INSERT, query_args=row) for row in TestData.LANGUAGE ]) # 10 most populated cities (with pagination) - MOST_POPULATED_QUERY = ''' - SELECT name, population FROM City ORDER BY population DESC LIMIT 10''' - - async with client.sql(MOST_POPULATED_QUERY) as cursor: + async with client.sql('SELECT name, population FROM City ORDER BY population DESC LIMIT 10') as cursor: print('Most 10 populated cities:') - async for row in cursor: print(row) # Most 10 populated cities: @@ -241,39 +64,38 @@ async def main(): # ['Calcutta [Kolkata]', 4399819] # ['Wuhan', 4344600] # ['Harbin', 4289800] - + print('-' * 20) # 10 most populated cities in 3 countries (with pagination and header row) - MOST_POPULATED_IN_3_COUNTRIES_QUERY = ''' + most_populated_in_3_countries = ''' SELECT country.name as country_name, city.name as city_name, MAX(city.population) AS max_pop FROM country JOIN city ON city.countrycode = country.code WHERE country.code IN ('USA','IND','CHN') GROUP BY country.name, city.name ORDER BY max_pop DESC LIMIT 10 ''' - async with client.sql(MOST_POPULATED_IN_3_COUNTRIES_QUERY, include_field_names=True) as cursor: + async with client.sql(most_populated_in_3_countries, include_field_names=True) as cursor: print('Most 10 populated cities in USA, India and China:') - print(await cursor.__anext__()) - print('----------------------------------------') + table_str_pattern = '{:15}\t| {:20}\t| {}' + print(table_str_pattern.format(*await cursor.__anext__())) + print('*' * 50) async for row in cursor: - print(row) + print(table_str_pattern.format(*row)) # Most 10 populated cities in USA, India and China: - # ['COUNTRY_NAME', 'CITY_NAME', 'MAX_POP'] - # ---------------------------------------- - # ['India', 'Mumbai (Bombay)', 10500000] - # 
['China', 'Shanghai', 9696300] - # ['United States', 'New York', 8008278] - # ['China', 'Peking', 7472000] - # ['India', 'Delhi', 7206704] - # ['China', 'Chongqing', 6351600] - # ['China', 'Tianjin', 5286800] - # ['India', 'Calcutta [Kolkata]', 4399819] - # ['China', 'Wuhan', 4344600] - # ['China', 'Harbin', 4289800] - + # COUNTRY_NAME | CITY_NAME | MAX_POP + # ************************************************** + # India | Mumbai (Bombay) | 10500000 + # China | Shanghai | 9696300 + # United States | New York | 8008278 + # China | Peking | 7472000 + # India | Delhi | 7206704 + # China | Chongqing | 6351600 + # China | Tianjin | 5286800 + # India | Calcutta [Kolkata] | 4399819 + # China | Wuhan | 4344600 + # China | Harbin | 4289800 + print('-' * 20) # show city info - CITY_INFO_QUERY = '''SELECT * FROM City WHERE id = ?''' - - async with client.sql(CITY_INFO_QUERY, query_args=[3802], include_field_names=True) as cursor: + async with client.sql('SELECT * FROM City WHERE id = ?', query_args=[3802], include_field_names=True) as cursor: field_names = await cursor.__anext__() field_data = await cursor.__anext__() @@ -289,13 +111,9 @@ async def main(): # clean up concurrently. await asyncio.gather(*[ - client.sql(DROP_TABLE_QUERY.format(table_name)) for table_name in [ - CITY_TABLE_NAME, - LANGUAGE_TABLE_NAME, - COUNTRY_TABLE_NAME, - ] + client.sql(Query.DROP_TABLE.format(table_name.value)) for table_name in TableNames ]) -loop = asyncio.get_event_loop() -loop.run_until_complete(main()) +if __name__ == '__main__': + asyncio.run(main()) diff --git a/examples/binary_basics.py b/examples/binary_basics.py index 50fa933..835cdc4 100644 --- a/examples/binary_basics.py +++ b/examples/binary_basics.py @@ -13,17 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -from collections import OrderedDict - from pyignite import Client, GenericObjectMeta from pyignite.datatypes import String, IntObject -class Person(metaclass=GenericObjectMeta, schema=OrderedDict([ - ('first_name', String), - ('last_name', String), - ('age', IntObject), -])): +class Person(metaclass=GenericObjectMeta, schema={ + 'first_name': String, + 'last_name': String, + 'age': IntObject +}): pass @@ -50,3 +48,5 @@ class Person(metaclass=GenericObjectMeta, schema=OrderedDict([ client.register_binary_type(Person) Person = person.__class__ + # cleanup + person_cache.destroy() diff --git a/examples/create_binary.py b/examples/create_binary.py index d2c2ce4..d0047f5 100644 --- a/examples/create_binary.py +++ b/examples/create_binary.py @@ -13,12 +13,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from collections import OrderedDict - from pyignite import Client, GenericObjectMeta from pyignite.datatypes import DoubleObject, IntObject, String from pyignite.datatypes.prop_codes import PROP_NAME, PROP_SQL_SCHEMA, PROP_QUERY_ENTITIES + +class Student( + metaclass=GenericObjectMeta, + type_name='SQL_PUBLIC_STUDENT_TYPE', + schema={'NAME': String, 'LOGIN': String, 'AGE': IntObject, 'GPA': DoubleObject} +): + pass + + client = Client() with client.connect('127.0.0.1', 10800): student_cache = client.create_cache({ @@ -61,18 +68,6 @@ ], }) - class Student( - metaclass=GenericObjectMeta, - type_name='SQL_PUBLIC_STUDENT_TYPE', - schema=OrderedDict([ - ('NAME', String), - ('LOGIN', String), - ('AGE', IntObject), - ('GPA', DoubleObject), - ]) - ): - pass - student_cache.put( 1, Student(LOGIN='jdoe', NAME='John Doe', AGE=17, GPA=4.25), diff --git a/examples/expiry_policy.py b/examples/expiry_policy.py index 3dbe54b..8482e51 100644 --- a/examples/expiry_policy.py +++ b/examples/expiry_policy.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import asyncio import time from datetime import timedelta @@ -22,7 +23,7 @@ from pyignite.exceptions import NotSupportedByClusterError -def main(): +def sync_actions(): print("Running sync ExpiryPolicy example.") client = Client() @@ -63,7 +64,7 @@ def main(): simple_cache.destroy() -async def async_main(): +async def async_actions(): print("Running async ExpiryPolicy example.") client = AioClient() @@ -107,8 +108,24 @@ async def async_main(): finally: await simple_cache.destroy() + if __name__ == '__main__': - main() + sync_actions() + print('-' * 20) + asyncio.run(async_actions()) - loop = asyncio.get_event_loop() - loop.run_until_complete(async_main()) +# Running sync ExpiryPolicy example. +# Create cache with expiry policy. +# key = 1, value = 1 +# key = 1, value = None +# Create simple Cache and set TTL through `with_expire_policy` +# key = 1, value = 1 +# key = 1, value = None +# -------------------- +# Running async ExpiryPolicy example. +# Create cache with expiry policy. +# key = 1, value = 1 +# key = 1, value = None +# Create simple Cache and set TTL through `with_expire_policy` +# key = 1, value = 1 +# key = 1, value = None diff --git a/examples/get_and_put_complex.py b/examples/get_and_put_complex.py index cff0c2f..0938379 100644 --- a/examples/get_and_put_complex.py +++ b/examples/get_and_put_complex.py @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from collections import OrderedDict - from pyignite import Client from pyignite.datatypes import CollectionObject, MapObject, ObjectArrayObject @@ -23,19 +21,19 @@ with client.connect('127.0.0.1', 10800): my_cache = client.get_or_create_cache('my cache') - value = OrderedDict([(1, 'test'), ('key', 2.0)]) + value = {1: 'test', 'key': 2.0} # saving ordered dictionary type_id = MapObject.LINKED_HASH_MAP my_cache.put('my dict', (type_id, value)) result = my_cache.get('my dict') - print(result) # (2, OrderedDict([(1, 'test'), ('key', 2.0)])) + print(result) # (2, {1: 'test', 'key': 2.0}) # saving unordered dictionary type_id = MapObject.HASH_MAP my_cache.put('my dict', (type_id, value)) result = my_cache.get('my dict') - print(result) # (1, {'key': 2.0, 1: 'test'}) + print(result) # (1, {1: 'test', 'key': 2.0}) type_id = CollectionObject.LINKED_LIST value = [1, '2', 3.0] diff --git a/examples/helpers/converters.py b/examples/helpers/converters.py new file mode 100644 index 0000000..4122c49 --- /dev/null +++ b/examples/helpers/converters.py @@ -0,0 +1,5 @@ +def obj_to_dict(obj): + result = {'type_name': obj.type_name} + for data in obj.schema: + result.update({data: getattr(obj, data)}) + return result diff --git a/examples/helpers/sql_helper.py b/examples/helpers/sql_helper.py new file mode 100644 index 0000000..f13d2ed --- /dev/null +++ b/examples/helpers/sql_helper.py @@ -0,0 +1,193 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from decimal import Decimal +from enum import Enum + + +class TableNames(Enum): + COUNTRY_TABLE_NAME = 'Country' + CITY_TABLE_NAME = 'City' + LANGUAGE_TABLE_NAME = 'CountryLanguage' + + +class Query: + COUNTRY_CREATE_TABLE = '''CREATE TABLE Country ( + Code CHAR(3) PRIMARY KEY, + Name CHAR(52), + Continent CHAR(50), + Region CHAR(26), + SurfaceArea DECIMAL(10,2), + IndepYear SMALLINT(6), + Population INT(11), + LifeExpectancy DECIMAL(3,1), + GNP DECIMAL(10,2), + GNPOld DECIMAL(10,2), + LocalName CHAR(45), + GovernmentForm CHAR(45), + HeadOfState CHAR(60), + Capital INT(11), + Code2 CHAR(2) + )''' + + COUNTRY_INSERT = '''INSERT INTO Country( + Code, Name, Continent, Region, + SurfaceArea, IndepYear, Population, + LifeExpectancy, GNP, GNPOld, + LocalName, GovernmentForm, HeadOfState, + Capital, Code2 + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''' + + CITY_CREATE_TABLE = '''CREATE TABLE City ( + ID INT(11), + Name CHAR(35), + CountryCode CHAR(3), + District CHAR(20), + Population INT(11), + PRIMARY KEY (ID, CountryCode) + ) WITH "affinityKey=CountryCode"''' + + CITY_CREATE_INDEX = 'CREATE INDEX idx_country_code ON city (CountryCode)' + + CITY_INSERT = '''INSERT INTO City( + ID, Name, CountryCode, District, Population + ) VALUES (?, ?, ?, ?, ?)''' + + LANGUAGE_CREATE_TABLE = '''CREATE TABLE CountryLanguage ( + CountryCode CHAR(3), + Language CHAR(30), + IsOfficial BOOLEAN, + Percentage DECIMAL(4,1), + PRIMARY KEY (CountryCode, Language) + ) WITH "affinityKey=CountryCode"''' + + LANGUAGE_CREATE_INDEX = 'CREATE INDEX idx_lang_country_code ON CountryLanguage (CountryCode)' + + LANGUAGE_INSERT = '''INSERT INTO CountryLanguage( + CountryCode, Language, IsOfficial, Percentage + ) VALUES (?, ?, ?, ?)''' + + DROP_TABLE = 'DROP TABLE {} IF EXISTS' + + +class TestData: + COUNTRY = [ + [ + 'USA', 'United States', 'North America', 'North America', + Decimal('9363520.00'), 1776, 278357000, + Decimal('77.1'), Decimal('8510700.00'), Decimal('8110900.00'), + 'United States', 'Federal Republic', 'George W. 
Bush', + 3813, 'US', + ], + [ + 'IND', 'India', 'Asia', 'Southern and Central Asia', + Decimal('3287263.00'), 1947, 1013662000, + Decimal('62.5'), Decimal('447114.00'), Decimal('430572.00'), + 'Bharat/India', 'Federal Republic', 'Kocheril Raman Narayanan', + 1109, 'IN', + ], + [ + 'CHN', 'China', 'Asia', 'Eastern Asia', + Decimal('9572900.00'), -1523, 1277558000, + Decimal('71.4'), Decimal('982268.00'), Decimal('917719.00'), + 'Zhongquo', 'PeoplesRepublic', 'Jiang Zemin', + 1891, 'CN', + ], + ] + + CITY = [ + [3793, 'New York', 'USA', 'New York', 8008278], + [3794, 'Los Angeles', 'USA', 'California', 3694820], + [3795, 'Chicago', 'USA', 'Illinois', 2896016], + [3796, 'Houston', 'USA', 'Texas', 1953631], + [3797, 'Philadelphia', 'USA', 'Pennsylvania', 1517550], + [3798, 'Phoenix', 'USA', 'Arizona', 1321045], + [3799, 'San Diego', 'USA', 'California', 1223400], + [3800, 'Dallas', 'USA', 'Texas', 1188580], + [3801, 'San Antonio', 'USA', 'Texas', 1144646], + [3802, 'Detroit', 'USA', 'Michigan', 951270], + [3803, 'San Jose', 'USA', 'California', 894943], + [3804, 'Indianapolis', 'USA', 'Indiana', 791926], + [3805, 'San Francisco', 'USA', 'California', 776733], + [1024, 'Mumbai (Bombay)', 'IND', 'Maharashtra', 10500000], + [1025, 'Delhi', 'IND', 'Delhi', 7206704], + [1026, 'Calcutta [Kolkata]', 'IND', 'West Bengali', 4399819], + [1027, 'Chennai (Madras)', 'IND', 'Tamil Nadu', 3841396], + [1028, 'Hyderabad', 'IND', 'Andhra Pradesh', 2964638], + [1029, 'Ahmedabad', 'IND', 'Gujarat', 2876710], + [1030, 'Bangalore', 'IND', 'Karnataka', 2660088], + [1031, 'Kanpur', 'IND', 'Uttar Pradesh', 1874409], + [1032, 'Nagpur', 'IND', 'Maharashtra', 1624752], + [1033, 'Lucknow', 'IND', 'Uttar Pradesh', 1619115], + [1034, 'Pune', 'IND', 'Maharashtra', 1566651], + [1035, 'Surat', 'IND', 'Gujarat', 1498817], + [1036, 'Jaipur', 'IND', 'Rajasthan', 1458483], + [1890, 'Shanghai', 'CHN', 'Shanghai', 9696300], + [1891, 'Peking', 'CHN', 'Peking', 7472000], + [1892, 'Chongqing', 'CHN', 'Chongqing', 6351600], + [1893, 'Tianjin', 'CHN', 'Tianjin', 5286800], + [1894, 'Wuhan', 'CHN', 'Hubei', 4344600], + [1895, 'Harbin', 'CHN', 'Heilongjiang', 4289800], + [1896, 'Shenyang', 'CHN', 'Liaoning', 4265200], + [1897, 'Kanton [Guangzhou]', 'CHN', 'Guangdong', 4256300], + [1898, 'Chengdu', 'CHN', 'Sichuan', 3361500], + [1899, 'Nanking [Nanjing]', 'CHN', 'Jiangsu', 2870300], + [1900, 'Changchun', 'CHN', 'Jilin', 2812000], + [1901, 'Xi´an', 'CHN', 'Shaanxi', 2761400], + [1902, 'Dalian', 'CHN', 'Liaoning', 2697000], + [1903, 'Qingdao', 'CHN', 'Shandong', 2596000], + [1904, 'Jinan', 'CHN', 'Shandong', 2278100], + [1905, 'Hangzhou', 'CHN', 'Zhejiang', 2190500], + [1906, 'Zhengzhou', 'CHN', 'Henan', 2107200], + ] + + LANGUAGE = [ + ['USA', 'Chinese', False, Decimal('0.6')], + ['USA', 'English', True, Decimal('86.2')], + ['USA', 'French', False, Decimal('0.7')], + ['USA', 'German', False, Decimal('0.7')], + ['USA', 'Italian', False, Decimal('0.6')], + ['USA', 'Japanese', False, Decimal('0.2')], + ['USA', 'Korean', False, Decimal('0.3')], + ['USA', 'Polish', False, Decimal('0.3')], + ['USA', 'Portuguese', False, Decimal('0.2')], + ['USA', 'Spanish', False, Decimal('7.5')], + ['USA', 'Tagalog', False, Decimal('0.4')], + ['USA', 'Vietnamese', False, Decimal('0.2')], + ['IND', 'Asami', False, Decimal('1.5')], + ['IND', 'Bengali', False, Decimal('8.2')], + ['IND', 'Gujarati', False, Decimal('4.8')], + ['IND', 'Hindi', True, Decimal('39.9')], + ['IND', 'Kannada', False, Decimal('3.9')], + ['IND', 'Malajalam', False, Decimal('3.6')], + ['IND', 
'Marathi', False, Decimal('7.4')], + ['IND', 'Orija', False, Decimal('3.3')], + ['IND', 'Punjabi', False, Decimal('2.8')], + ['IND', 'Tamil', False, Decimal('6.3')], + ['IND', 'Telugu', False, Decimal('7.8')], + ['IND', 'Urdu', False, Decimal('5.1')], + ['CHN', 'Chinese', True, Decimal('92.0')], + ['CHN', 'Dong', False, Decimal('0.2')], + ['CHN', 'Hui', False, Decimal('0.8')], + ['CHN', 'Mantšu', False, Decimal('0.9')], + ['CHN', 'Miao', False, Decimal('0.7')], + ['CHN', 'Mongolian', False, Decimal('0.4')], + ['CHN', 'Puyi', False, Decimal('0.2')], + ['CHN', 'Tibetan', False, Decimal('0.4')], + ['CHN', 'Tujia', False, Decimal('0.5')], + ['CHN', 'Uighur', False, Decimal('0.6')], + ['CHN', 'Yi', False, Decimal('0.6')], + ['CHN', 'Zhuang', False, Decimal('1.4')], + ] diff --git a/examples/migrate_binary.py b/examples/migrate_binary.py index c22fa4f..0c7f518 100644 --- a/examples/migrate_binary.py +++ b/examples/migrate_binary.py @@ -13,76 +13,74 @@ # See the License for the specific language governing permissions and # limitations under the License. -from collections import OrderedDict from datetime import date from decimal import Decimal +from pprint import pprint +from helpers.converters import obj_to_dict from pyignite import Client, GenericObjectMeta -from pyignite.datatypes import ( - BoolObject, DateObject, DecimalObject, LongObject, String, -) - +from pyignite.datatypes import BoolObject, DateObject, DecimalObject, LongObject, String # prepare old data -old_schema = OrderedDict([ - ('date', DateObject), - ('reported', BoolObject), - ('purpose', String), - ('sum', DecimalObject), - ('recipient', String), - ('cashier_id', LongObject), -]) - -old_data = [ - (1, { +old_schema = {'date': DateObject, + 'reported': BoolObject, + 'purpose': String, + 'sum': DecimalObject, + 'recipient': String, + 'cashier_id': LongObject + } + +old_data = { + 1: { 'date': date(2017, 9, 21), 'reported': True, 'purpose': 'Praesent eget fermentum massa', 'sum': Decimal('666.67'), 'recipient': 'John Doe', 'cashier_id': 8, - }), - (2, { + }, + 2: { 'date': date(2017, 10, 11), 'reported': True, 'purpose': 'Proin in bibendum nulla', 'sum': Decimal('333.33'), 'recipient': 'Jane Roe', 'cashier_id': 9, - }), - (3, { + }, + 3: { 'date': date(2017, 10, 11), 'reported': True, 'purpose': 'Suspendisse nec dolor auctor, scelerisque ex eu, iaculis odio', 'sum': Decimal('400.0'), 'recipient': 'Jane Roe', 'cashier_id': 8, - }), - (4, { + }, + 4: { 'date': date(2017, 10, 24), 'reported': False, 'purpose': 'Quisque ut leo ligula', 'sum': Decimal('1234.5'), 'recipient': 'Joe Bloggs', 'cashier_id': 10, - }), - (5, { + }, + 5: { 'date': date(2017, 12, 1), 'reported': True, 'purpose': 'Quisque ut leo ligula', 'sum': Decimal('800.0'), 'recipient': 'Richard Public', 'cashier_id': 12, - }), - (6, { + }, + 6: { 'date': date(2017, 12, 1), 'reported': True, 'purpose': 'Aenean eget bibendum lorem, a luctus libero', 'sum': Decimal('135.79'), 'recipient': 'Joe Bloggs', 'cashier_id': 10, - }), -] + } +} + # - add `report_date` # - set `report_date` to the current date if `reported` is True, None if False @@ -110,13 +108,14 @@ class ExpenseVoucher( with client.connect('127.0.0.1', 10800): accounting = client.get_or_create_cache('accounting') - for key, value in old_data: - accounting.put(key, ExpenseVoucher(**value)) + for item, value in old_data.items(): + print(item) + accounting.put(item, ExpenseVoucher(**value)) data_classes = client.query_binary_type('ExpenseVoucher') print(data_classes) # { - # -231598180: + # {547629991: , -231598180: } # 
} s_id, data_class = data_classes.popitem() @@ -142,16 +141,16 @@ def migrate(cache, data, new_class): """ Migrate given data pages. """ for key, old_value in data: # read data - print(old_value) - # ExpenseVoucher( - # date=datetime(2017, 9, 21, 0, 0), - # reported=True, - # purpose='Praesent eget fermentum massa', - # sum=Decimal('666.67'), - # recipient='John Doe', - # cashier_id=8, - # version=1 - # ) + print('Old value:') + pprint(obj_to_dict(old_value)) + # Old value: + # {'cashier_id': 10, + # 'date': datetime.datetime(2017, 12, 1, 0, 0), + # 'purpose': 'Aenean eget bibendum lorem, a luctus libero', + # 'recipient': 'Joe Bloggs', + # 'reported': True, + # 'sum': Decimal('135.79'), + # 'type_name': 'ExpenseVoucher'} # create new binary object new_value = new_class() @@ -169,16 +168,18 @@ def migrate(cache, data, new_class): # verify data verify = cache.get(key) - print(verify) - # ExpenseVoucherV2( - # purpose='Praesent eget fermentum massa', - # sum=Decimal('666.67'), - # recipient='John Doe', - # cashier_id=8, - # expense_date=datetime(2017, 9, 21, 0, 0), - # report_date=datetime(2018, 8, 29, 0, 0), - # version=1, - # ) + print('New value:') + pprint(obj_to_dict(verify)) + # New value: + # {'cashier_id': 10, + # 'expense_date': datetime.datetime(2017, 12, 1, 0, 0), + # 'purpose': 'Aenean eget bibendum lorem, a luctus libero', + # 'recipient': 'Joe Bloggs', + # 'report_date': datetime.datetime(2022, 5, 6, 0, 0), + # 'sum': Decimal('135.79'), + # 'type_name': 'ExpenseVoucher'} + + print('-' * 20) # migrate data diff --git a/examples/read_binary.py b/examples/read_binary.py index fe642d8..92404ca 100644 --- a/examples/read_binary.py +++ b/examples/read_binary.py @@ -13,279 +13,119 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from decimal import Decimal +from pprint import pprint +from helpers.converters import obj_to_dict +from helpers.sql_helper import TableNames, Query, TestData from pyignite import Client from pyignite.datatypes.prop_codes import PROP_NAME, PROP_QUERY_ENTITIES - -COUNTRY_TABLE_NAME = 'Country' -CITY_TABLE_NAME = 'City' -LANGUAGE_TABLE_NAME = 'CountryLanguage' - -COUNTRY_CREATE_TABLE_QUERY = '''CREATE TABLE Country ( - Code CHAR(3) PRIMARY KEY, - Name CHAR(52), - Continent CHAR(50), - Region CHAR(26), - SurfaceArea DECIMAL(10,2), - IndepYear SMALLINT(6), - Population INT(11), - LifeExpectancy DECIMAL(3,1), - GNP DECIMAL(10,2), - GNPOld DECIMAL(10,2), - LocalName CHAR(45), - GovernmentForm CHAR(45), - HeadOfState CHAR(60), - Capital INT(11), - Code2 CHAR(2) -)''' - -COUNTRY_INSERT_QUERY = '''INSERT INTO Country( - Code, Name, Continent, Region, - SurfaceArea, IndepYear, Population, - LifeExpectancy, GNP, GNPOld, - LocalName, GovernmentForm, HeadOfState, - Capital, Code2 -) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''' - -CITY_CREATE_TABLE_QUERY = '''CREATE TABLE City ( - ID INT(11), - Name CHAR(35), - CountryCode CHAR(3), - District CHAR(20), - Population INT(11), - PRIMARY KEY (ID, CountryCode) -) WITH "affinityKey=CountryCode"''' - -CITY_CREATE_INDEX = ''' -CREATE INDEX idx_country_code ON city (CountryCode)''' - -CITY_INSERT_QUERY = '''INSERT INTO City( - ID, Name, CountryCode, District, Population -) VALUES (?, ?, ?, ?, ?)''' - -LANGUAGE_CREATE_TABLE_QUERY = '''CREATE TABLE CountryLanguage ( - CountryCode CHAR(3), - Language CHAR(30), - IsOfficial BOOLEAN, - Percentage DECIMAL(4,1), - PRIMARY KEY (CountryCode, Language) -) WITH "affinityKey=CountryCode"''' - -LANGUAGE_CREATE_INDEX = ''' -CREATE INDEX idx_lang_country_code ON CountryLanguage (CountryCode)''' - -LANGUAGE_INSERT_QUERY = '''INSERT INTO CountryLanguage( - CountryCode, Language, IsOfficial, Percentage -) VALUES (?, ?, ?, ?)''' - -DROP_TABLE_QUERY = '''DROP TABLE {} IF EXISTS''' - -COUNTRY_DATA = [ - [ - 'USA', 'United States', 'North America', 'North America', - Decimal('9363520.00'), 1776, 278357000, - Decimal('77.1'), Decimal('8510700.00'), Decimal('8110900.00'), - 'United States', 'Federal Republic', 'George W. 
Bush', - 3813, 'US', - ], - [ - 'IND', 'India', 'Asia', 'Southern and Central Asia', - Decimal('3287263.00'), 1947, 1013662000, - Decimal('62.5'), Decimal('447114.00'), Decimal('430572.00'), - 'Bharat/India', 'Federal Republic', 'Kocheril Raman Narayanan', - 1109, 'IN', - ], - [ - 'CHN', 'China', 'Asia', 'Eastern Asia', - Decimal('9572900.00'), -1523, 1277558000, - Decimal('71.4'), Decimal('982268.00'), Decimal('917719.00'), - 'Zhongquo', 'PeoplesRepublic', 'Jiang Zemin', - 1891, 'CN', - ], -] - -CITY_DATA = [ - [3793, 'New York', 'USA', 'New York', 8008278], - [3794, 'Los Angeles', 'USA', 'California', 3694820], - [3795, 'Chicago', 'USA', 'Illinois', 2896016], - [3796, 'Houston', 'USA', 'Texas', 1953631], - [3797, 'Philadelphia', 'USA', 'Pennsylvania', 1517550], - [3798, 'Phoenix', 'USA', 'Arizona', 1321045], - [3799, 'San Diego', 'USA', 'California', 1223400], - [3800, 'Dallas', 'USA', 'Texas', 1188580], - [3801, 'San Antonio', 'USA', 'Texas', 1144646], - [3802, 'Detroit', 'USA', 'Michigan', 951270], - [3803, 'San Jose', 'USA', 'California', 894943], - [3804, 'Indianapolis', 'USA', 'Indiana', 791926], - [3805, 'San Francisco', 'USA', 'California', 776733], - [1024, 'Mumbai (Bombay)', 'IND', 'Maharashtra', 10500000], - [1025, 'Delhi', 'IND', 'Delhi', 7206704], - [1026, 'Calcutta [Kolkata]', 'IND', 'West Bengali', 4399819], - [1027, 'Chennai (Madras)', 'IND', 'Tamil Nadu', 3841396], - [1028, 'Hyderabad', 'IND', 'Andhra Pradesh', 2964638], - [1029, 'Ahmedabad', 'IND', 'Gujarat', 2876710], - [1030, 'Bangalore', 'IND', 'Karnataka', 2660088], - [1031, 'Kanpur', 'IND', 'Uttar Pradesh', 1874409], - [1032, 'Nagpur', 'IND', 'Maharashtra', 1624752], - [1033, 'Lucknow', 'IND', 'Uttar Pradesh', 1619115], - [1034, 'Pune', 'IND', 'Maharashtra', 1566651], - [1035, 'Surat', 'IND', 'Gujarat', 1498817], - [1036, 'Jaipur', 'IND', 'Rajasthan', 1458483], - [1890, 'Shanghai', 'CHN', 'Shanghai', 9696300], - [1891, 'Peking', 'CHN', 'Peking', 7472000], - [1892, 'Chongqing', 'CHN', 'Chongqing', 6351600], - [1893, 'Tianjin', 'CHN', 'Tianjin', 5286800], - [1894, 'Wuhan', 'CHN', 'Hubei', 4344600], - [1895, 'Harbin', 'CHN', 'Heilongjiang', 4289800], - [1896, 'Shenyang', 'CHN', 'Liaoning', 4265200], - [1897, 'Kanton [Guangzhou]', 'CHN', 'Guangdong', 4256300], - [1898, 'Chengdu', 'CHN', 'Sichuan', 3361500], - [1899, 'Nanking [Nanjing]', 'CHN', 'Jiangsu', 2870300], - [1900, 'Changchun', 'CHN', 'Jilin', 2812000], - [1901, 'Xi´an', 'CHN', 'Shaanxi', 2761400], - [1902, 'Dalian', 'CHN', 'Liaoning', 2697000], - [1903, 'Qingdao', 'CHN', 'Shandong', 2596000], - [1904, 'Jinan', 'CHN', 'Shandong', 2278100], - [1905, 'Hangzhou', 'CHN', 'Zhejiang', 2190500], - [1906, 'Zhengzhou', 'CHN', 'Henan', 2107200], -] - -LANGUAGE_DATA = [ - ['USA', 'Chinese', False, Decimal('0.6')], - ['USA', 'English', True, Decimal('86.2')], - ['USA', 'French', False, Decimal('0.7')], - ['USA', 'German', False, Decimal('0.7')], - ['USA', 'Italian', False, Decimal('0.6')], - ['USA', 'Japanese', False, Decimal('0.2')], - ['USA', 'Korean', False, Decimal('0.3')], - ['USA', 'Polish', False, Decimal('0.3')], - ['USA', 'Portuguese', False, Decimal('0.2')], - ['USA', 'Spanish', False, Decimal('7.5')], - ['USA', 'Tagalog', False, Decimal('0.4')], - ['USA', 'Vietnamese', False, Decimal('0.2')], - ['IND', 'Asami', False, Decimal('1.5')], - ['IND', 'Bengali', False, Decimal('8.2')], - ['IND', 'Gujarati', False, Decimal('4.8')], - ['IND', 'Hindi', True, Decimal('39.9')], - ['IND', 'Kannada', False, Decimal('3.9')], - ['IND', 'Malajalam', False, Decimal('3.6')], - 
['IND', 'Marathi', False, Decimal('7.4')], - ['IND', 'Orija', False, Decimal('3.3')], - ['IND', 'Punjabi', False, Decimal('2.8')], - ['IND', 'Tamil', False, Decimal('6.3')], - ['IND', 'Telugu', False, Decimal('7.8')], - ['IND', 'Urdu', False, Decimal('5.1')], - ['CHN', 'Chinese', True, Decimal('92.0')], - ['CHN', 'Dong', False, Decimal('0.2')], - ['CHN', 'Hui', False, Decimal('0.8')], - ['CHN', 'Mantšu', False, Decimal('0.9')], - ['CHN', 'Miao', False, Decimal('0.7')], - ['CHN', 'Mongolian', False, Decimal('0.4')], - ['CHN', 'Puyi', False, Decimal('0.2')], - ['CHN', 'Tibetan', False, Decimal('0.4')], - ['CHN', 'Tujia', False, Decimal('0.5')], - ['CHN', 'Uighur', False, Decimal('0.6')], - ['CHN', 'Yi', False, Decimal('0.6')], - ['CHN', 'Zhuang', False, Decimal('1.4')], -] - - # establish connection client = Client() with client.connect('127.0.0.1', 10800): - # create tables for query in [ - COUNTRY_CREATE_TABLE_QUERY, - CITY_CREATE_TABLE_QUERY, - LANGUAGE_CREATE_TABLE_QUERY, + Query.COUNTRY_CREATE_TABLE, + Query.CITY_CREATE_TABLE, + Query.LANGUAGE_CREATE_TABLE, ]: client.sql(query) # create indices - for query in [CITY_CREATE_INDEX, LANGUAGE_CREATE_INDEX]: + for query in [Query.CITY_CREATE_INDEX, Query.LANGUAGE_CREATE_INDEX]: client.sql(query) # load data - for row in COUNTRY_DATA: - client.sql(COUNTRY_INSERT_QUERY, query_args=row) + for row in TestData.COUNTRY: + client.sql(Query.COUNTRY_INSERT, query_args=row) - for row in CITY_DATA: - client.sql(CITY_INSERT_QUERY, query_args=row) + for row in TestData.CITY: + client.sql(Query.CITY_INSERT, query_args=row) - for row in LANGUAGE_DATA: - client.sql(LANGUAGE_INSERT_QUERY, query_args=row) + for row in TestData.LANGUAGE: + client.sql(Query.LANGUAGE_INSERT, query_args=row) # examine the storage result = client.get_cache_names() - print(result) - # [ - # 'SQL_PUBLIC_CITY', - # 'SQL_PUBLIC_COUNTRY', - # 'PUBLIC', - # 'SQL_PUBLIC_COUNTRYLANGUAGE' - # ] + pprint(result) + # ['SQL_PUBLIC_CITY', 'SQL_PUBLIC_COUNTRY', 'SQL_PUBLIC_COUNTRYLANGUAGE'] city_cache = client.get_or_create_cache('SQL_PUBLIC_CITY') - print(city_cache.settings[PROP_NAME]) + pprint(city_cache.settings[PROP_NAME]) # 'SQL_PUBLIC_CITY' - print(city_cache.settings[PROP_QUERY_ENTITIES]) - # { - # 'key_type_name': ( - # 'SQL_PUBLIC_CITY_9ac8e17a_2f99_45b7_958e_06da32882e9d_KEY' - # ), - # 'value_type_name': ( - # 'SQL_PUBLIC_CITY_9ac8e17a_2f99_45b7_958e_06da32882e9d' - # ), - # 'table_name': 'CITY', - # 'query_fields': [ - # ... - # ], - # 'field_name_aliases': [ - # ... 
- # ], - # 'query_indexes': [] - # } - + pprint(city_cache.settings[PROP_QUERY_ENTITIES]) + # [{'field_name_aliases': [{'alias': 'DISTRICT', 'field_name': 'DISTRICT'}, + # {'alias': 'POPULATION', 'field_name': 'POPULATION'}, + # {'alias': 'COUNTRYCODE', 'field_name': 'COUNTRYCODE'}, + # {'alias': 'ID', 'field_name': 'ID'}, + # {'alias': 'NAME', 'field_name': 'NAME'}], + # 'key_field_name': None, + # 'key_type_name': 'SQL_PUBLIC_CITY_081f37cc8ac72b10f08ab1273b744497_KEY', + # 'query_fields': [{'default_value': None, + # 'is_key_field': True, + # 'is_notnull_constraint_field': False, + # 'name': 'ID', + # 'precision': -1, + # 'scale': -1, + # 'type_name': 'java.lang.Integer'}, + # {'default_value': None, + # 'is_key_field': False, + # 'is_notnull_constraint_field': False, + # 'name': 'NAME', + # 'precision': 35, + # 'scale': -1, + # 'type_name': 'java.lang.String'}, + # {'default_value': None, + # 'is_key_field': True, + # 'is_notnull_constraint_field': False, + # 'name': 'COUNTRYCODE', + # 'precision': 3, + # 'scale': -1, + # 'type_name': 'java.lang.String'}, + # {'default_value': None, + # 'is_key_field': False, + # 'is_notnull_constraint_field': False, + # 'name': 'DISTRICT', + # 'precision': 20, + # 'scale': -1, + # 'type_name': 'java.lang.String'}, + # {'default_value': None, + # 'is_key_field': False, + # 'is_notnull_constraint_field': False, + # 'name': 'POPULATION', + # 'precision': -1, + # 'scale': -1, + # 'type_name': 'java.lang.Integer'}], + # 'query_indexes': [], + # 'table_name': 'CITY', + # 'value_field_name': None, + # 'value_type_name': 'SQL_PUBLIC_CITY_081f37cc8ac72b10f08ab1273b744497'}] + + print('-' * 20) with city_cache.scan() as cursor: - print(next(cursor)) - # ( - # SQL_PUBLIC_CITY_6fe650e1_700f_4e74_867d_58f52f433c43_KEY( - # ID=1890, - # COUNTRYCODE='CHN', - # version=1 - # ), - # SQL_PUBLIC_CITY_6fe650e1_700f_4e74_867d_58f52f433c43( - # NAME='Shanghai', - # DISTRICT='Shanghai', - # POPULATION=9696300, - # version=1 - # ) - # ) - + for line in next(cursor): + pprint(obj_to_dict(line)) + # {'COUNTRYCODE': 'USA', + # 'ID': 3793, + # 'type_name': 'SQL_PUBLIC_CITY_081f37cc8ac72b10f08ab1273b744497_KEY'} + # {'DISTRICT': 'New York', + # 'NAME': 'New York', + # 'POPULATION': 8008278, + # 'type_name': 'SQL_PUBLIC_CITY_081f37cc8ac72b10f08ab1273b744497'} + + print('-' * 20) with client.sql('SELECT _KEY, _VAL FROM CITY WHERE ID = ?', query_args=[1890]) as cursor: - print(next(cursor)) - # ( - # SQL_PUBLIC_CITY_6fe650e1_700f_4e74_867d_58f52f433c43_KEY( - # ID=1890, - # COUNTRYCODE='CHN', - # version=1 - # ), - # SQL_PUBLIC_CITY_6fe650e1_700f_4e74_867d_58f52f433c43( - # NAME='Shanghai', - # DISTRICT='Shanghai', - # POPULATION=9696300, - # version=1 - # ) - # ) + for line in next(cursor): + pprint(obj_to_dict(line)) + # {'COUNTRYCODE': 'CHN', + # 'ID': 1890, + # 'type_name': 'SQL_PUBLIC_CITY_081f37cc8ac72b10f08ab1273b744497_KEY'} + # {'DISTRICT': 'Shanghai', + # 'NAME': 'Shanghai', + # 'POPULATION': 9696300, + # 'type_name': 'SQL_PUBLIC_CITY_081f37cc8ac72b10f08ab1273b744497'} # clean up - for table_name in [ - CITY_TABLE_NAME, - LANGUAGE_TABLE_NAME, - COUNTRY_TABLE_NAME, - ]: - result = client.sql(DROP_TABLE_QUERY.format(table_name)) + for table_name in TableNames: + result = client.sql(Query.DROP_TABLE.format(table_name.value)) diff --git a/examples/scans.py b/examples/scans.py index eaafa6e..9346372 100644 --- a/examples/scans.py +++ b/examples/scans.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from pprint import pprint + from pyignite import Client client = Client() @@ -39,7 +41,7 @@ # 'key_12' 12 with my_cache.scan() as cursor: - print(dict(cursor)) + pprint(dict(cursor)) # { # 'key_17': 17, # 'key_10': 10, diff --git a/examples/sql.py b/examples/sql.py index d81ff26..269b20b 100644 --- a/examples/sql.py +++ b/examples/sql.py @@ -13,215 +13,36 @@ # See the License for the specific language governing permissions and # limitations under the License. -from decimal import Decimal - +from helpers.sql_helper import TableNames, Query, TestData from pyignite import Client - -COUNTRY_TABLE_NAME = 'Country' -CITY_TABLE_NAME = 'City' -LANGUAGE_TABLE_NAME = 'CountryLanguage' - -COUNTRY_CREATE_TABLE_QUERY = '''CREATE TABLE Country ( - Code CHAR(3) PRIMARY KEY, - Name CHAR(52), - Continent CHAR(50), - Region CHAR(26), - SurfaceArea DECIMAL(10,2), - IndepYear SMALLINT(6), - Population INT(11), - LifeExpectancy DECIMAL(3,1), - GNP DECIMAL(10,2), - GNPOld DECIMAL(10,2), - LocalName CHAR(45), - GovernmentForm CHAR(45), - HeadOfState CHAR(60), - Capital INT(11), - Code2 CHAR(2) -)''' - -COUNTRY_INSERT_QUERY = '''INSERT INTO Country( - Code, Name, Continent, Region, - SurfaceArea, IndepYear, Population, - LifeExpectancy, GNP, GNPOld, - LocalName, GovernmentForm, HeadOfState, - Capital, Code2 -) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''' - -CITY_CREATE_TABLE_QUERY = '''CREATE TABLE City ( - ID INT(11), - Name CHAR(35), - CountryCode CHAR(3), - District CHAR(20), - Population INT(11), - PRIMARY KEY (ID, CountryCode) -) WITH "affinityKey=CountryCode"''' - -CITY_CREATE_INDEX = ''' -CREATE INDEX idx_country_code ON city (CountryCode)''' - -CITY_INSERT_QUERY = '''INSERT INTO City( - ID, Name, CountryCode, District, Population -) VALUES (?, ?, ?, ?, ?)''' - -LANGUAGE_CREATE_TABLE_QUERY = '''CREATE TABLE CountryLanguage ( - CountryCode CHAR(3), - Language CHAR(30), - IsOfficial BOOLEAN, - Percentage DECIMAL(4,1), - PRIMARY KEY (CountryCode, Language) -) WITH "affinityKey=CountryCode"''' - -LANGUAGE_CREATE_INDEX = ''' -CREATE INDEX idx_lang_country_code ON CountryLanguage (CountryCode)''' - -LANGUAGE_INSERT_QUERY = '''INSERT INTO CountryLanguage( - CountryCode, Language, IsOfficial, Percentage -) VALUES (?, ?, ?, ?)''' - -DROP_TABLE_QUERY = '''DROP TABLE {} IF EXISTS''' - -COUNTRY_DATA = [ - [ - 'USA', 'United States', 'North America', 'North America', - Decimal('9363520.00'), 1776, 278357000, - Decimal('77.1'), Decimal('8510700.00'), Decimal('8110900.00'), - 'United States', 'Federal Republic', 'George W. 
Bush', - 3813, 'US', - ], - [ - 'IND', 'India', 'Asia', 'Southern and Central Asia', - Decimal('3287263.00'), 1947, 1013662000, - Decimal('62.5'), Decimal('447114.00'), Decimal('430572.00'), - 'Bharat/India', 'Federal Republic', 'Kocheril Raman Narayanan', - 1109, 'IN', - ], - [ - 'CHN', 'China', 'Asia', 'Eastern Asia', - Decimal('9572900.00'), -1523, 1277558000, - Decimal('71.4'), Decimal('982268.00'), Decimal('917719.00'), - 'Zhongquo', 'PeoplesRepublic', 'Jiang Zemin', - 1891, 'CN', - ], -] - -CITY_DATA = [ - [3793, 'New York', 'USA', 'New York', 8008278], - [3794, 'Los Angeles', 'USA', 'California', 3694820], - [3795, 'Chicago', 'USA', 'Illinois', 2896016], - [3796, 'Houston', 'USA', 'Texas', 1953631], - [3797, 'Philadelphia', 'USA', 'Pennsylvania', 1517550], - [3798, 'Phoenix', 'USA', 'Arizona', 1321045], - [3799, 'San Diego', 'USA', 'California', 1223400], - [3800, 'Dallas', 'USA', 'Texas', 1188580], - [3801, 'San Antonio', 'USA', 'Texas', 1144646], - [3802, 'Detroit', 'USA', 'Michigan', 951270], - [3803, 'San Jose', 'USA', 'California', 894943], - [3804, 'Indianapolis', 'USA', 'Indiana', 791926], - [3805, 'San Francisco', 'USA', 'California', 776733], - [1024, 'Mumbai (Bombay)', 'IND', 'Maharashtra', 10500000], - [1025, 'Delhi', 'IND', 'Delhi', 7206704], - [1026, 'Calcutta [Kolkata]', 'IND', 'West Bengali', 4399819], - [1027, 'Chennai (Madras)', 'IND', 'Tamil Nadu', 3841396], - [1028, 'Hyderabad', 'IND', 'Andhra Pradesh', 2964638], - [1029, 'Ahmedabad', 'IND', 'Gujarat', 2876710], - [1030, 'Bangalore', 'IND', 'Karnataka', 2660088], - [1031, 'Kanpur', 'IND', 'Uttar Pradesh', 1874409], - [1032, 'Nagpur', 'IND', 'Maharashtra', 1624752], - [1033, 'Lucknow', 'IND', 'Uttar Pradesh', 1619115], - [1034, 'Pune', 'IND', 'Maharashtra', 1566651], - [1035, 'Surat', 'IND', 'Gujarat', 1498817], - [1036, 'Jaipur', 'IND', 'Rajasthan', 1458483], - [1890, 'Shanghai', 'CHN', 'Shanghai', 9696300], - [1891, 'Peking', 'CHN', 'Peking', 7472000], - [1892, 'Chongqing', 'CHN', 'Chongqing', 6351600], - [1893, 'Tianjin', 'CHN', 'Tianjin', 5286800], - [1894, 'Wuhan', 'CHN', 'Hubei', 4344600], - [1895, 'Harbin', 'CHN', 'Heilongjiang', 4289800], - [1896, 'Shenyang', 'CHN', 'Liaoning', 4265200], - [1897, 'Kanton [Guangzhou]', 'CHN', 'Guangdong', 4256300], - [1898, 'Chengdu', 'CHN', 'Sichuan', 3361500], - [1899, 'Nanking [Nanjing]', 'CHN', 'Jiangsu', 2870300], - [1900, 'Changchun', 'CHN', 'Jilin', 2812000], - [1901, 'Xi´an', 'CHN', 'Shaanxi', 2761400], - [1902, 'Dalian', 'CHN', 'Liaoning', 2697000], - [1903, 'Qingdao', 'CHN', 'Shandong', 2596000], - [1904, 'Jinan', 'CHN', 'Shandong', 2278100], - [1905, 'Hangzhou', 'CHN', 'Zhejiang', 2190500], - [1906, 'Zhengzhou', 'CHN', 'Henan', 2107200], -] - -LANGUAGE_DATA = [ - ['USA', 'Chinese', False, Decimal('0.6')], - ['USA', 'English', True, Decimal('86.2')], - ['USA', 'French', False, Decimal('0.7')], - ['USA', 'German', False, Decimal('0.7')], - ['USA', 'Italian', False, Decimal('0.6')], - ['USA', 'Japanese', False, Decimal('0.2')], - ['USA', 'Korean', False, Decimal('0.3')], - ['USA', 'Polish', False, Decimal('0.3')], - ['USA', 'Portuguese', False, Decimal('0.2')], - ['USA', 'Spanish', False, Decimal('7.5')], - ['USA', 'Tagalog', False, Decimal('0.4')], - ['USA', 'Vietnamese', False, Decimal('0.2')], - ['IND', 'Asami', False, Decimal('1.5')], - ['IND', 'Bengali', False, Decimal('8.2')], - ['IND', 'Gujarati', False, Decimal('4.8')], - ['IND', 'Hindi', True, Decimal('39.9')], - ['IND', 'Kannada', False, Decimal('3.9')], - ['IND', 'Malajalam', False, Decimal('3.6')], - 
['IND', 'Marathi', False, Decimal('7.4')], - ['IND', 'Orija', False, Decimal('3.3')], - ['IND', 'Punjabi', False, Decimal('2.8')], - ['IND', 'Tamil', False, Decimal('6.3')], - ['IND', 'Telugu', False, Decimal('7.8')], - ['IND', 'Urdu', False, Decimal('5.1')], - ['CHN', 'Chinese', True, Decimal('92.0')], - ['CHN', 'Dong', False, Decimal('0.2')], - ['CHN', 'Hui', False, Decimal('0.8')], - ['CHN', 'Mantšu', False, Decimal('0.9')], - ['CHN', 'Miao', False, Decimal('0.7')], - ['CHN', 'Mongolian', False, Decimal('0.4')], - ['CHN', 'Puyi', False, Decimal('0.2')], - ['CHN', 'Tibetan', False, Decimal('0.4')], - ['CHN', 'Tujia', False, Decimal('0.5')], - ['CHN', 'Uighur', False, Decimal('0.6')], - ['CHN', 'Yi', False, Decimal('0.6')], - ['CHN', 'Zhuang', False, Decimal('1.4')], -] - - # establish connection client = Client() with client.connect('127.0.0.1', 10800): - # create tables for query in [ - COUNTRY_CREATE_TABLE_QUERY, - CITY_CREATE_TABLE_QUERY, - LANGUAGE_CREATE_TABLE_QUERY, + Query.COUNTRY_CREATE_TABLE, + Query.CITY_CREATE_TABLE, + Query.LANGUAGE_CREATE_TABLE, ]: client.sql(query) # create indices - for query in [CITY_CREATE_INDEX, LANGUAGE_CREATE_INDEX]: + for query in [Query.CITY_CREATE_INDEX, Query.LANGUAGE_CREATE_INDEX]: client.sql(query) # load data - for row in COUNTRY_DATA: - client.sql(COUNTRY_INSERT_QUERY, query_args=row) + for row in TestData.COUNTRY: + client.sql(Query.COUNTRY_INSERT, query_args=row) - for row in CITY_DATA: - client.sql(CITY_INSERT_QUERY, query_args=row) + for row in TestData.CITY: + client.sql(Query.CITY_INSERT, query_args=row) - for row in LANGUAGE_DATA: - client.sql(LANGUAGE_INSERT_QUERY, query_args=row) + for row in TestData.LANGUAGE: + client.sql(Query.LANGUAGE_INSERT, query_args=row) # 10 most populated cities (with pagination) - MOST_POPULATED_QUERY = ''' - SELECT name, population FROM City ORDER BY population DESC LIMIT 10''' - - with client.sql(MOST_POPULATED_QUERY) as cursor: + with client.sql('SELECT name, population FROM City ORDER BY population DESC LIMIT 10') as cursor: print('Most 10 populated cities:') for row in cursor: print(row) @@ -236,45 +57,44 @@ # ['Calcutta [Kolkata]', 4399819] # ['Wuhan', 4344600] # ['Harbin', 4289800] - + print('-' * 20) # 10 most populated cities in 3 countries (with pagination and header row) - MOST_POPULATED_IN_3_COUNTRIES_QUERY = ''' + MOST_POPULATED_IN_3_COUNTRIES = ''' SELECT country.name as country_name, city.name as city_name, MAX(city.population) AS max_pop FROM country JOIN city ON city.countrycode = country.code WHERE country.code IN ('USA','IND','CHN') GROUP BY country.name, city.name ORDER BY max_pop DESC LIMIT 10 ''' - with client.sql(MOST_POPULATED_IN_3_COUNTRIES_QUERY, include_field_names=True) as cursor: + with client.sql(MOST_POPULATED_IN_3_COUNTRIES, include_field_names=True) as cursor: print('Most 10 populated cities in USA, India and China:') - print(next(cursor)) - print('----------------------------------------') + table_str_pattern = '{:15}\t| {:20}\t| {}' + print(table_str_pattern.format(*next(cursor))) + print('*' * 50) for row in cursor: - print(row) + print(table_str_pattern.format(*row)) # Most 10 populated cities in USA, India and China: - # ['COUNTRY_NAME', 'CITY_NAME', 'MAX_POP'] - # ---------------------------------------- - # ['India', 'Mumbai (Bombay)', 10500000] - # ['China', 'Shanghai', 9696300] - # ['United States', 'New York', 8008278] - # ['China', 'Peking', 7472000] - # ['India', 'Delhi', 7206704] - # ['China', 'Chongqing', 6351600] - # ['China', 'Tianjin', 5286800] - # ['India', 
'Calcutta [Kolkata]', 4399819] - # ['China', 'Wuhan', 4344600] - # ['China', 'Harbin', 4289800] - - # show city info - CITY_INFO_QUERY = '''SELECT * FROM City WHERE id = ?''' - - with client.sql(CITY_INFO_QUERY, query_args=[3802], include_field_names=True) as cursor: + # COUNTRY_NAME | CITY_NAME | MAX_POP + # ************************************************** + # India | Mumbai (Bombay) | 10500000 + # China | Shanghai | 9696300 + # United States | New York | 8008278 + # China | Peking | 7472000 + # India | Delhi | 7206704 + # China | Chongqing | 6351600 + # China | Tianjin | 5286800 + # India | Calcutta [Kolkata] | 4399819 + # China | Wuhan | 4344600 + # China | Harbin | 4289800 + print('-' * 20) + + # Show city info + with client.sql('SELECT * FROM City WHERE id = ?', query_args=[3802], include_field_names=True) as cursor: field_names = next(cursor) - field_data = list(*cursor) - + field = list(*cursor) print('City info:') - for field_name, field_value in zip(field_names * len(field_data), field_data): - print('{}: {}'.format(field_name, field_value)) + for field_name, field_value in zip(field_names * len(field), field): + print(f'{field_name}: {field_value}') # City info: # ID: 3802 # NAME: Detroit @@ -282,10 +102,6 @@ # DISTRICT: Michigan # POPULATION: 951270 - # clean up - for table_name in [ - CITY_TABLE_NAME, - LANGUAGE_TABLE_NAME, - COUNTRY_TABLE_NAME, - ]: - result = client.sql(DROP_TABLE_QUERY.format(table_name)) + # Clean up + for table_name in TableNames: + result = client.sql(Query.DROP_TABLE.format(table_name.value)) diff --git a/examples/transactions.py b/examples/transactions.py index 53e9c30..b4231fd 100644 --- a/examples/transactions.py +++ b/examples/transactions.py @@ -19,8 +19,8 @@ from pyignite import AioClient, Client from pyignite.datatypes import TransactionIsolation, TransactionConcurrency -from pyignite.datatypes.prop_codes import PROP_CACHE_ATOMICITY_MODE, PROP_NAME from pyignite.datatypes.cache_config import CacheAtomicityMode +from pyignite.datatypes.prop_codes import PROP_CACHE_ATOMICITY_MODE, PROP_NAME from pyignite.exceptions import CacheError @@ -129,17 +129,20 @@ def sync_example(): cache.destroy() -if __name__ == '__main__': +def check_is_transactions_supported(): client = Client() with client.connect('127.0.0.1', 10800): if not client.protocol_context.is_transactions_supported(): print("'Transactions' API is not supported by cluster. 
Finishing...") exit(0) + +if __name__ == '__main__': + check_is_transactions_supported() + print("Starting sync example") sync_example() if sys.version_info >= (3, 7): print("Starting async example") - loop = asyncio.get_event_loop() - loop.run_until_complete(async_example()) + asyncio.run(async_example()) diff --git a/examples/type_hints.py b/examples/type_hints.py index 8d53bf9..f8adf70 100644 --- a/examples/type_hints.py +++ b/examples/type_hints.py @@ -18,7 +18,6 @@ client = Client() with client.connect('127.0.0.1', 10800): - my_cache = client.get_or_create_cache('my cache') my_cache.put('my key', 42) @@ -43,7 +42,7 @@ # now let us delete both keys at once my_cache.remove_keys([ - 'a', # a default type key + 'a', # a default type key ('a', CharObject), # a key of type CharObject ]) From bc5a52c1c9b70136a63240da9749cabcb5c14c0e Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Tue, 9 Aug 2022 11:15:51 +0300 Subject: [PATCH 59/62] IGNITE-17494 use_ssl is not set when auth used (#55) --- pyignite/connection/connection.py | 3 --- tests/security/test_auth.py | 29 +++++++++++++++++++++++++---- 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/pyignite/connection/connection.py b/pyignite/connection/connection.py index 3d86f01..4596e23 100644 --- a/pyignite/connection/connection.py +++ b/pyignite/connection/connection.py @@ -48,9 +48,6 @@ def __init__(self, client, host: str = None, port: int = None, username: str = N check_ssl_params(ssl_params) - if self.username and self.password and 'use_ssl' not in ssl_params: - ssl_params['use_ssl'] = True - self.ssl_params = ssl_params self._failed = False diff --git a/tests/security/test_auth.py b/tests/security/test_auth.py index 83ac780..f4ca29b 100644 --- a/tests/security/test_auth.py +++ b/tests/security/test_auth.py @@ -46,8 +46,7 @@ def cleanup(): clear_ignite_work_dir() -def test_auth_success(with_ssl, ssl_params, caplog): - ssl_params['use_ssl'] = with_ssl +def check_auth_success(ssl_params, caplog): listener = AccumulatingConnectionListener() client = Client(username=DEFAULT_IGNITE_USERNAME, password=DEFAULT_IGNITE_PASSWORD, event_listeners=[listener], **ssl_params) @@ -60,9 +59,18 @@ def test_auth_success(with_ssl, ssl_params, caplog): __assert_successful_connect_events(conn, listener) -@pytest.mark.asyncio -async def test_auth_success_async(with_ssl, ssl_params, caplog): +def test_auth_success_no_explicit_ssl(with_ssl, ssl_params, caplog): + if with_ssl: + ssl_params['use_ssl'] = with_ssl + check_auth_success(ssl_params, caplog) + + +def test_auth_success(with_ssl, ssl_params, caplog): ssl_params['use_ssl'] = with_ssl + check_auth_success(ssl_params, caplog) + + +async def check_auth_success_async(ssl_params, caplog): listener = AccumulatingConnectionListener() client = AioClient(username=DEFAULT_IGNITE_USERNAME, password=DEFAULT_IGNITE_PASSWORD, event_listeners=[listener], **ssl_params) @@ -75,6 +83,19 @@ async def test_auth_success_async(with_ssl, ssl_params, caplog): __assert_successful_connect_events(conn, listener) +@pytest.mark.asyncio +async def test_auth_success_no_explicit_ssl_async(with_ssl, ssl_params, caplog): + if with_ssl: + ssl_params['use_ssl'] = with_ssl + await check_auth_success_async(ssl_params, caplog) + + +@pytest.mark.asyncio +async def test_auth_success_async(with_ssl, ssl_params, caplog): + ssl_params['use_ssl'] = with_ssl + await check_auth_success_async(ssl_params, caplog) + + def __assert_successful_connect_log(conn, caplog): assert any(re.match(rf'Connecting to 
node\(address={conn.host},\s+port={conn.port}', r.message) for r in caplog.records) From a0309cd727f71cb4b2e40e7887e97a61ed0d6cea Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Mon, 31 Oct 2022 17:34:59 +0300 Subject: [PATCH 60/62] IGNITE-18006 Fix timeouts. --- pyignite/connection/aio_connection.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/pyignite/connection/aio_connection.py b/pyignite/connection/aio_connection.py index 4d13d6e..0c72be1 100644 --- a/pyignite/connection/aio_connection.py +++ b/pyignite/connection/aio_connection.py @@ -29,6 +29,7 @@ # limitations under the License. import asyncio +from asyncio import CancelledError from collections import OrderedDict from typing import Union @@ -57,7 +58,8 @@ def connection_made(self, transport: asyncio.WriteTransport) -> None: try: self.__send_handshake(transport, self._conn) except Exception as e: - self._handshake_fut.set_exception(e) + if not self._handshake_fut.done(): + self._handshake_fut.set_exception(e) def data_received(self, data: bytes) -> None: self._buffer += data @@ -67,7 +69,7 @@ def data_received(self, data: bytes) -> None: if not self._handshake_fut.done(): hs_response = self.__parse_handshake(packet, self._conn.client) self._handshake_fut.set_result(hs_response) - else: + elif not self._handshake_fut.cancelled() or not self._handshake_fut.exception(): self._conn.process_message(packet) self._buffer = self._buffer[packet_sz:len(self._buffer)] @@ -203,7 +205,8 @@ async def _connect(self): def process_connection_lost(self, err, reconnect=False): self.failed = True for _, fut in self._pending_reqs.items(): - fut.set_exception(err) + if not fut.done(): + fut.set_exception(err) self._pending_reqs.clear() if self._transport_closed_fut and not self._transport_closed_fut.done(): @@ -215,8 +218,11 @@ def process_connection_lost(self, err, reconnect=False): def process_message(self, data): req_id = int.from_bytes(data[4:12], byteorder=PROTOCOL_BYTE_ORDER, signed=True) - if req_id in self._pending_reqs: - self._pending_reqs[req_id].set_result(data) + + req_fut = self._pending_reqs.get(req_id) + if req_fut: + if not req_fut.done(): + req_fut.set_result(data) del self._pending_reqs[req_id] async def _connect_version(self) -> Union[dict, OrderedDict]: From ef1d24884af1770d0f664434ff8b6fccbd65119c Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Wed, 2 Nov 2022 16:12:31 +0300 Subject: [PATCH 61/62] IGNITE-18006 Add reproducers --- pyignite/connection/aio_connection.py | 1 - tests/custom/test_timeouts.py | 212 ++++++++++++++++++++++++++ 2 files changed, 212 insertions(+), 1 deletion(-) create mode 100644 tests/custom/test_timeouts.py diff --git a/pyignite/connection/aio_connection.py b/pyignite/connection/aio_connection.py index 0c72be1..13ab681 100644 --- a/pyignite/connection/aio_connection.py +++ b/pyignite/connection/aio_connection.py @@ -29,7 +29,6 @@ # limitations under the License. import asyncio -from asyncio import CancelledError from collections import OrderedDict from typing import Union diff --git a/tests/custom/test_timeouts.py b/tests/custom/test_timeouts.py new file mode 100644 index 0000000..084cf91 --- /dev/null +++ b/tests/custom/test_timeouts.py @@ -0,0 +1,212 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import asyncio +import sys +import time +from asyncio.exceptions import TimeoutError, InvalidStateError + +import pytest + +from pyignite import AioClient +from tests.util import start_ignite_gen + + +@pytest.fixture(autouse=True) +def server1(): + yield from start_ignite_gen(idx=1) + + +@pytest.fixture(autouse=True) +async def proxy(event_loop, server1, cache): + proxy = ProxyServer(("127.0.0.1", 10802), ("127.0.0.1", 10801)) + try: + await proxy.start() + yield proxy + finally: + await proxy.close() + + +@pytest.fixture(autouse=True) +async def cache(server1): + c = AioClient(partition_aware=False) + async with c.connect("127.0.0.1", 10801): + try: + cache = await c.get_or_create_cache("test") + yield cache + finally: + await cache.destroy() + + +@pytest.fixture(autouse=True) +def invalid_states_errors(): + errors = [] + + def trace(_, event, arg): + if event == 'exception': + etype, _, _ = arg + if etype is InvalidStateError: + errors.append(arg) + + return trace + + try: + sys.settrace(trace) + yield errors + finally: + sys.settrace(None) + + +@pytest.mark.asyncio +async def test_cancellation_on_slow_response(event_loop, proxy, invalid_states_errors): + c = AioClient(partition_aware=False) + async with c.connect("127.0.0.1", 10802): + cache = await c.get_cache("test") + proxy.slow_response = True + with pytest.raises(TimeoutError): + await asyncio.wait_for(cache.put(1, 2), 0.1) + + assert len(invalid_states_errors) == 0 + + +@pytest.mark.asyncio +async def test_cancellation_on_disconnect(event_loop, proxy, invalid_states_errors): + c = AioClient(partition_aware=False) + async with c.connect("127.0.0.1", 10802): + cache = await c.get_cache("test") + proxy.discard_response = True + + asyncio.ensure_future(asyncio.wait_for(cache.put(1, 2), 0.1)) + await asyncio.sleep(0.2) + await proxy.disconnect_peers() + + assert len(invalid_states_errors) == 0 + + +class ProxyServer: + """ + Proxy for simulating slow or discarding response ignite server + Set `slow_response`, `discard_response` to `True` to simulate these conditions. + Call `disconnect_peers()` in order to simulate lost connection to Ignite server. 
+ """ + def __init__(self, local_host, remote_host): + self.local_host = local_host + self.remote_host = remote_host + self.peers = {} + self.slow_response = False + self.discard_response = False + self.server = None + + async def start(self): + loop = asyncio.get_event_loop() + host, port = self.local_host + self.server = await loop.create_server( + lambda: ProxyTcpProtocol(self), host=host, port=port) + + async def disconnect_peers(self): + peers = dict(self.peers) + for k, v in peers.items(): + if not v: + return + + local, remote = v + if local: + await remote.close() + if remote: + await local.close() + + async def close(self): + try: + await self.disconnect_peers() + except TimeoutError: + pass + + self.server.close() + + +class ProxyTcpProtocol(asyncio.Protocol): + def __init__(self, proxy): + self.addr, self.port = proxy.remote_host + self.proxy = proxy + self.transport, self.remote_protocol, self.conn_info, self.close_fut = None, None, None, None + super().__init__() + + def connection_made(self, transport): + self.transport = transport + self.conn_info = transport.get_extra_info("peername") + + def data_received(self, data): + if self.remote_protocol and self.remote_protocol.transport: + self.remote_protocol.transport.write(data) + return + + loop = asyncio.get_event_loop() + self.remote_protocol = RemoteTcpProtocol(self.proxy, self, data) + coro = loop.create_connection(lambda: self.remote_protocol, host=self.addr, port=self.port) + asyncio.ensure_future(coro) + + self.proxy.peers[self.conn_info] = (self, self.remote_protocol) + + async def close(self): + if not self.transport: + return + + self.close_fut = asyncio.get_event_loop().create_future() + self.transport.close() + + try: + await asyncio.wait_for(self.close_fut, 0.1) + except TimeoutError: + pass + + def connection_lost(self, exc): + if self.close_fut: + self.close_fut.done() + + +class RemoteTcpProtocol(asyncio.Protocol): + def __init__(self, proxy, proxy_protocol, data): + self.proxy = proxy + self.proxy_protocol = proxy_protocol + self.data = data + self.transport, self.close_fut = None, None + super().__init__() + + def connection_made(self, transport): + self.transport = transport + self.transport.write(self.data) + + async def close(self): + if not self.transport: + return + + self.close_fut = asyncio.get_event_loop().create_future() + self.transport.close() + try: + await asyncio.wait_for(self.close_fut, 0.1) + except TimeoutError: + pass + + def connection_lost(self, exc): + if self.close_fut: + self.close_fut.done() + + def data_received(self, data): + if self.proxy.discard_response: + return + + if self.proxy.slow_response: + time.sleep(0.5) + + self.proxy_protocol.transport.write(data) From 0dd471b9d75ce7824efaa42e18fcd71cbd24ff04 Mon Sep 17 00:00:00 2001 From: Ivan Daschinsky Date: Wed, 2 Nov 2022 16:25:01 +0300 Subject: [PATCH 62/62] IGNITE-18006 Remove travis, add github actions --- .github/workflows/pr_check.yml | 58 ++++++++++++++++++++++++++++++++++ .travis.yml | 54 ------------------------------- requirements/tests.txt | 3 +- setup.py | 1 + tests/custom/test_timeouts.py | 15 ++++----- tox.ini | 4 +-- 6 files changed, 68 insertions(+), 67 deletions(-) create mode 100644 .github/workflows/pr_check.yml delete mode 100644 .travis.yml diff --git a/.github/workflows/pr_check.yml b/.github/workflows/pr_check.yml new file mode 100644 index 0000000..5aaf49f --- /dev/null +++ b/.github/workflows/pr_check.yml @@ -0,0 +1,58 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor 
license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: Check code style and run tests +on: [ push, pull_request ] + +env: + IGNITE_VERSION: 2.14.0 + IGNITE_HOME: /opt/ignite + +jobs: + build: + runs-on: ubuntu-latest + continue-on-error: true + strategy: + fail-fast: false + matrix: + cfg: + - { python: "3.7", toxenv: "py37" } + - { python: "3.8", toxenv: "py38" } + - { python: "3.9", toxenv: "py39" } + - { python: "3.10", toxenv: "py310" } + - { python: "3.11", toxenv: "py311" } + - { python: "3.11", toxenv: "codestyle" } + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.cfg.python}} + - name: Install Apache Ignite + run: | + curl -L https://apache-mirror.rbc.ru/pub/apache/ignite/${IGNITE_VERSION}/apache-ignite-slim-${IGNITE_VERSION}-bin.zip > ignite.zip + unzip ignite.zip -d /opt + mv /opt/apache-ignite-slim-${IGNITE_VERSION}-bin /opt/ignite + mv /opt/ignite/libs/optional/ignite-log4j2 /opt/ignite/libs/ + + - name: Install tox + run: | + pip install tox + + - name: Run tests + run: | + pip install tox + tox -e ${{ matrix.cfg.toxenv }} diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 45f26f6..0000000 --- a/.travis.yml +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -language: python -sudo: required - -addons: - apt: - packages: - - openjdk-8-jdk - -env: - global: - - IGNITE_VERSION=2.13.0 - - IGNITE_HOME=/opt/ignite - -before_install: - - curl -L https://apache-mirror.rbc.ru/pub/apache/ignite/${IGNITE_VERSION}/apache-ignite-slim-${IGNITE_VERSION}-bin.zip > ignite.zip - - unzip ignite.zip -d /opt - - mv /opt/apache-ignite-slim-${IGNITE_VERSION}-bin /opt/ignite - - mv /opt/ignite/libs/optional/ignite-log4j2 /opt/ignite/libs/ - -jobs: - include: - - python: '3.7' - arch: amd64 - env: TOXENV=py37 - - python: '3.8' - arch: amd64 - env: TOXENV=py38 - - python: '3.8' - arch: amd64 - env: TOXENV=codestyle - - python: '3.9' - arch: amd64 - env: TOXENV=py39 - - python: '3.10' - arch: amd64 - env: TOXENV=py310 - -install: pip install tox -script: tox diff --git a/requirements/tests.txt b/requirements/tests.txt index 7262fe9..5dc815a 100644 --- a/requirements/tests.txt +++ b/requirements/tests.txt @@ -1,7 +1,6 @@ # these packages are used for testing -async_generator==1.10; python_version < '3.7' -pytest==6.2.2 +pytest==6.2.5 pytest-cov==2.11.1 pytest-asyncio==0.14.0 teamcity-messages==1.28 diff --git a/setup.py b/setup.py index 91a72f5..827066a 100644 --- a/setup.py +++ b/setup.py @@ -110,6 +110,7 @@ def run_setup(with_binary=True): 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3 :: Only', 'Intended Audience :: Developers', 'Topic :: Database :: Front-Ends', diff --git a/tests/custom/test_timeouts.py b/tests/custom/test_timeouts.py index 084cf91..e70fe70 100644 --- a/tests/custom/test_timeouts.py +++ b/tests/custom/test_timeouts.py @@ -14,8 +14,7 @@ # limitations under the License. import asyncio import sys -import time -from asyncio.exceptions import TimeoutError, InvalidStateError +from asyncio import TimeoutError, InvalidStateError import pytest @@ -73,10 +72,12 @@ async def test_cancellation_on_slow_response(event_loop, proxy, invalid_states_e c = AioClient(partition_aware=False) async with c.connect("127.0.0.1", 10802): cache = await c.get_cache("test") - proxy.slow_response = True + proxy.discard_response = True # Simulate slow response by discarding it + with pytest.raises(TimeoutError): await asyncio.wait_for(cache.put(1, 2), 0.1) + proxy.discard_response = False assert len(invalid_states_errors) == 0 @@ -96,15 +97,14 @@ async def test_cancellation_on_disconnect(event_loop, proxy, invalid_states_erro class ProxyServer: """ - Proxy for simulating slow or discarding response ignite server - Set `slow_response`, `discard_response` to `True` to simulate these conditions. + Proxy for simulating discarding response from ignite server + Set `discard_response` to `True` to simulate this condition. Call `disconnect_peers()` in order to simulate lost connection to Ignite server. 
""" def __init__(self, local_host, remote_host): self.local_host = local_host self.remote_host = remote_host self.peers = {} - self.slow_response = False self.discard_response = False self.server = None @@ -206,7 +206,4 @@ def data_received(self, data): if self.proxy.discard_response: return - if self.proxy.slow_response: - time.sleep(0.5) - self.proxy_protocol.transport.write(data) diff --git a/tox.ini b/tox.ini index e873e21..d68f02e 100644 --- a/tox.ini +++ b/tox.ini @@ -15,7 +15,7 @@ [tox] skipsdist = True -envlist = codestyle,py{37,38,39,310} +envlist = codestyle,py{37,38,39,310,311} [pytest] log_format = %(asctime)s %(name)s %(levelname)s %(message)s @@ -43,6 +43,6 @@ usedevelop = True commands = pytest {env:PYTESTARGS:} {posargs} --force-cext --examples -[testenv:py{36,37,38,39}-jenkins] +[testenv:py{37,38,39,310,311}-jenkins] setenv: PYTESTARGS = --junitxml=junit-{envname}.xml