diff --git a/CHANGELOG.md b/CHANGELOG.md index e15d2d1a2..c17b0e95f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,7 @@ See also https://github.com/neo4j/neo4j-python-driver/wiki for a full changelog. ## NEXT RELEASE -- No breaking or major changes. +- Python 3.7, 3.8, and 3.9 support has been dropped. ## Version 5.28 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6c1c8efcf..b1bd10d36 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -58,7 +58,7 @@ install the pre-commit hooks as described below instead. They will take care of updating the code if necessary. Setting up the development environment: - * Install Python 3.8+ + * Install Python 3.10+ * Install the requirements ```bash $ python3 -m pip install -U pip diff --git a/README.rst b/README.rst index 71d586998..543ee378a 100644 --- a/README.rst +++ b/README.rst @@ -16,13 +16,10 @@ breaking API changes. See also: https://neo4j.com/developer/kb/neo4j-supported-versions/ -+ Python 3.13 supported (since driver version 5.26.0). -+ Python 3.12 supported (since driver version 5.14.0). -+ Python 3.11 supported (since driver version 5.3.0). ++ Python 3.13 supported. ++ Python 3.12 supported. ++ Python 3.11 supported. + Python 3.10 supported. -+ Python 3.9 supported. -+ Python 3.8 supported. -+ Python 3.7 supported. Installation diff --git a/TESTING.md b/TESTING.md index 187bfb755..db07ead9d 100644 --- a/TESTING.md +++ b/TESTING.md @@ -1,6 +1,6 @@ # Neo4j Driver Testing To run driver tests, [Tox](https://tox.readthedocs.io) is required as well as at least one version of Python. -The versions of Python supported by this driver are CPython 3.7 - 3.12 +The versions of Python supported by this driver are CPython 3.10 - 3.13 ## Testing with TestKit TestKit is the shared test suite used by all official (and some community contributed) Neo4j drivers to ensure consistent and correct behavior across all drivers. diff --git a/benchkit/app.py b/benchkit/app.py index b4e56853c..a2e9e756e 100644 --- a/benchkit/app.py +++ b/benchkit/app.py @@ -16,7 +16,6 @@ from __future__ import annotations -import sys import typing as t from contextlib import contextmanager from multiprocessing import Semaphore @@ -44,10 +43,7 @@ from .workloads import Workload -if sys.version_info < (3, 8): - T_App: te.TypeAlias = "Sanic" -else: - T_App: te.TypeAlias = "Sanic[Config, BenchKitContext]" +T_App: te.TypeAlias = "Sanic[Config, BenchKitContext]" def create_app() -> T_App: diff --git a/benchkit/workloads.py b/benchkit/workloads.py index 21c2c6506..3c0dbd8c9 100644 --- a/benchkit/workloads.py +++ b/benchkit/workloads.py @@ -20,12 +20,13 @@ import enum import typing as t from dataclasses import dataclass -from typing import Iterator import typing_extensions as te if t.TYPE_CHECKING: + from collections.abc import Iterator + from neo4j import ( AsyncDriver, AsyncManagedTransaction, diff --git a/bin/make-unasync b/bin/make-unasync index 4c8b18084..ef08f25c8 100755 --- a/bin/make-unasync +++ b/bin/make-unasync @@ -113,6 +113,8 @@ class CustomRule(unasync.Rule): # it's not pretty, but it works # typing.Awaitable[...] -> typing.Union[...] 
self.token_replacements["Awaitable"] = "Union" + self.token_replacements["aiter"] = "iter" + self.token_replacements["anext"] = "next" def _unasync_tokens(self, tokens): # copy from unasync to fix handling of multiline strings @@ -300,16 +302,15 @@ def apply_isort(paths): def apply_changes(paths): def files_equal(path1, path2): - with open(path1, "rb") as f1: - with open(path2, "rb") as f2: + with open(path1, "rb") as f1, open(path2, "rb") as f2: + data1 = f1.read(1024) + data2 = f2.read(1024) + while data1 or data2: + if data1 != data2: + changed_paths[path1] = path2 + return False data1 = f1.read(1024) data2 = f2.read(1024) - while data1 or data2: - if data1 != data2: - changed_paths[path1] = path2 - return False - data1 = f1.read(1024) - data2 = f2.read(1024) return True changed_paths = {} diff --git a/docs/README.md b/docs/README.md index 8222e07ea..17bb3495e 100644 --- a/docs/README.md +++ b/docs/README.md @@ -2,7 +2,7 @@ Sphinx Documentation ==================== -Building the docs requires Python 3.8+ +Building the docs requires Python 3.10+ In project root ``` diff --git a/docs/source/async_api.rst b/docs/source/async_api.rst index 6c6e62e49..4f9e31094 100644 --- a/docs/source/async_api.rst +++ b/docs/source/async_api.rst @@ -6,11 +6,6 @@ Async API Documentation .. versionadded:: 5.0 -.. warning:: - There are known issue with Python 3.8 and the async driver where it - gradually slows down. Generally, it's recommended to use the latest - supported version of Python for best performance, stability, and security. - ****************** AsyncGraphDatabase ****************** diff --git a/docs/source/index.rst b/docs/source/index.rst index 3ed0dd475..185fbaeb8 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -18,13 +18,10 @@ See https://neo4j.com/developer/kb/neo4j-supported-versions/ for a driver-server Python versions supported: -* Python 3.13 (added in driver version 5.26.0) -* Python 3.12 (added in driver version 5.14.0) -* Python 3.11 (added in driver version 5.3.0) +* Python 3.13 +* Python 3.12 +* Python 3.11 * Python 3.10 -* Python 3.9 -* Python 3.8 -* Python 3.7 ****** diff --git a/pyproject.toml b/pyproject.toml index 2cb079452..783f0bc76 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,7 +23,7 @@ authors = [ {name = "Neo4j, Inc.", email = "drivers@neo4j.com"}, ] dependencies = ["pytz"] -requires-python = ">=3.7" +requires-python = ">=3.10" keywords = ["neo4j", "graph", "database"] classifiers = [ "Development Status :: 5 - Production/Stable", @@ -31,9 +31,6 @@ classifiers = [ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", @@ -65,9 +62,7 @@ pyarrow = ["pyarrow >= 1.0.0"] [build-system] requires = [ - "setuptools == 68.0.0; python_version <= '3.7'", # dropped support for Python 3.7 in 68.1.0 - "setuptools == 75.3.0; python_version == '3.8'", # dropped support for Python 3.8 in 75.4.0 - "setuptools == 75.6.0; python_version >= '3.9'", + "setuptools == 75.6.0", # TODO: 6.0 - can be removed once `setup.py` is simplified "tomlkit == 0.12.5", # dropped support (at least CI testing) for Python 3.7 in 0.13.0 ] @@ -123,6 +118,7 @@ use_parentheses = true [tool.pytest.ini_options] mock_use_standalone_module = true asyncio_mode = "strict" 
+asyncio_default_fixture_loop_scope = "function" [tool.mypy] @@ -148,8 +144,7 @@ extend-exclude = [ preview = true # to get CPY lints extend-ignore = [ "RUF002", # allow ’ (RIGHT SINGLE QUOTATION MARK) to be used as an apostrophe (e.g. "it’s") - "SIM117", # TODO: when Python 3.10+ is the minimum, - # we can start to use multi-item `with` statements + # pydocstyle "D1", # disable check for undocumented items (way too noisy) "D203", # `one-blank-line-before-class` @@ -199,6 +194,9 @@ extend-ignore = [ # needs fixing in ruff to work with typing.Protocol # https://github.com/astral-sh/ruff/issues/13307 "FURB180", + + # rule is deprecated and suggests a practice that is not recommended + "UP038", ] select = [ # ruff diff --git a/requirements-dev.txt b/requirements-dev.txt index ef164b169..03cf6c4f8 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -2,31 +2,31 @@ -e .[pandas,numpy,pyarrow] # needed for packaging -build>=1.1.1 # TODO: 6.0 - bump when support for Python 3.7 is dropped +build>=1.2.2 # auto-generate sync driver from async code unasync==0.5.0 # pre-commit hooks and tools -pre-commit>=2.21.0 # TODO: 6.0 - bump when support for Python 3.7 is dropped -isort>=5.11.5 # TODO: 6.0 - bump when support for Python 3.7 is dropped -mypy>=1.4.1 # TODO: 6.0 - bump when support for Python 3.7 is dropped -typing-extensions>=4.7.1 -types-pytz>=2023.3.1.1 # TODO: 6.0 - bump when support for Python 3.7 is dropped -ruff>=0.8.2 +pre-commit>=4.2.0 +isort>=6.0.1 +mypy>=1.15.0 +typing-extensions>=4.13.2 +types-pytz>=2025.2.0.20250326 +ruff>=0.11.6 # needed for running tests -coverage[toml]>=7.2.7 # TODO: 6.0 - bump when support for Python 3.7 is dropped +coverage[toml]>=7.8.0 freezegun>=1.5.1 -mock>=5.1.0 -pytest>=7.4.4 # TODO: 6.0 - bump when support for Python 3.7 is dropped -pytest-asyncio~=0.21.2 # TODO: 6.0 - bump when support for Python 3.7 is dropped -pytest-benchmark>=4.0.0 -pytest-cov>=4.1.0 # TODO: 6.0 - bump when support for Python 3.7 is dropped -pytest-mock>=3.11.1 # TODO: 6.0 - bump when support for Python 3.7 is dropped -tox>=4.8.0 # TODO: 6.0 - bump when support for Python 3.7 is dropped +mock>=5.2.0 +pytest>=8.3.5 +pytest-asyncio>=0.26.0 +pytest-benchmark>=5.1.0 +pytest-cov>=6.1.1 +pytest-mock>=3.14.0 +tox>=4.25.0 # needed for building docs -Sphinx>=5.3.0 # TODO: 6.0 - bump when support for Python 3.7 is dropped +Sphinx>=8.1.3 # needed for BenchKit -sanic>=23.3.0 # TODO: 6.0 - bump when support for Python 3.7 is dropped +sanic>=25.3.0 diff --git a/src/neo4j/__init__.py b/src/neo4j/__init__.py index f881e3c3f..67dc88583 100644 --- a/src/neo4j/__init__.py +++ b/src/neo4j/__init__.py @@ -225,7 +225,7 @@ def __getattr__(name) -> _t.Any: raise AttributeError(f"module {__name__} has no attribute {name}") -def __dir__() -> _t.List[str]: +def __dir__() -> list[str]: return __all__ diff --git a/src/neo4j/_api.py b/src/neo4j/_api.py index 0e74c593c..c87e7424e 100644 --- a/src/neo4j/_api.py +++ b/src/neo4j/_api.py @@ -67,14 +67,14 @@ class NotificationMinimumSeverity(str, Enum): if t.TYPE_CHECKING: - T_NotificationMinimumSeverity = t.Union[ - NotificationMinimumSeverity, - te.Literal[ + T_NotificationMinimumSeverity = ( + NotificationMinimumSeverity + | te.Literal[ "OFF", "WARNING", "INFORMATION", - ], - ] + ] + ) __all__.append("T_NotificationMinimumSeverity") @@ -213,10 +213,10 @@ class NotificationDisabledClassification(str, Enum): if t.TYPE_CHECKING: - T_NotificationDisabledCategory = t.Union[ - NotificationDisabledCategory, - NotificationDisabledClassification, - te.Literal[ +
T_NotificationDisabledCategory = ( + NotificationDisabledCategory + | NotificationDisabledClassification + | te.Literal[ "HINT", "UNRECOGNIZED", "UNSUPPORTED", @@ -226,8 +226,8 @@ class NotificationDisabledClassification(str, Enum): "SECURITY", "TOPOLOGY", "SCHEMA", - ], - ] + ] + ) __all__.append("T_NotificationDisabledCategory") @@ -343,8 +343,5 @@ class TelemetryAPI(int, Enum): if t.TYPE_CHECKING: - T_RoutingControl = t.Union[ - RoutingControl, - te.Literal["r", "w"], - ] + T_RoutingControl = RoutingControl | te.Literal["r", "w"] __all__.append("T_RoutingControl") diff --git a/src/neo4j/_async/_debug/_concurrency_check.py b/src/neo4j/_async/_debug/_concurrency_check.py index 9b295e31d..15bb03f6c 100644 --- a/src/neo4j/_async/_debug/_concurrency_check.py +++ b/src/neo4j/_async/_debug/_concurrency_check.py @@ -129,7 +129,7 @@ async def inner(*args, **kwargs): tbs = deepcopy(self.__tracebacks) if acquired: try: - item = await iter_.__anext__() + item = await anext(iter_) except StopAsyncIteration: return finally: diff --git a/src/neo4j/_async/bookmark_manager.py b/src/neo4j/_async/bookmark_manager.py index e4ec2a1d6..5081d9ca2 100644 --- a/src/neo4j/_async/bookmark_manager.py +++ b/src/neo4j/_async/bookmark_manager.py @@ -26,8 +26,8 @@ ) -TBmSupplier = t.Callable[[], t.Union[Bookmarks, t.Awaitable[Bookmarks]]] -TBmConsumer = t.Callable[[Bookmarks], t.Union[None, t.Awaitable[None]]] +TBmSupplier = t.Callable[[], Bookmarks | t.Awaitable[Bookmarks]] +TBmConsumer = t.Callable[[Bookmarks], None | t.Awaitable[None]] def _bookmarks_to_set( diff --git a/src/neo4j/_async/home_db_cache.py b/src/neo4j/_async/home_db_cache.py index 96c2850f6..31f53f618 100644 --- a/src/neo4j/_async/home_db_cache.py +++ b/src/neo4j/_async/home_db_cache.py @@ -28,12 +28,8 @@ if t.TYPE_CHECKING: import typing_extensions as te - TKey: te.TypeAlias = t.Union[ - str, - t.Tuple[t.Tuple[str, t.Hashable], ...], - t.Tuple[None], - ] - TVal: te.TypeAlias = t.Tuple[float, str] + TKey: te.TypeAlias = str | tuple[tuple[str, t.Hashable], ...] 
| tuple[None] + TVal: te.TypeAlias = tuple[float, str] class AsyncHomeDbCache: diff --git a/src/neo4j/_async/io/_bolt3.py b/src/neo4j/_async/io/_bolt3.py index 2997296fc..d28420705 100644 --- a/src/neo4j/_async/io/_bolt3.py +++ b/src/neo4j/_async/io/_bolt3.py @@ -309,7 +309,8 @@ async def route( await self.send_all() await self.fetch_all() return [ - dict(zip(metadata.get("fields", ()), values)) for values in records + dict(zip(metadata.get("fields", ()), values, strict=True)) + for values in records ] def run( diff --git a/src/neo4j/_async/io/_bolt4.py b/src/neo4j/_async/io/_bolt4.py index 138ee51bb..4feef2e99 100644 --- a/src/neo4j/_async/io/_bolt4.py +++ b/src/neo4j/_async/io/_bolt4.py @@ -224,7 +224,8 @@ async def route( await self.send_all() await self.fetch_all() return [ - dict(zip(metadata.get("fields", ()), values)) for values in records + dict(zip(metadata.get("fields", ()), values, strict=True)) + for values in records ] def run( diff --git a/src/neo4j/_async/work/result.py b/src/neo4j/_async/work/result.py index 721fac4ac..9c911a0be 100644 --- a/src/neo4j/_async/work/result.py +++ b/src/neo4j/_async/work/result.py @@ -78,7 +78,7 @@ _driver_dir = _driver_dir.parent _T = t.TypeVar("_T") -_TResultKey = t.Union[int, str] +_TResultKey: t.TypeAlias = int | str _RESULT_FAILED_ERROR = ( @@ -247,7 +247,8 @@ def on_records(records): for record in records ) self._record_buffer.extend( - Record(zip(self._keys, record)) for record in records + Record(zip(self._keys, record, strict=True)) + for record in records ) async def _on_summary(): @@ -420,7 +421,7 @@ async def __anext__(self) -> Record: :raises StopAsyncIteration: if no more records are available. """ - return await self.__aiter__().__anext__() + return await anext(aiter(self)) async def _attach(self): # Set the Result object in an attached state by fetching messages @@ -918,7 +919,7 @@ async def to_df( else: # The rows have different keys. 
We need to pass a list # of dicts to pandas - rows = [dict(zip(df_keys, r)) for r in rows] + rows = [dict(zip(df_keys, r, strict=True)) for r in rows] df_keys = False rows.append(row) if df_keys is False: diff --git a/src/neo4j/_async_compat/network/_bolt_socket.py b/src/neo4j/_async_compat/network/_bolt_socket.py index 0918c5a72..fa203ca44 100644 --- a/src/neo4j/_async_compat/network/_bolt_socket.py +++ b/src/neo4j/_async_compat/network/_bolt_socket.py @@ -22,11 +22,6 @@ import logging import typing as t from contextlib import suppress - - -# fmt: off -# isort: off -# isort seems buggy with the noqa comment from socket import ( AF_INET, AF_INET6, @@ -36,10 +31,7 @@ socket, SOL_SOCKET, TCP_NODELAY, - timeout as SocketTimeout, # noqa: N812 (it is a class) ) -# isort: on -# fmt: on from ssl import ( CertificateError, HAS_SNI, @@ -91,7 +83,7 @@ def __init__(self, reader, protocol, writer) -> None: async def _wait_for_io(self, io_async_fn, *args, **kwargs): timeout = self._timeout - to_raise = SocketTimeout + to_raise = TimeoutError if self._deadline is not None: deadline_timeout = self._deadline.to_timeout() if deadline_timeout <= 0: @@ -345,7 +337,7 @@ def _wait_for_io(self, func, *args, **kwargs): self._socket.settimeout(deadline_timeout) try: return func(*args, **kwargs) - except SocketTimeout as e: + except TimeoutError as e: raise SocketDeadlineExceededError("timed out") from e finally: self._socket.settimeout(timeout) @@ -410,7 +402,7 @@ def _connect_secure( s.settimeout(t) keep_alive = 1 if keep_alive else 0 s.setsockopt(SOL_SOCKET, SO_KEEPALIVE, keep_alive) - except SocketTimeout: + except TimeoutError: log.debug("[#0000] S: %s", resolved_address) raise ServiceUnavailable( "Timed out trying to establish connection to " diff --git a/src/neo4j/_async_compat/util.py b/src/neo4j/_async_compat/util.py index ce6b0089e..b952c4d9f 100644 --- a/src/neo4j/_async_compat/util.py +++ b/src/neo4j/_async_compat/util.py @@ -37,15 +37,6 @@ class AsyncUtil: - @staticmethod - async def iter(it): - async for x in it: - yield x - - @staticmethod - async def next(it): - return await it.__anext__() - @staticmethod async def list(it): return [x async for x in it] @@ -97,8 +88,6 @@ def extract_stack(limit=None): class Util: - iter: t.ClassVar = iter - next: t.ClassVar = next list: t.ClassVar = list @staticmethod diff --git a/src/neo4j/_codec/hydration/_common.py b/src/neo4j/_codec/hydration/_common.py index b1fd418a4..ec67a82b1 100644 --- a/src/neo4j/_codec/hydration/_common.py +++ b/src/neo4j/_codec/hydration/_common.py @@ -24,8 +24,8 @@ @dataclass class DehydrationHooks: - exact_types: t.Dict[t.Type, t.Callable[[t.Any], t.Any]] - subtypes: t.Dict[t.Type, t.Callable[[t.Any], t.Any]] + exact_types: dict[type, t.Callable[[t.Any], t.Any]] + subtypes: dict[type, t.Callable[[t.Any], t.Any]] def update(self, exact_types=None, subtypes=None): exact_types = exact_types or {} diff --git a/src/neo4j/_codec/packstream/v1/types.py b/src/neo4j/_codec/packstream/v1/types.py index 2b031b2c8..1224889be 100644 --- a/src/neo4j/_codec/packstream/v1/types.py +++ b/src/neo4j/_codec/packstream/v1/types.py @@ -14,24 +14,22 @@ # limitations under the License. -import typing as t - from ...._optional_deps import ( np, pd, ) -NONE_VALUES: t.Tuple = (None,) -TRUE_VALUES: t.Tuple = (True,) -FALSE_VALUES: t.Tuple = (False,) -INT_TYPES: t.Tuple[t.Type, ...] = (int,) -FLOAT_TYPES: t.Tuple[t.Type, ...] = (float,) +NONE_VALUES: tuple = (None,) +TRUE_VALUES: tuple = (True,) +FALSE_VALUES: tuple = (False,) +INT_TYPES: tuple[type, ...] 
= (int,) +FLOAT_TYPES: tuple[type, ...] = (float,) # we can't put tuple here because spatial types subclass tuple, # and we don't want to treat them as sequences -SEQUENCE_TYPES: t.Tuple[t.Type, ...] = (list,) -MAPPING_TYPES: t.Tuple[t.Type, ...] = (dict,) -BYTES_TYPES: t.Tuple[t.Type, ...] = (bytes, bytearray) +SEQUENCE_TYPES: tuple[type, ...] = (list,) +MAPPING_TYPES: tuple[type, ...] = (dict,) +BYTES_TYPES: tuple[type, ...] = (bytes, bytearray) if np is not None: diff --git a/src/neo4j/_data.py b/src/neo4j/_data.py index 474ce6a9e..0cbe300e0 100644 --- a/src/neo4j/_data.py +++ b/src/neo4j/_data.py @@ -54,7 +54,7 @@ _T = t.TypeVar("_T") -_K = t.Union[int, str] +_K: t.TypeAlias = int | str class Record(tuple, Mapping): @@ -92,7 +92,9 @@ def _super_getitem_single(self, index): def __repr__(self) -> str: fields = " ".join( f"{field}={value!r}" - for field, value in zip(self.__keys, super().__iter__()) + for field, value in zip( + self.__keys, super().__iter__(), strict=True + ) ) return f"<{self.__class__.__name__} {fields}>" @@ -140,7 +142,7 @@ def __getitem__( # type: ignore[override] if isinstance(key, slice): keys = self.__keys[key] values = super().__getitem__(key) - return self.__class__(zip(keys, values)) + return self.__class__(zip(keys, values, strict=True)) try: index = self.index(key) except IndexError: @@ -154,7 +156,7 @@ def __getslice__(self, start, stop): # noqa: PLW3201 will be removed key = slice(start, stop) keys = self.__keys[key] values = tuple(self)[key] - return self.__class__(zip(keys, values)) + return self.__class__(zip(keys, values, strict=True)) def get(self, key: str, default: object = None) -> t.Any: """ diff --git a/src/neo4j/_spatial/__init__.py b/src/neo4j/_spatial/__init__.py index bf6f19c47..a8c4ef3bf 100644 --- a/src/neo4j/_spatial/__init__.py +++ b/src/neo4j/_spatial/__init__.py @@ -27,7 +27,7 @@ srid_table_lock = Lock() -class Point(t.Tuple[float, ...]): +class Point(tuple[float, ...]): """ Base-class for spatial data. @@ -58,8 +58,7 @@ def y(self) -> float: ... def z(self) -> float: ... 
def __new__(cls, iterable: t.Iterable[float]) -> Point: - # TODO: 6.0 - remove type ignore when support for Python 3.7 is dropped - return tuple.__new__(cls, map(float, iterable)) # type: ignore[type-var] + return tuple.__new__(cls, map(float, iterable)) def __repr__(self) -> str: return f"POINT({' '.join(map(str, self))})" @@ -103,7 +102,7 @@ def accessor(self, i=index, f=subclass_field): for field_alias in (subclass_field, "xyz"[index]): attributes[field_alias] = property(accessor) - cls = t.cast(t.Type[Point], type(name, (Point,), attributes)) + cls = t.cast(type[Point], type(name, (Point,), attributes)) with srid_table_lock: for dim, srid_ in srid_map.items(): diff --git a/src/neo4j/_sync/_debug/_concurrency_check.py b/src/neo4j/_sync/_debug/_concurrency_check.py index 70180a962..b8712a8b2 100644 --- a/src/neo4j/_sync/_debug/_concurrency_check.py +++ b/src/neo4j/_sync/_debug/_concurrency_check.py @@ -129,7 +129,7 @@ def inner(*args, **kwargs): tbs = deepcopy(self.__tracebacks) if acquired: try: - item = iter_.__next__() + item = next(iter_) except StopIteration: return finally: diff --git a/src/neo4j/_sync/bookmark_manager.py b/src/neo4j/_sync/bookmark_manager.py index 8613de45a..3d024bb62 100644 --- a/src/neo4j/_sync/bookmark_manager.py +++ b/src/neo4j/_sync/bookmark_manager.py @@ -26,8 +26,8 @@ ) -TBmSupplier = t.Callable[[], t.Union[Bookmarks, t.Union[Bookmarks]]] -TBmConsumer = t.Callable[[Bookmarks], t.Union[None, t.Union[None]]] +TBmSupplier = t.Callable[[], Bookmarks | t.Union[Bookmarks]] +TBmConsumer = t.Callable[[Bookmarks], None | t.Union[None]] def _bookmarks_to_set( diff --git a/src/neo4j/_sync/home_db_cache.py b/src/neo4j/_sync/home_db_cache.py index 904fa6625..587afd0cd 100644 --- a/src/neo4j/_sync/home_db_cache.py +++ b/src/neo4j/_sync/home_db_cache.py @@ -28,12 +28,8 @@ if t.TYPE_CHECKING: import typing_extensions as te - TKey: te.TypeAlias = t.Union[ - str, - t.Tuple[t.Tuple[str, t.Hashable], ...], - t.Tuple[None], - ] - TVal: te.TypeAlias = t.Tuple[float, str] + TKey: te.TypeAlias = str | tuple[tuple[str, t.Hashable], ...] 
| tuple[None] + TVal: te.TypeAlias = tuple[float, str] class HomeDbCache: diff --git a/src/neo4j/_sync/io/_bolt3.py b/src/neo4j/_sync/io/_bolt3.py index 3f4c93a3b..1ccf75c61 100644 --- a/src/neo4j/_sync/io/_bolt3.py +++ b/src/neo4j/_sync/io/_bolt3.py @@ -309,7 +309,8 @@ def route( self.send_all() self.fetch_all() return [ - dict(zip(metadata.get("fields", ()), values)) for values in records + dict(zip(metadata.get("fields", ()), values, strict=True)) + for values in records ] def run( diff --git a/src/neo4j/_sync/io/_bolt4.py b/src/neo4j/_sync/io/_bolt4.py index 19c719240..f05fcf360 100644 --- a/src/neo4j/_sync/io/_bolt4.py +++ b/src/neo4j/_sync/io/_bolt4.py @@ -224,7 +224,8 @@ def route( self.send_all() self.fetch_all() return [ - dict(zip(metadata.get("fields", ()), values)) for values in records + dict(zip(metadata.get("fields", ()), values, strict=True)) + for values in records ] def run( diff --git a/src/neo4j/_sync/work/result.py b/src/neo4j/_sync/work/result.py index 3e0337a60..2c01788d1 100644 --- a/src/neo4j/_sync/work/result.py +++ b/src/neo4j/_sync/work/result.py @@ -78,7 +78,7 @@ _driver_dir = _driver_dir.parent _T = t.TypeVar("_T") -_TResultKey = t.Union[int, str] +_TResultKey: t.TypeAlias = int | str _RESULT_FAILED_ERROR = ( @@ -247,7 +247,8 @@ def on_records(records): for record in records ) self._record_buffer.extend( - Record(zip(self._keys, record)) for record in records + Record(zip(self._keys, record, strict=True)) + for record in records ) def _on_summary(): @@ -420,7 +421,7 @@ def __next__(self) -> Record: :raises StopIteration: if no more records are available. """ - return self.__iter__().__next__() + return next(iter(self)) def _attach(self): # Set the Result object in an attached state by fetching messages @@ -918,7 +919,7 @@ def to_df( else: # The rows have different keys. We need to pass a list # of dicts to pandas - rows = [dict(zip(df_keys, r)) for r in rows] + rows = [dict(zip(df_keys, r, strict=True)) for r in rows] df_keys = False rows.append(row) if df_keys is False: diff --git a/src/neo4j/_work/summary.py b/src/neo4j/_work/summary.py index 7428db7e5..3dc296174 100644 --- a/src/neo4j/_work/summary.py +++ b/src/neo4j/_work/summary.py @@ -36,7 +36,7 @@ from ..addressing import Address from ..api import ServerInfo - _T = te.TypeVar("_T") + _T = t.TypeVar("_T") class ResultSummary: diff --git a/src/neo4j/api.py b/src/neo4j/api.py index ce9bcbe5d..7cad5e343 100644 --- a/src/neo4j/api.py +++ b/src/neo4j/api.py @@ -163,7 +163,7 @@ def __eq__(self, other: t.Any) -> bool: AuthToken = Auth if t.TYPE_CHECKING: - _TAuth = t.Union[t.Tuple[str, str], Auth, None] + _TAuth: t.TypeAlias = tuple[str, str] | Auth | None def basic_auth(user: str, password: str, realm: str | None = None) -> Auth: @@ -385,8 +385,7 @@ def protocol_version(self) -> tuple[int, int]: This is returned as a 2-tuple:class:`tuple` (subclass) of ``(major, minor)`` integers. """ - # TODO: 6.0 - remove cast when support for Python 3.7 is dropped - return t.cast(t.Tuple[int, int], self._protocol_version) + return self._protocol_version @property def agent(self) -> str: @@ -416,7 +415,7 @@ def update(self, metadata: dict) -> None: # As far the user is concerned, protocol versions should simply be a # tuple[int, int]. 
if t.TYPE_CHECKING: - _version_base = t.Tuple[int, int] + _version_base = tuple[int, int] else: _version_base = tuple diff --git a/src/neo4j/exceptions.py b/src/neo4j/exceptions.py index affca2794..0a53ecbc1 100644 --- a/src/neo4j/exceptions.py +++ b/src/neo4j/exceptions.py @@ -125,24 +125,15 @@ Transaction, ) - _TTransaction = t.Union[ - AsyncManagedTransaction, - AsyncTransaction, - ManagedTransaction, - Transaction, - ] - _TResult = t.Union[AsyncResult, Result] - _TSession = t.Union[AsyncSession, Session] + _TTransaction: t.TypeAlias = ( + AsyncManagedTransaction + | AsyncTransaction + | ManagedTransaction + | Transaction + ) + _TResult: t.TypeAlias = AsyncResult | Result + _TSession: t.TypeAlias = AsyncSession | Session _T = t.TypeVar("_T") -else: - _TTransaction = t.Union[ - "AsyncManagedTransaction", - "AsyncTransaction", - "ManagedTransaction", - "Transaction", - ] - _TResult = t.Union["AsyncResult", "Result"] - _TSession = t.Union["AsyncSession", "Session"] __all__ = [ diff --git a/src/neo4j/graph/__init__.py b/src/neo4j/graph/__init__.py index 43be39f14..e0ad0fc46 100644 --- a/src/neo4j/graph/__init__.py +++ b/src/neo4j/graph/__init__.py @@ -76,7 +76,7 @@ def relationship_type(self, name: str) -> type[Relationship]: cls = self._relationship_types[name] except KeyError: cls = self._relationship_types[name] = t.cast( - t.Type[Relationship], type(str(name), (Relationship,), {}) + type[Relationship], type(str(name), (Relationship,), {}) ) return cls diff --git a/src/neo4j/time/__init__.py b/src/neo4j/time/__init__.py index cfee552d7..641b3f0a2 100644 --- a/src/neo4j/time/__init__.py +++ b/src/neo4j/time/__init__.py @@ -382,7 +382,7 @@ def utc_time(self): class Duration( # type: ignore[misc] - t.Tuple[int, int, int, int], duration_base_class + tuple[int, int, int, int], duration_base_class ): r""" A difference between two points in time. @@ -470,8 +470,7 @@ def __new__( ) if not MIN_INT64 <= avg_total_seconds <= MAX_INT64: raise ValueError(f"Duration value out of range: {tuple_!r}") - # TODO: 6.0 - remove type ignore when support for Python 3.7 is dropped - return tuple.__new__(cls, tuple_) # type: ignore[type-var] + return tuple.__new__(cls, tuple_) def __bool__(self) -> bool: """Falsy if all primary instance attributes are.""" diff --git a/src/neo4j/time/_arithmetic.py b/src/neo4j/time/_arithmetic.py index b08ff48f8..0b33b687f 100644 --- a/src/neo4j/time/_arithmetic.py +++ b/src/neo4j/time/_arithmetic.py @@ -14,10 +14,7 @@ # limitations under the License. 
-from typing import ( - Tuple, - TypeVar, -) +from typing import TypeVar __all__ = [ @@ -94,7 +91,7 @@ def nano_divmod(x, y): def symmetric_divmod( dividend: _TDividend, divisor: float -) -> Tuple[int, _TDividend]: +) -> tuple[int, _TDividend]: number = type(dividend) if dividend >= 0: quotient, remainder = divmod(dividend, divisor) diff --git a/testkit/Dockerfile b/testkit/Dockerfile index e61135da1..56fc59bee 100644 --- a/testkit/Dockerfile +++ b/testkit/Dockerfile @@ -42,7 +42,7 @@ ENV PYENV_ROOT=/.pyenv ENV PATH="$PYENV_ROOT/shims:$PYENV_ROOT/bin:$PATH" # Setup python version -ENV PYTHON_VERSIONS="3.13 3.12 3.11 3.10 3.9 3.8 3.7" +ENV PYTHON_VERSIONS="3.13 3.12 3.11 3.10" RUN for version in $PYTHON_VERSIONS; do \ pyenv install $version; \ diff --git a/testkitbackend/_async/requests.py b/testkitbackend/_async/requests.py index f08bb30de..2239b2005 100644 --- a/testkitbackend/_async/requests.py +++ b/testkitbackend/_async/requests.py @@ -919,7 +919,7 @@ async def result_next(backend, data): result = backend.results[data["resultId"]] try: - record = await AsyncUtil.next(AsyncUtil.iter(result)) + record = await anext(aiter(result)) except StopAsyncIteration: await backend.send_response("NullRecord", {}) return diff --git a/testkitbackend/_sync/requests.py b/testkitbackend/_sync/requests.py index e960ad8fc..baa34a950 100644 --- a/testkitbackend/_sync/requests.py +++ b/testkitbackend/_sync/requests.py @@ -919,7 +919,7 @@ def result_next(backend, data): result = backend.results[data["resultId"]] try: - record = Util.next(Util.iter(result)) + record = next(iter(result)) except StopIteration: backend.send_response("NullRecord", {}) return diff --git a/tests/conftest.py b/tests/conftest.py index b6cdd7f7f..edd15539e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -204,26 +204,6 @@ def watcher(): yield -# TODO: 6.0 - -# when support for Python 3.7 is dropped and pytest-asyncio is bumped -# check if this fixture is still needed -@pytest.fixture -def event_loop(): - # Overwriting the default event loop injected by pytest-asyncio - # because its implementation doesn't properly shut down the loop - # (e.g., it doesn't call `shutdown_asyncgens`) - policy = asyncio.get_event_loop_policy() - loop = policy.new_event_loop() - yield loop - try: - _cancel_all_tasks(loop) - loop.run_until_complete(loop.shutdown_asyncgens()) - if sys.version_info >= (3, 9): - loop.run_until_complete(loop.shutdown_default_executor()) - finally: - loop.close() - - def _cancel_all_tasks(loop): # Copied from Python 3.13's asyncio package with minor modifications # in exception wording and variable naming diff --git a/tests/env.py b/tests/env.py index baf42aad7..2c99005ad 100644 --- a/tests/env.py +++ b/tests/env.py @@ -16,7 +16,6 @@ import abc import sys -import typing as t from os import environ @@ -27,7 +26,7 @@ def eval(self): class _LazyEvalEnv(_LazyEval): - def __init__(self, env_key, type_: t.Type = str, default=...): + def __init__(self, env_key, type_: type = str, default=...): self.env_key = env_key self.type_ = type_ self.default = default diff --git a/tests/integration/async_/test_custom_ssl_context.py b/tests/integration/async_/test_custom_ssl_context.py index dd592d89e..8739dc569 100644 --- a/tests/integration/async_/test_custom_ssl_context.py +++ b/tests/integration/async_/test_custom_ssl_context.py @@ -38,12 +38,14 @@ def wrap_fail(*_, **__): fake_ssl_context.wrap_socket.side_effect = wrap_fail fake_ssl_context.wrap_bio.side_effect = wrap_fail - async with neo4j.AsyncGraphDatabase.driver( - uri, 
auth=auth, ssl_context=fake_ssl_context - ) as driver: - async with driver.session() as session: - with pytest.raises(NoNeedToGoFurtherError): - await session.run("RETURN 1") + async with ( + neo4j.AsyncGraphDatabase.driver( + uri, auth=auth, ssl_context=fake_ssl_context + ) as driver, + driver.session() as session, + ): + with pytest.raises(NoNeedToGoFurtherError): + await session.run("RETURN 1") assert ( fake_ssl_context.wrap_socket.call_count diff --git a/tests/integration/mixed/test_async_driver.py b/tests/integration/mixed/test_async_driver.py index ac0faf649..550fb7d4e 100644 --- a/tests/integration/mixed/test_async_driver.py +++ b/tests/integration/mixed/test_async_driver.py @@ -18,28 +18,12 @@ # FIXME: activate lint and fix it import asyncio -import sys - -import pytest import neo4j -from ... import env from ..._async_compat import mark_async_test -# TODO: Python 3.9: when support gets dropped, remove this mark -@pytest.mark.xfail( - # direct driver is not making use of `asyncio.Lock`. - sys.version_info < (3, 10) and env.NEO4J_SCHEME == "neo4j", - reason=( - "asyncio's synchronization primitives can create a new event loop " - "if instantiated while there is no running event loop. This " - "changed with Python 3.10." - ), - raises=RuntimeError, - strict=True, -) def test_can_create_async_driver_outside_of_loop(uri, auth): pool_size = 2 # used to make sure the pool was full at least at some point diff --git a/tests/integration/sync/test_custom_ssl_context.py b/tests/integration/sync/test_custom_ssl_context.py index 94b9bd372..a4a68d51b 100644 --- a/tests/integration/sync/test_custom_ssl_context.py +++ b/tests/integration/sync/test_custom_ssl_context.py @@ -38,12 +38,14 @@ def wrap_fail(*_, **__): fake_ssl_context.wrap_socket.side_effect = wrap_fail fake_ssl_context.wrap_bio.side_effect = wrap_fail - with neo4j.GraphDatabase.driver( - uri, auth=auth, ssl_context=fake_ssl_context - ) as driver: - with driver.session() as session: - with pytest.raises(NoNeedToGoFurtherError): - session.run("RETURN 1") + with ( + neo4j.GraphDatabase.driver( + uri, auth=auth, ssl_context=fake_ssl_context + ) as driver, + driver.session() as session, + ): + with pytest.raises(NoNeedToGoFurtherError): + session.run("RETURN 1") assert ( fake_ssl_context.wrap_socket.call_count diff --git a/tests/unit/async_/test_auth_management.py b/tests/unit/async_/test_auth_management.py index da0116bcd..80f288ef4 100644 --- a/tests/unit/async_/test_auth_management.py +++ b/tests/unit/async_/test_auth_management.py @@ -98,7 +98,7 @@ def expiring_auth(*args, **kwargs): @pytest.mark.parametrize("auth", SAMPLE_AUTHS) @pytest.mark.parametrize("error", SAMPLE_ERRORS) async def test_static_manager( - auth: t.Union[t.Tuple[str, str], Auth, None], error: Neo4jError + auth: tuple[str, str] | Auth | None, error: Neo4jError ) -> None: manager: AsyncAuthManager = static_auth_manager(auth) assert await manager.get_auth() is auth @@ -120,8 +120,8 @@ async def test_static_manager( ) @pytest.mark.parametrize("error", SAMPLE_ERRORS) async def test_basic_manager_manual_expiry( - auth1: t.Union[t.Tuple[str, str], Auth, None], - auth2: t.Union[t.Tuple[str, str], Auth, None], + auth1: tuple[str, str] | Auth | None, + auth2: tuple[str, str] | Auth | None, error: Neo4jError, mocker, ) -> None: @@ -146,10 +146,10 @@ def return_value_generator(auth): @pytest.mark.parametrize("error", SAMPLE_ERRORS) @pytest.mark.parametrize("expires_at", (None, 0.001, 1, 1000.0)) async def test_bearer_manager_manual_expiry( - auth1: t.Union[t.Tuple[str, str], 
Auth, None], - auth2: t.Union[t.Tuple[str, str], Auth, None], + auth1: tuple[str, str] | Auth | None, + auth2: tuple[str, str] | Auth | None, error: Neo4jError, - expires_at: t.Optional[float], + expires_at: float | None, mocker, ) -> None: def return_value_generator(auth): @@ -174,9 +174,9 @@ def return_value_generator(auth): ) @pytest.mark.parametrize("expires_at", (None, -1, 1.0, 1, 1000.0)) async def test_bearer_manager_time_expiry( - auth1: t.Union[t.Tuple[str, str], Auth, None], - auth2: t.Union[t.Tuple[str, str], Auth, None], - expires_at: t.Optional[float], + auth1: tuple[str, str] | Auth | None, + auth2: tuple[str, str] | Auth | None, + expires_at: float | None, mocker, ) -> None: with freeze_time("1970-01-01 00:00:00") as frozen_time: @@ -209,11 +209,9 @@ async def test_bearer_manager_time_expiry( async def _test_manager( - auth1: t.Union[t.Tuple[str, str], Auth, None], - auth2: t.Union[t.Tuple[str, str], Auth, None], - return_value_generator: t.Callable[ - [t.Union[t.Tuple[str, str], Auth, None]], T - ], + auth1: tuple[str, str] | Auth | None, + auth2: tuple[str, str] | Auth | None, + return_value_generator: t.Callable[[tuple[str, str] | Auth | None], T], manager_factory: t.Callable[ [t.Callable[[], t.Awaitable[T]]], AsyncAuthManager ], diff --git a/tests/unit/async_/test_driver.py b/tests/unit/async_/test_driver.py index c26d94a87..8a87a1e49 100644 --- a/tests/unit/async_/test_driver.py +++ b/tests/unit/async_/test_driver.py @@ -527,38 +527,38 @@ async def get_certificate(self) -> ClientCertificate | None: if t.TYPE_CHECKING: - _T_NotificationMinimumSeverity = t.Union[ - NotificationMinimumSeverity, - te.Literal[ + _T_NotificationMinimumSeverity: t.TypeAlias = ( + NotificationMinimumSeverity + | te.Literal[ "OFF", "WARNING", "INFORMATION", - ], - ] + ] + ) - _T_NotificationDisabledCategory = t.Union[ - NotificationDisabledCategory, - te.Literal[ + _T_NotificationDisabledCategory: t.TypeAlias = ( + NotificationDisabledCategory + | te.Literal[ "HINT", "UNRECOGNIZED", "UNSUPPORTED", "PERFORMANCE", "DEPRECATION", "GENERIC", - ], - ] + ] + ) - _T_NotificationDisabledClassification = t.Union[ - NotificationDisabledClassification, - te.Literal[ + _T_NotificationDisabledClassification: t.TypeAlias = ( + NotificationDisabledClassification + | te.Literal[ "HINT", "UNRECOGNIZED", "UNSUPPORTED", "PERFORMANCE", "DEPRECATION", "GENERIC", - ], - ] + ] + ) if t.TYPE_CHECKING: @@ -764,11 +764,11 @@ async def test_warn_notification_severity_driver_config( ) -> None: if inspect.isclass(expected) and issubclass(expected, Exception): assert min_sev is not ... 
# makes no sense to test - with pytest.raises(expected): - with pytest.warns(PreviewWarning, match="notification warnings"): - AsyncGraphDatabase.driver( - uri, warn_notification_severity=min_sev - ) + with ( + pytest.raises(expected), + pytest.warns(PreviewWarning, match="notification warnings"), + ): + AsyncGraphDatabase.driver(uri, warn_notification_severity=min_sev) return if min_sev is ...: driver = AsyncGraphDatabase.driver(uri) diff --git a/tests/unit/async_/work/test_result.py b/tests/unit/async_/work/test_result.py index fb71105f3..0dcb74077 100644 --- a/tests/unit/async_/work/test_result.py +++ b/tests/unit/async_/work/test_result.py @@ -272,32 +272,32 @@ async def fetch_and_compare_all_records( elif method == "next": n = len(expected_records) if limit is None else limit for _ in range(n): - record = await AsyncUtil.next(result) + record = await anext(result) received_records.append([record.get(key, None)]) if limit is None: with pytest.raises(StopAsyncIteration): - await AsyncUtil.next(result) + await anext(result) assert result._exhausted elif method == "one iter": - iter_ = AsyncUtil.iter(result) + iter_ = aiter(result) n = len(expected_records) if limit is None else limit for _ in range(n): - record = await AsyncUtil.next(iter_) + record = await anext(iter_) received_records.append([record.get(key, None)]) if limit is None: with pytest.raises(StopAsyncIteration): - await AsyncUtil.next(iter_) + await anext(iter_) assert result._exhausted elif method == "new iter": n = len(expected_records) if limit is None else limit for _ in range(n): - iter_ = AsyncUtil.iter(result) - record = await AsyncUtil.next(iter_) + iter_ = aiter(result) + record = await anext(iter_) received_records.append([record.get(key, None)]) if limit is None: - iter_ = AsyncUtil.iter(result) + iter_ = aiter(result) with pytest.raises(StopAsyncIteration): - await AsyncUtil.next(iter_) + await anext(iter_) assert result._exhausted else: raise ValueError @@ -329,37 +329,37 @@ async def test_result_iteration_mixed_methods(): connection = AsyncConnectionStub(records=Records(["x"], records)) result = AsyncResult(connection, 4, None, noop, noop, None) await result._run("CYPHER", {}, None, None, "r", None, None, None) - iter1 = AsyncUtil.iter(result) - iter2 = AsyncUtil.iter(result) + iter1 = aiter(result) + iter2 = aiter(result) - record = await AsyncUtil.next(iter1) + record = await anext(iter1) assert record.get("x") == records[0][0] - record = await AsyncUtil.next(iter2) + record = await anext(iter2) assert record.get("x") == records[1][0] - record = await AsyncUtil.next(iter2) + record = await anext(iter2) assert record.get("x") == records[2][0] - record = await AsyncUtil.next(iter1) + record = await anext(iter1) assert record.get("x") == records[3][0] - record = await AsyncUtil.next(iter1) + record = await anext(iter1) assert record.get("x") == records[4][0] - record = await AsyncUtil.next(result) + record = await anext(result) assert record.get("x") == records[5][0] - record = await AsyncUtil.next(iter2) + record = await anext(iter2) assert record.get("x") == records[6][0] - record = await AsyncUtil.next(iter1) + record = await anext(iter1) assert record.get("x") == records[7][0] - record = await AsyncUtil.next(AsyncUtil.iter(result)) + record = await anext(aiter(result)) assert record.get("x") == records[8][0] assert [r.get("x") async for r in result] == [records[9][0]] with pytest.raises(StopAsyncIteration): - await AsyncUtil.next(iter1) + await anext(iter1) with pytest.raises(StopAsyncIteration): - await 
AsyncUtil.next(iter2) + await anext(iter2) with pytest.raises(StopAsyncIteration): - await AsyncUtil.next(result) + await anext(result) with pytest.raises(StopAsyncIteration): - await AsyncUtil.next(AsyncUtil.iter(result)) + await anext(aiter(result)) assert [r.get("x") async for r in result] == [] @@ -436,8 +436,8 @@ async def test_result_peek(records, fetch_size): else: assert isinstance(record, Record) assert record.get("x") == records[i][0] - iter_ = AsyncUtil.iter(result) - await AsyncUtil.next(iter_) # consume the record + iter_ = aiter(result) + await anext(iter_) # consume the record @pytest.mark.parametrize("records", ([[1], [2]], [[1]], [])) @@ -547,7 +547,7 @@ async def test_consume(records, consume_one, summary_meta, consume_times): await result._run("CYPHER", {}, None, None, "r", None, None, None) if consume_one: with suppress(StopAsyncIteration): - await AsyncUtil.next(AsyncUtil.iter(result)) + await anext(aiter(result)) for _ in range(consume_times): summary = await result.consume() assert isinstance(summary, ResultSummary) @@ -775,7 +775,7 @@ async def test_to_eager_result(records): assert len(eager_result.records) == len(records) assert all( list(record) == list(raw) - for record, raw in zip(eager_result.records, records) + for record, raw in zip(eager_result.records, records, strict=True) ) assert eager_result.summary is eager_result[1] @@ -794,10 +794,20 @@ async def test_to_eager_result(records): @pytest.mark.parametrize( ("keys", "values", "types", "instances"), ( - (["i"], list(zip(range(5))), ["int64"], None), - (["x"], list(zip((n - 0.5) / 5 for n in range(5))), ["float64"], None), - (["s"], list(zip(("foo", "bar", "baz", "foobar"))), ["object"], None), - (["l"], list(zip(([1, 2], [3, 4]))), ["object"], None), + (["i"], list(zip(range(5), strict=True)), ["int64"], None), + ( + ["x"], + list(zip(((n - 0.5) / 5 for n in range(5)), strict=True)), + ["float64"], + None, + ), + ( + ["s"], + list(zip(("foo", "bar", "baz", "foobar"), strict=True)), + ["object"], + None, + ), + (["l"], list(zip(([1, 2], [3, 4]), strict=True)), ["object"], None), ( ["n"], list( @@ -815,7 +825,8 @@ async def test_to_eager_result(records): {"a": [1, "a"]}, "cool_id", ), - ) + ), + strict=True, ) ), ["object"], @@ -841,7 +852,8 @@ async def test_to_eager_result(records): "1337", "69", ), - ) + ), + strict=True, ) ), ["object"], @@ -881,7 +893,7 @@ async def test_to_df(keys, values, types, instances, test_default_expand): ( ( ["i"], - list(zip(range(5))), + list(zip(range(5), strict=True)), ["i"], [[0], [1], [2], [3], [4]], ["int64"], @@ -889,35 +901,35 @@ async def test_to_df(keys, values, types, instances, test_default_expand): # test variable name escaping ( ["i.[]->.().{}.\\"], - list(zip(range(5))), + list(zip(range(5), strict=True)), ["i\\.[]->\\.()\\.{}\\.\\\\"], [[0], [1], [2], [3], [4]], ["int64"], ), ( ["x"], - list(zip((n - 0.5) / 5 for n in range(5))), + list(zip(((n - 0.5) / 5 for n in range(5)), strict=True)), ["x"], [[-0.1], [0.1], [0.3], [0.5], [0.7]], ["float64"], ), ( ["s"], - list(zip(("foo", "bar", "baz", "foobar"))), + list(zip(("foo", "bar", "baz", "foobar"), strict=True)), ["s"], [["foo"], ["bar"], ["baz"], ["foobar"]], ["object"], ), ( ["l"], - list(zip(([1, 2], [3, 4]))), + list(zip(([1, 2], [3, 4]), strict=True)), ["l[].0", "l[].1"], [[1, 2], [3, 4]], ["int64", "int64"], ), ( ["l"], - list(zip(([1, 2], [3, 4, 5], [6]))), + list(zip(([1, 2], [3, 4, 5], [6]), strict=True)), ["l[].0", "l[].1", "l[].2"], [[1, 2, None], [3, 4, 5], [6, None, None]], # pandas turns None in int 
columns into NaN @@ -926,7 +938,9 @@ async def test_to_df(keys, values, types, instances, test_default_expand): ), ( ["d"], - list(zip(({"a": 1, "b": 2}, {"a": 3, "b": 4, "": 0}))), + list( + zip(({"a": 1, "b": 2}, {"a": 3, "b": 4, "": 0}), strict=True) + ), ["d{}.a", "d{}.b", "d{}."], [[1, 2, None], [3, 4, 0]], ["int64", "int64", "float64"], @@ -934,14 +948,14 @@ async def test_to_df(keys, values, types, instances, test_default_expand): # test key escaping ( ["d"], - list(zip(({"a.[]\\{}->.().{}.": 1, "b": 2},))), + list(zip(({"a.[]\\{}->.().{}.": 1, "b": 2},), strict=True)), ["d{}.a\\.[]\\\\{}->\\.()\\.{}\\.", "d{}.b"], [[1, 2]], ["int64", "int64"], ), ( ["d"], - list(zip(({"a": 1, "b": 2}, {"a": 3, "c": 4}))), + list(zip(({"a": 1, "b": 2}, {"a": 3, "c": 4}), strict=True)), ["d{}.a", "d{}.b", "d{}.c"], [[1, 2, None], [3, None, 4]], # pandas turns None in int columns into NaN @@ -950,7 +964,12 @@ async def test_to_df(keys, values, types, instances, test_default_expand): ), ( ["x"], - list(zip(([{"foo": "bar", "baz": [42, 0.1]}, "foobar"],))), + list( + zip( + ([{"foo": "bar", "baz": [42, 0.1]}, "foobar"],), + strict=True, + ) + ), ["x[].0{}.foo", "x[].0{}.baz[].0", "x[].0{}.baz[].1", "x[].1"], [["bar", 42, 0.1, "foobar"]], ["object", "int64", "float64", "object"], @@ -981,7 +1000,8 @@ async def test_to_df(keys, values, types, instances, test_default_expand): ["LABEL_A", "LABEL_B"], {"a": [1, "a"], "d": 3}, ), - ) + ), + strict=True, ) ), [ @@ -1033,7 +1053,8 @@ async def test_to_df(keys, values, types, instances, test_default_expand): "r-1337", "r-69", ), - ) + ), + strict=True, ) ), [ diff --git a/tests/unit/async_/work/test_session.py b/tests/unit/async_/work/test_session.py index 743c7573a..89195f060 100644 --- a/tests/unit/async_/work/test_session.py +++ b/tests/unit/async_/work/test_session.py @@ -135,11 +135,13 @@ async def test_opens_connection_on_tx_begin(async_fake_pool): async def test_keeps_connection_on_tx_run( async_fake_pool, test_run_args, repetitions ): - async with AsyncSession(async_fake_pool, SessionConfig()) as session: - async with await session.begin_transaction() as tx: - for _ in range(repetitions): - await tx.run(*test_run_args) - assert session._connection is not None + async with ( + AsyncSession(async_fake_pool, SessionConfig()) as session, + await session.begin_transaction() as tx, + ): + for _ in range(repetitions): + await tx.run(*test_run_args) + assert session._connection is not None @pytest.mark.parametrize( @@ -150,12 +152,14 @@ async def test_keeps_connection_on_tx_run( async def test_keeps_connection_on_tx_consume( async_fake_pool, test_run_args, repetitions ): - async with AsyncSession(async_fake_pool, SessionConfig()) as session: - async with await session.begin_transaction() as tx: - for _ in range(repetitions): - result = await tx.run(*test_run_args) - await result.consume() - assert session._connection is not None + async with ( + AsyncSession(async_fake_pool, SessionConfig()) as session, + await session.begin_transaction() as tx, + ): + for _ in range(repetitions): + result = await tx.run(*test_run_args) + await result.consume() + assert session._connection is not None @pytest.mark.parametrize( diff --git a/tests/unit/async_/work/test_transaction.py b/tests/unit/async_/work/test_transaction.py index 787fae825..98e198f1e 100644 --- a/tests/unit/async_/work/test_transaction.py +++ b/tests/unit/async_/work/test_transaction.py @@ -346,18 +346,18 @@ async def test_server_error_propagates(async_scripted_connection, error): tx = 
AsyncTransaction(connection, 2, None, noop, noop, noop, None) res1 = await tx.run("UNWIND range(1, 1000) AS n RETURN n") - assert await res1.__anext__() == {"n": 1} + assert await anext(res1) == {"n": 1} res2 = await tx.run("RETURN 'causes error later'") assert await res2.fetch(2) == [{"n": 1}, {"n": 2}] with pytest.raises(expected_error) as exc1: - await res2.__anext__() + await anext(res2) # can finish the buffer assert await res1.fetch(1) == [{"n": 2}] # then fails because the connection was broken by res2 with pytest.raises(ResultFailedError) as exc2: - await res1.__anext__() + await anext(res1) assert exc1.value is exc2.value.__cause__ diff --git a/tests/unit/common/codec/packstream/v1/test_packstream.py b/tests/unit/common/codec/packstream/v1/test_packstream.py index ad5c16916..ab370feb8 100644 --- a/tests/unit/common/codec/packstream/v1/test_packstream.py +++ b/tests/unit/common/codec/packstream/v1/test_packstream.py @@ -84,9 +84,13 @@ def _pack(*values, dehydration_hooks=None): def assert_packable(packer_with_buffer, unpacker_with_buffer): def _recursive_nan_equal(a, b): if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)): - return all(_recursive_nan_equal(x, y) for x, y in zip(a, b)) + return len(a) == len(b) and all( + _recursive_nan_equal(x, y) for x, y in zip(a, b, strict=True) + ) elif isinstance(a, dict) and isinstance(b, dict): - return all(_recursive_nan_equal(a[k], b[k]) for k in a) + return len(a) == len(b) and all( + _recursive_nan_equal(a[k], b[k]) for k in a + ) else: return a == b or (isnan(a) and isnan(b)) diff --git a/tests/unit/common/test_record.py b/tests/unit/common/test_record.py index 0f06fbe2c..c726bec39 100644 --- a/tests/unit/common/test_record.py +++ b/tests/unit/common/test_record.py @@ -48,45 +48,67 @@ def test_record_equality() -> None: - record1 = Record(zip(["name", "empire"], ["Nigel", "The British Empire"])) - record2 = Record(zip(["name", "empire"], ["Nigel", "The British Empire"])) - record3 = Record(zip(["name", "empire"], ["Stefan", "Das Deutschland"])) + record1 = Record( + zip(["name", "empire"], ["Nigel", "The British Empire"], strict=True) + ) + record2 = Record( + zip(["name", "empire"], ["Nigel", "The British Empire"], strict=True) + ) + record3 = Record( + zip(["name", "empire"], ["Stefan", "Das Deutschland"], strict=True) + ) assert record1 == record2 assert record1 != record3 assert record2 != record3 def test_record_hashing() -> None: - record1 = Record(zip(["name", "empire"], ["Nigel", "The British Empire"])) - record2 = Record(zip(["name", "empire"], ["Nigel", "The British Empire"])) - record3 = Record(zip(["name", "empire"], ["Stefan", "Das Deutschland"])) + record1 = Record( + zip(["name", "empire"], ["Nigel", "The British Empire"], strict=True) + ) + record2 = Record( + zip(["name", "empire"], ["Nigel", "The British Empire"], strict=True) + ) + record3 = Record( + zip(["name", "empire"], ["Stefan", "Das Deutschland"], strict=True) + ) assert hash(record1) == hash(record2) assert hash(record1) != hash(record3) assert hash(record2) != hash(record3) def test_record_iter() -> None: - a_record = Record(zip(["name", "empire"], ["Nigel", "The British Empire"])) + a_record = Record( + zip(["name", "empire"], ["Nigel", "The British Empire"], strict=True) + ) assert list(iter(a_record)) == ["Nigel", "The British Empire"] def test_record_as_dict() -> None: - a_record = Record(zip(["name", "empire"], ["Nigel", "The British Empire"])) + a_record = Record( + zip(["name", "empire"], ["Nigel", "The British Empire"], strict=True) + ) 
assert dict(a_record) == {"name": "Nigel", "empire": "The British Empire"} def test_record_as_list() -> None: - a_record = Record(zip(["name", "empire"], ["Nigel", "The British Empire"])) + a_record = Record( + zip(["name", "empire"], ["Nigel", "The British Empire"], strict=True) + ) assert list(a_record) == ["Nigel", "The British Empire"] def test_record_len() -> None: - a_record = Record(zip(["name", "empire"], ["Nigel", "The British Empire"])) + a_record = Record( + zip(["name", "empire"], ["Nigel", "The British Empire"], strict=True) + ) assert len(a_record) == 2 def test_record_repr() -> None: - a_record = Record(zip(["name", "empire"], ["Nigel", "The British Empire"])) + a_record = Record( + zip(["name", "empire"], ["Nigel", "The British Empire"], strict=True) + ) assert ( repr(a_record) == "<Record name='Nigel' empire='The British Empire'>" ) @@ -158,7 +180,9 @@ def _record_data_make_path() -> Path: ), ) def test_record_data_keys(keys, expected) -> None: - record = Record(zip(_RECORD_DATA_ALICE_KEYS, _RECORD_DATA_ALICE_VALUES)) + record = Record( + zip(_RECORD_DATA_ALICE_KEYS, _RECORD_DATA_ALICE_VALUES, strict=True) + ) assert record.data(*keys) == expected @@ -168,7 +192,7 @@ def test_record_data_keys(keys, expected) -> None: *( (value, value) for value in t.cast( - t.Tuple[t.Any], + tuple[t.Any], ( None, True, @@ -257,18 +281,24 @@ def test_record_data_types(value, expected, wrapper) -> None: def test_record_index_error() -> None: - record = Record(zip(_RECORD_DATA_ALICE_KEYS, _RECORD_DATA_ALICE_VALUES)) + record = Record( + zip(_RECORD_DATA_ALICE_KEYS, _RECORD_DATA_ALICE_VALUES, strict=True) + ) with pytest.raises(IndexError): record.data(1, 0, 999) def test_record_keys() -> None: - r = Record(zip(["name", "age", "married"], ["Alice", 33, True])) + r = Record( + zip(["name", "age", "married"], ["Alice", 33, True], strict=True) + ) assert r.keys() == ["name", "age", "married"] def test_record_values() -> None: - r = Record(zip(["name", "age", "married"], ["Alice", 33, True])) + r = Record( + zip(["name", "age", "married"], ["Alice", 33, True], strict=True) + ) assert r.values() == ["Alice", 33, True] assert r.values("name") == ["Alice"] assert r.values("age", "name") == [33, "Alice"] @@ -281,7 +311,9 @@ def test_record_values() -> None: def test_record_items() -> None: - r = Record(zip(["name", "age", "married"], ["Alice", 33, True])) + r = Record( + zip(["name", "age", "married"], ["Alice", 33, True], strict=True) + ) assert r.items() == [("name", "Alice"), ("age", 33), ("married", True)] assert r.items("name") == [("name", "Alice")] assert r.items("age", "name") == [("age", 33), ("name", "Alice")] @@ -298,7 +330,9 @@ def test_record_items() -> None: def test_record_index() -> None: - r = Record(zip(["name", "age", "married"], ["Alice", 33, True])) + r = Record( + zip(["name", "age", "married"], ["Alice", 33, True], strict=True) + ) assert r.index("name") == 0 assert r.index("age") == 1 assert r.index("married") == 2 @@ -314,7 +348,9 @@ def test_record_index() -> None: def test_record_value() -> None: - r = Record(zip(["name", "age", "married"], ["Alice", 33, True])) + r = Record( + zip(["name", "age", "married"], ["Alice", 33, True], strict=True) + ) assert r.value() == "Alice" assert r.value("name") == "Alice" assert r.value("age") == 33 @@ -331,7 +367,9 @@ def test_record_value() -> None: def test_record_value_kwargs() -> None: - r = Record(zip(["name", "age", "married"], ["Alice", 33, True])) + r = Record( + zip(["name", "age", "married"], ["Alice", 33, True], strict=True) + ) assert r.value() == "Alice" assert
r.value(key="name") == "Alice" assert r.value(key="age") == 33 @@ -346,7 +384,9 @@ def test_record_value_kwargs() -> None: def test_record_contains() -> None: - r = Record(zip(["name", "age", "married"], ["Alice", 33, True])) + r = Record( + zip(["name", "age", "married"], ["Alice", 33, True], strict=True) + ) assert "Alice" in r assert 33 in r assert True in r @@ -362,27 +402,35 @@ def test_record_from_dict() -> None: def test_record_get_slice() -> None: - r = Record(zip(["name", "age", "married"], ["Alice", 33, True])) - assert Record(zip(["name", "age"], ["Alice", 33])) == r[0:2] + r = Record( + zip(["name", "age", "married"], ["Alice", 33, True], strict=True) + ) + assert Record(zip(["name", "age"], ["Alice", 33], strict=True)) == r[0:2] def test_record_get_by_index() -> None: - r = Record(zip(["name", "age", "married"], ["Alice", 33, True])) + r = Record( + zip(["name", "age", "married"], ["Alice", 33, True], strict=True) + ) assert r[0] == "Alice" def test_record_get_by_name() -> None: - r = Record(zip(["name", "age", "married"], ["Alice", 33, True])) + r = Record( + zip(["name", "age", "married"], ["Alice", 33, True], strict=True) + ) assert r["name"] == "Alice" def test_record_get_by_out_of_bounds_index() -> None: - r = Record(zip(["name", "age", "married"], ["Alice", 33, True])) + r = Record( + zip(["name", "age", "married"], ["Alice", 33, True], strict=True) + ) assert r[9] is None def test_record_get_item() -> None: - r = Record(zip(["x", "y"], ["foo", "bar"])) + r = Record(zip(["x", "y"], ["foo", "bar"], strict=True)) assert r["x"] == "foo" assert r["y"] == "bar" with pytest.raises(KeyError): @@ -407,52 +455,52 @@ def test_record_repr_generic(len_: int) -> None: ("raw", "keys", "serialized"), ( ( - zip(["x", "y", "z"], [1, 2, 3]), + zip(["x", "y", "z"], [1, 2, 3], strict=True), (), {"x": 1, "y": 2, "z": 3}, ), ( - zip(["x", "y", "z"], [1, 2, 3]), + zip(["x", "y", "z"], [1, 2, 3], strict=True), (1, 2), {"y": 2, "z": 3}, ), ( - zip(["x", "y", "z"], [1, 2, 3]), + zip(["x", "y", "z"], [1, 2, 3], strict=True), ("z", "x"), {"x": 1, "z": 3}, ), ( - zip(["x"], [None]), + zip(["x"], [None], strict=True), (), {"x": None}, ), ( - zip(["x", "y"], [True, False]), + zip(["x", "y"], [True, False], strict=True), (), {"x": True, "y": False}, ), ( - zip(["x", "y", "z"], [0.0, 1.0, 3.141592653589]), + zip(["x", "y", "z"], [0.0, 1.0, 3.141592653589], strict=True), (), {"x": 0.0, "y": 1.0, "z": 3.141592653589}, ), ( - zip(["x"], ["hello, world"]), + zip(["x"], ["hello, world"], strict=True), (), {"x": "hello, world"}, ), ( - zip(["x"], [bytearray([1, 2, 3])]), + zip(["x"], [bytearray([1, 2, 3])], strict=True), (), {"x": bytearray([1, 2, 3])}, ), ( - zip(["x"], [[1, 2, 3]]), + zip(["x"], [[1, 2, 3]], strict=True), (), {"x": [1, 2, 3]}, ), ( - zip(["x"], [{"one": 1, "two": 2}]), + zip(["x"], [{"one": 1, "two": 2}], strict=True), (), {"x": {"one": 1, "two": 2}}, ), @@ -468,6 +516,7 @@ def test_record_repr_generic(len_: int) -> None: {"name": "Alice"}, ) ], + strict=True, ), (), {"a": {"name": "Alice"}}, @@ -486,7 +535,9 @@ def test_data_relationship() -> None: alice_knows_bob = gh.hydrate_relationship( 1, 1, 2, "KNOWS", {"since": 1999} ) - record = Record(zip(["a", "b", "r"], [alice, bob, alice_knows_bob])) + record = Record( + zip(["a", "b", "r"], [alice, bob, alice_knows_bob], strict=True) + ) assert record.data() == { "a": {"name": "Alice", "age": 33}, "b": {"name": "Bob", "age": 44}, @@ -504,7 +555,7 @@ def test_data_unbound_relationship() -> None: some_one_knows_some_one = 
         1, 42, 43, "KNOWS", {"since": 1999}
     )
-    record = Record(zip(["r"], [some_one_knows_some_one]))
+    record = Record(zip(["r"], [some_one_knows_some_one], strict=True))

     assert record.data() == {"r": ({}, "KNOWS", {})}
@@ -524,7 +575,7 @@ def test_data_path(cyclic) -> None:
     ]

     path = gh.hydrate_path([alice, bob, carol], r, [1, 1, -2, 2])
-    record = Record(zip(["r"], [path]))
+    record = Record(zip(["r"], [path], strict=True))
     assert record.data() == {
         "r": [dict(alice), "KNOWS", dict(bob), "DISLIKES", dict(carol)]
     }
diff --git a/tests/unit/common/time/test_datetime.py b/tests/unit/common/time/test_datetime.py
index 46be5ffd3..715eff4d7 100644
--- a/tests/unit/common/time/test_datetime.py
+++ b/tests/unit/common/time/test_datetime.py
@@ -20,8 +20,8 @@
 import itertools
 import operator
 import pickle
-import sys
 import typing as t
+import zoneinfo
 from datetime import (
     datetime,
     timedelta,
@@ -195,23 +195,18 @@ def test_now_with_timezone_utc_tz(self) -> None:
         assert t.dst() is None
         assert t.tzname() == "UTC"

-    if sys.version_info >= (3, 9):
-
-        def test_now_with_zoneinfo_utc_tz(self) -> None:
-            # not fully supported tzinfo implementation
-            import zoneinfo
-
-            t = DateTime.now(zoneinfo.ZoneInfo("UTC"))
-            assert t.year == 1970
-            assert t.month == 1
-            assert t.day == 1
-            assert t.hour == 12
-            assert t.minute == 34
-            assert t.second == 56
-            assert t.nanosecond == 789000001
-            assert t.utcoffset() == timedelta(seconds=0)
-            assert t.dst() == timedelta(seconds=0)
-            assert t.tzname() == "UTC"
+    def test_now_with_zoneinfo_utc_tz(self) -> None:
+        t = DateTime.now(zoneinfo.ZoneInfo("UTC"))
+        assert t.year == 1970
+        assert t.month == 1
+        assert t.day == 1
+        assert t.hour == 12
+        assert t.minute == 34
+        assert t.second == 56
+        assert t.nanosecond == 789000001
+        assert t.utcoffset() == timedelta(seconds=0)
+        assert t.dst() == timedelta(seconds=0)
+        assert t.tzname() == "UTC"

     def test_utc_now(self) -> None:
         t = DateTime.utc_now()
diff --git a/tests/unit/common/work/test_summary.py b/tests/unit/common/work/test_summary.py
index 46c85c539..447df46cd 100644
--- a/tests/unit/common/work/test_summary.py
+++ b/tests/unit/common/work/test_summary.py
@@ -443,7 +443,9 @@ def test_gql_statuses_keep_order(
     assert len(status_objects) == len(types)

     status: GqlStatusObject
-    for i, (type_, status) in enumerate(zip(types, status_objects)):
+    for i, (type_, status) in enumerate(
+        zip(types, status_objects, strict=True)
+    ):
         StatusOrderHelper.assert_status_data_matches(status, i, type_)


@@ -765,7 +767,9 @@ def test_summary_summary_notifications(
         return

     assert summary_in is not None
-    for notification_out, notification_in in zip(summary_out, summary_in):
+    for notification_out, notification_in in zip(
+        summary_out, summary_in, strict=True
+    ):
         code_out: str = notification_out.code
         code_in = notification_in.get("code", "")
         assert code_out == code_in
diff --git a/tests/unit/sync/test_auth_management.py b/tests/unit/sync/test_auth_management.py
index 84e6d2514..0012cd986 100644
--- a/tests/unit/sync/test_auth_management.py
+++ b/tests/unit/sync/test_auth_management.py
@@ -98,7 +98,7 @@ def expiring_auth(*args, **kwargs):
 @pytest.mark.parametrize("auth", SAMPLE_AUTHS)
 @pytest.mark.parametrize("error", SAMPLE_ERRORS)
 def test_static_manager(
-    auth: t.Union[t.Tuple[str, str], Auth, None], error: Neo4jError
+    auth: tuple[str, str] | Auth | None, error: Neo4jError
 ) -> None:
     manager: AuthManager = static_auth_manager(auth)
     assert manager.get_auth() is auth
@@ -120,8 +120,8 @@ def test_static_manager(
 )
@pytest.mark.parametrize("error", SAMPLE_ERRORS) def test_basic_manager_manual_expiry( - auth1: t.Union[t.Tuple[str, str], Auth, None], - auth2: t.Union[t.Tuple[str, str], Auth, None], + auth1: tuple[str, str] | Auth | None, + auth2: tuple[str, str] | Auth | None, error: Neo4jError, mocker, ) -> None: @@ -146,10 +146,10 @@ def return_value_generator(auth): @pytest.mark.parametrize("error", SAMPLE_ERRORS) @pytest.mark.parametrize("expires_at", (None, 0.001, 1, 1000.0)) def test_bearer_manager_manual_expiry( - auth1: t.Union[t.Tuple[str, str], Auth, None], - auth2: t.Union[t.Tuple[str, str], Auth, None], + auth1: tuple[str, str] | Auth | None, + auth2: tuple[str, str] | Auth | None, error: Neo4jError, - expires_at: t.Optional[float], + expires_at: float | None, mocker, ) -> None: def return_value_generator(auth): @@ -174,9 +174,9 @@ def return_value_generator(auth): ) @pytest.mark.parametrize("expires_at", (None, -1, 1.0, 1, 1000.0)) def test_bearer_manager_time_expiry( - auth1: t.Union[t.Tuple[str, str], Auth, None], - auth2: t.Union[t.Tuple[str, str], Auth, None], - expires_at: t.Optional[float], + auth1: tuple[str, str] | Auth | None, + auth2: tuple[str, str] | Auth | None, + expires_at: float | None, mocker, ) -> None: with freeze_time("1970-01-01 00:00:00") as frozen_time: @@ -209,11 +209,9 @@ def test_bearer_manager_time_expiry( def _test_manager( - auth1: t.Union[t.Tuple[str, str], Auth, None], - auth2: t.Union[t.Tuple[str, str], Auth, None], - return_value_generator: t.Callable[ - [t.Union[t.Tuple[str, str], Auth, None]], T - ], + auth1: tuple[str, str] | Auth | None, + auth2: tuple[str, str] | Auth | None, + return_value_generator: t.Callable[[tuple[str, str] | Auth | None], T], manager_factory: t.Callable[ [t.Callable[[], t.Union[T]]], AuthManager ], diff --git a/tests/unit/sync/test_driver.py b/tests/unit/sync/test_driver.py index e1173f6b0..2d22b41af 100644 --- a/tests/unit/sync/test_driver.py +++ b/tests/unit/sync/test_driver.py @@ -526,38 +526,38 @@ def get_certificate(self) -> ClientCertificate | None: if t.TYPE_CHECKING: - _T_NotificationMinimumSeverity = t.Union[ - NotificationMinimumSeverity, - te.Literal[ + _T_NotificationMinimumSeverity: t.TypeAlias = ( + NotificationMinimumSeverity + | te.Literal[ "OFF", "WARNING", "INFORMATION", - ], - ] + ] + ) - _T_NotificationDisabledCategory = t.Union[ - NotificationDisabledCategory, - te.Literal[ + _T_NotificationDisabledCategory: t.TypeAlias = ( + NotificationDisabledCategory + | te.Literal[ "HINT", "UNRECOGNIZED", "UNSUPPORTED", "PERFORMANCE", "DEPRECATION", "GENERIC", - ], - ] + ] + ) - _T_NotificationDisabledClassification = t.Union[ - NotificationDisabledClassification, - te.Literal[ + _T_NotificationDisabledClassification: t.TypeAlias = ( + NotificationDisabledClassification + | te.Literal[ "HINT", "UNRECOGNIZED", "UNSUPPORTED", "PERFORMANCE", "DEPRECATION", "GENERIC", - ], - ] + ] + ) if t.TYPE_CHECKING: @@ -763,11 +763,11 @@ def test_warn_notification_severity_driver_config( ) -> None: if inspect.isclass(expected) and issubclass(expected, Exception): assert min_sev is not ... 
-        with pytest.raises(expected):
-            with pytest.warns(PreviewWarning, match="notification warnings"):
-                GraphDatabase.driver(
-                    uri, warn_notification_severity=min_sev
-                )
+        with (
+            pytest.raises(expected),
+            pytest.warns(PreviewWarning, match="notification warnings"),
+        ):
+            GraphDatabase.driver(uri, warn_notification_severity=min_sev)
         return
     if min_sev is ...:
         driver = GraphDatabase.driver(uri)
diff --git a/tests/unit/sync/work/test_result.py b/tests/unit/sync/work/test_result.py
index 65249303f..b4324805d 100644
--- a/tests/unit/sync/work/test_result.py
+++ b/tests/unit/sync/work/test_result.py
@@ -272,32 +272,32 @@ def fetch_and_compare_all_records(
     elif method == "next":
         n = len(expected_records) if limit is None else limit
         for _ in range(n):
-            record = Util.next(result)
+            record = next(result)
             received_records.append([record.get(key, None)])
         if limit is None:
             with pytest.raises(StopIteration):
-                Util.next(result)
+                next(result)
             assert result._exhausted
     elif method == "one iter":
-        iter_ = Util.iter(result)
+        iter_ = iter(result)
         n = len(expected_records) if limit is None else limit
         for _ in range(n):
-            record = Util.next(iter_)
+            record = next(iter_)
             received_records.append([record.get(key, None)])
         if limit is None:
             with pytest.raises(StopIteration):
-                Util.next(iter_)
+                next(iter_)
             assert result._exhausted
     elif method == "new iter":
         n = len(expected_records) if limit is None else limit
         for _ in range(n):
-            iter_ = Util.iter(result)
-            record = Util.next(iter_)
+            iter_ = iter(result)
+            record = next(iter_)
             received_records.append([record.get(key, None)])
         if limit is None:
-            iter_ = Util.iter(result)
+            iter_ = iter(result)
             with pytest.raises(StopIteration):
-                Util.next(iter_)
+                next(iter_)
             assert result._exhausted
     else:
         raise ValueError
@@ -329,37 +329,37 @@ def test_result_iteration_mixed_methods():
     connection = ConnectionStub(records=Records(["x"], records))
     result = Result(connection, 4, None, noop, noop, None)
     result._run("CYPHER", {}, None, None, "r", None, None, None)
-    iter1 = Util.iter(result)
-    iter2 = Util.iter(result)
+    iter1 = iter(result)
+    iter2 = iter(result)

-    record = Util.next(iter1)
+    record = next(iter1)
     assert record.get("x") == records[0][0]
-    record = Util.next(iter2)
+    record = next(iter2)
     assert record.get("x") == records[1][0]
-    record = Util.next(iter2)
+    record = next(iter2)
     assert record.get("x") == records[2][0]
-    record = Util.next(iter1)
+    record = next(iter1)
     assert record.get("x") == records[3][0]
-    record = Util.next(iter1)
+    record = next(iter1)
     assert record.get("x") == records[4][0]
-    record = Util.next(result)
+    record = next(result)
     assert record.get("x") == records[5][0]
-    record = Util.next(iter2)
+    record = next(iter2)
     assert record.get("x") == records[6][0]
-    record = Util.next(iter1)
+    record = next(iter1)
     assert record.get("x") == records[7][0]
-    record = Util.next(Util.iter(result))
+    record = next(iter(result))
     assert record.get("x") == records[8][0]
     assert [r.get("x") for r in result] == [records[9][0]]
     with pytest.raises(StopIteration):
-        Util.next(iter1)
+        next(iter1)
     with pytest.raises(StopIteration):
-        Util.next(iter2)
+        next(iter2)
     with pytest.raises(StopIteration):
-        Util.next(result)
+        next(result)
     with pytest.raises(StopIteration):
-        Util.next(Util.iter(result))
+        next(iter(result))
     assert [r.get("x") for r in result] == []
@@ -436,8 +436,8 @@ def test_result_peek(records, fetch_size):
         else:
             assert isinstance(record, Record)
             assert record.get("x") == records[i][0]
-            iter_ = Util.iter(result)
-            Util.next(iter_)  # consume the record
+            iter_ = iter(result)
+            next(iter_)  # consume the record


 @pytest.mark.parametrize("records", ([[1], [2]], [[1]], []))
@@ -547,7 +547,7 @@ def test_consume(records, consume_one, summary_meta, consume_times):
     result._run("CYPHER", {}, None, None, "r", None, None, None)
     if consume_one:
         with suppress(StopIteration):
-            Util.next(Util.iter(result))
+            next(iter(result))
     for _ in range(consume_times):
         summary = result.consume()
         assert isinstance(summary, ResultSummary)
@@ -775,7 +775,7 @@ def test_to_eager_result(records):
     assert len(eager_result.records) == len(records)
     assert all(
         list(record) == list(raw)
-        for record, raw in zip(eager_result.records, records)
+        for record, raw in zip(eager_result.records, records, strict=True)
     )
     assert eager_result.summary is eager_result[1]


@@ -794,10 +794,20 @@ def test_to_eager_result(records):
 @pytest.mark.parametrize(
     ("keys", "values", "types", "instances"),
     (
-        (["i"], list(zip(range(5))), ["int64"], None),
-        (["x"], list(zip((n - 0.5) / 5 for n in range(5))), ["float64"], None),
-        (["s"], list(zip(("foo", "bar", "baz", "foobar"))), ["object"], None),
-        (["l"], list(zip(([1, 2], [3, 4]))), ["object"], None),
+        (["i"], list(zip(range(5), strict=True)), ["int64"], None),
+        (
+            ["x"],
+            list(zip(((n - 0.5) / 5 for n in range(5)), strict=True)),
+            ["float64"],
+            None,
+        ),
+        (
+            ["s"],
+            list(zip(("foo", "bar", "baz", "foobar"), strict=True)),
+            ["object"],
+            None,
+        ),
+        (["l"], list(zip(([1, 2], [3, 4]), strict=True)), ["object"], None),
         (
             ["n"],
             list(
@@ -815,7 +825,8 @@ def test_to_eager_result(records):
                             {"a": [1, "a"]},
                             "cool_id",
                         ),
-                    )
+                    ),
+                    strict=True,
                 )
             ),
             ["object"],
@@ -841,7 +852,8 @@ def test_to_eager_result(records):
                             "1337",
                             "69",
                         ),
-                    )
+                    ),
+                    strict=True,
                 )
             ),
             ["object"],
@@ -881,7 +893,7 @@ def test_to_df(keys, values, types, instances, test_default_expand):
     (
         (
             ["i"],
-            list(zip(range(5))),
+            list(zip(range(5), strict=True)),
             ["i"],
             [[0], [1], [2], [3], [4]],
             ["int64"],
         ),
         # test variable name escaping
         (
             ["i.[]->.().{}.\\"],
-            list(zip(range(5))),
+            list(zip(range(5), strict=True)),
             ["i\\.[]->\\.()\\.{}\\.\\\\"],
             [[0], [1], [2], [3], [4]],
             ["int64"],
         ),
         (
             ["x"],
-            list(zip((n - 0.5) / 5 for n in range(5))),
+            list(zip(((n - 0.5) / 5 for n in range(5)), strict=True)),
             ["x"],
             [[-0.1], [0.1], [0.3], [0.5], [0.7]],
             ["float64"],
         ),
         (
             ["s"],
-            list(zip(("foo", "bar", "baz", "foobar"))),
+            list(zip(("foo", "bar", "baz", "foobar"), strict=True)),
             ["s"],
             [["foo"], ["bar"], ["baz"], ["foobar"]],
             ["object"],
         ),
         (
             ["l"],
-            list(zip(([1, 2], [3, 4]))),
+            list(zip(([1, 2], [3, 4]), strict=True)),
             ["l[].0", "l[].1"],
             [[1, 2], [3, 4]],
             ["int64", "int64"],
         ),
         (
             ["l"],
-            list(zip(([1, 2], [3, 4, 5], [6]))),
+            list(zip(([1, 2], [3, 4, 5], [6]), strict=True)),
             ["l[].0", "l[].1", "l[].2"],
             [[1, 2, None], [3, 4, 5], [6, None, None]],
             # pandas turns None in int columns into NaN
@@ -926,7 +938,9 @@
         ),
         (
             ["d"],
-            list(zip(({"a": 1, "b": 2}, {"a": 3, "b": 4, "": 0}))),
+            list(
+                zip(({"a": 1, "b": 2}, {"a": 3, "b": 4, "": 0}), strict=True)
+            ),
             ["d{}.a", "d{}.b", "d{}."],
             [[1, 2, None], [3, 4, 0]],
             ["int64", "int64", "float64"],
         ),
         # test key escaping
         (
             ["d"],
-            list(zip(({"a.[]\\{}->.().{}.": 1, "b": 2},))),
+            list(zip(({"a.[]\\{}->.().{}.": 1, "b": 2},), strict=True)),
["d{}.a\\.[]\\\\{}->\\.()\\.{}\\.", "d{}.b"], [[1, 2]], ["int64", "int64"], ), ( ["d"], - list(zip(({"a": 1, "b": 2}, {"a": 3, "c": 4}))), + list(zip(({"a": 1, "b": 2}, {"a": 3, "c": 4}), strict=True)), ["d{}.a", "d{}.b", "d{}.c"], [[1, 2, None], [3, None, 4]], # pandas turns None in int columns into NaN @@ -950,7 +964,12 @@ def test_to_df(keys, values, types, instances, test_default_expand): ), ( ["x"], - list(zip(([{"foo": "bar", "baz": [42, 0.1]}, "foobar"],))), + list( + zip( + ([{"foo": "bar", "baz": [42, 0.1]}, "foobar"],), + strict=True, + ) + ), ["x[].0{}.foo", "x[].0{}.baz[].0", "x[].0{}.baz[].1", "x[].1"], [["bar", 42, 0.1, "foobar"]], ["object", "int64", "float64", "object"], @@ -981,7 +1000,8 @@ def test_to_df(keys, values, types, instances, test_default_expand): ["LABEL_A", "LABEL_B"], {"a": [1, "a"], "d": 3}, ), - ) + ), + strict=True, ) ), [ @@ -1033,7 +1053,8 @@ def test_to_df(keys, values, types, instances, test_default_expand): "r-1337", "r-69", ), - ) + ), + strict=True, ) ), [ diff --git a/tests/unit/sync/work/test_session.py b/tests/unit/sync/work/test_session.py index 4897c086c..62725f10f 100644 --- a/tests/unit/sync/work/test_session.py +++ b/tests/unit/sync/work/test_session.py @@ -135,11 +135,13 @@ def test_opens_connection_on_tx_begin(fake_pool): def test_keeps_connection_on_tx_run( fake_pool, test_run_args, repetitions ): - with Session(fake_pool, SessionConfig()) as session: - with session.begin_transaction() as tx: - for _ in range(repetitions): - tx.run(*test_run_args) - assert session._connection is not None + with ( + Session(fake_pool, SessionConfig()) as session, + session.begin_transaction() as tx, + ): + for _ in range(repetitions): + tx.run(*test_run_args) + assert session._connection is not None @pytest.mark.parametrize( @@ -150,12 +152,14 @@ def test_keeps_connection_on_tx_run( def test_keeps_connection_on_tx_consume( fake_pool, test_run_args, repetitions ): - with Session(fake_pool, SessionConfig()) as session: - with session.begin_transaction() as tx: - for _ in range(repetitions): - result = tx.run(*test_run_args) - result.consume() - assert session._connection is not None + with ( + Session(fake_pool, SessionConfig()) as session, + session.begin_transaction() as tx, + ): + for _ in range(repetitions): + result = tx.run(*test_run_args) + result.consume() + assert session._connection is not None @pytest.mark.parametrize( diff --git a/tests/unit/sync/work/test_transaction.py b/tests/unit/sync/work/test_transaction.py index b78768a2c..49ca44312 100644 --- a/tests/unit/sync/work/test_transaction.py +++ b/tests/unit/sync/work/test_transaction.py @@ -346,18 +346,18 @@ def test_server_error_propagates(scripted_connection, error): tx = Transaction(connection, 2, None, noop, noop, noop, None) res1 = tx.run("UNWIND range(1, 1000) AS n RETURN n") - assert res1.__next__() == {"n": 1} + assert next(res1) == {"n": 1} res2 = tx.run("RETURN 'causes error later'") assert res2.fetch(2) == [{"n": 1}, {"n": 2}] with pytest.raises(expected_error) as exc1: - res2.__next__() + next(res2) # can finish the buffer assert res1.fetch(1) == [{"n": 2}] # then fails because the connection was broken by res2 with pytest.raises(ResultFailedError) as exc2: - res1.__next__() + next(res1) assert exc1.value is exc2.value.__cause__ diff --git a/tox.ini b/tox.ini index c6f46f7c8..f00edc9a0 100644 --- a/tox.ini +++ b/tox.ini @@ -1,7 +1,5 @@ [tox] -envlist = py{37,38,39,310,311,312,313}-{unit,integration,performance} -# for Python 3.7 support 
(https://github.com/tox-dev/tox/issues/3416#issuecomment-2426989929) -requires = virtualenv<20.22.0 +envlist = py{310,311,312,313}-{unit,integration,performance} [testenv] passenv = TEST_*