diff --git a/doc/conf.py b/doc/conf.py
index 1719061c77..2d186b1968 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-#
# pytensor documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 7 16:34:06 2008.
#
diff --git a/doc/extending/extending_pytensor_solution_1.py b/doc/extending/extending_pytensor_solution_1.py
index 08a6bd50d9..d8bb773134 100644
--- a/doc/extending/extending_pytensor_solution_1.py
+++ b/doc/extending/extending_pytensor_solution_1.py
@@ -73,7 +73,6 @@ def grad(self, inputs, output_grads):
import numpy as np
from tests import unittest_tools as utt
-from pytensor import function, printing
from pytensor import tensor as at
from pytensor.graph.basic import Apply
from pytensor.graph.op import Op
diff --git a/doc/generate_dtype_tensor_table.py b/doc/generate_dtype_tensor_table.py
index 3d1411b426..7567996d57 100644
--- a/doc/generate_dtype_tensor_table.py
+++ b/doc/generate_dtype_tensor_table.py
@@ -1,5 +1,3 @@
-
-
letters = [
('b', 'int8'),
('w', 'int16'),
diff --git a/doc/scripts/docgen.py b/doc/scripts/docgen.py
index 89b37e0f2a..ecb7534995 100644
--- a/doc/scripts/docgen.py
+++ b/doc/scripts/docgen.py
@@ -1,8 +1,6 @@
-
import sys
import os
import shutil
-import inspect
import getopt
from collections import defaultdict
@@ -16,7 +14,7 @@
sys.argv[1:],
'o:f:',
['rst', 'help', 'nopdf', 'cache', 'check', 'test'])
- options.update(dict([x, y or True] for x, y in opts))
+ options.update({x: y or True for x, y in opts})
if options['--help']:
print(f'Usage: {sys.argv[0]} [OPTIONS] [files...]')
print(' -o <dir>: output the html files in the specified dir')
@@ -100,8 +98,6 @@ def call_sphinx(builder, workdir):
shutil.rmtree(workdir)
except OSError as e:
print('OSError:', e)
- except IOError as e:
- print('IOError:', e)
if options['--test']:
mkdir("doc")
diff --git a/pytensor/_version.py b/pytensor/_version.py
index 246fad0d18..a61f4f2658 100644
--- a/pytensor/_version.py
+++ b/pytensor/_version.py
@@ -105,7 +105,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=
return None, None
else:
if verbose:
- print("unable to find command, tried %s" % (commands,))
+ print(f"unable to find command, tried {commands}")
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
@@ -155,7 +155,7 @@ def git_get_keywords(versionfile_abs):
# _version.py.
keywords = {}
try:
- with open(versionfile_abs, "r") as fobj:
+ with open(versionfile_abs) as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
@@ -351,7 +351,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
- pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
+ pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format(
full_tag,
tag_prefix,
)
diff --git a/pytensor/compile/debugmode.py b/pytensor/compile/debugmode.py
index 58152b9edf..d1ea7c54ff 100644
--- a/pytensor/compile/debugmode.py
+++ b/pytensor/compile/debugmode.py
@@ -1604,7 +1604,7 @@ def f():
# storage will be None
if thunk_py:
_logger.debug(
- f"{i} - running thunk_py with None as " "output storage"
+ f"{i} - running thunk_py with None as output storage"
)
try:
thunk_py()
@@ -2063,15 +2063,15 @@ def __init__(
infolog = StringIO()
print("Optimization process is unstable...", file=infolog)
print(
- " (HINT: Ops that the nodes point to must compare " "equal)",
+ " (HINT: Ops that the nodes point to must compare equal)",
file=infolog,
)
print(
- "(event index) (one event trace) (other event " "trace)",
+ "(event index) (one event trace) (other event trace)",
file=infolog,
)
print(
- "-------------------------------------------------" "----",
+ "-----------------------------------------------------",
file=infolog,
)
for j in range(max(len(li), len(l0))):
@@ -2292,7 +2292,7 @@ def __init__(
if not isinstance(linker, _DummyLinker):
raise Exception(
- "DebugMode can only use its own linker! You " "should not provide one.",
+ "DebugMode can only use its own linker! You should not provide one.",
linker,
)
@@ -2318,7 +2318,7 @@ def __init__(
self.require_matching_strides = require_matching_strides
if not (self.check_c_code or self.check_py_code):
- raise ValueError("DebugMode has to check at least one of c and py " "code")
+ raise ValueError("DebugMode has to check at least one of c and py code")
def __str__(self):
return "DebugMode(linker={}, optimizer={})".format(
diff --git a/pytensor/compile/function/__init__.py b/pytensor/compile/function/__init__.py
index 5e51fb3b41..27961f23bb 100644
--- a/pytensor/compile/function/__init__.py
+++ b/pytensor/compile/function/__init__.py
@@ -300,9 +300,7 @@ def opt_log1p(node):
if uses_tuple:
# we must use old semantics in this case.
if profile:
- raise NotImplementedError(
- "profiling not supported in old-style " "function"
- )
+ raise NotImplementedError("profiling not supported in old-style function")
if uses_updates or uses_givens:
raise NotImplementedError(
"In() instances and tuple inputs trigger the old "
diff --git a/pytensor/compile/function/pfunc.py b/pytensor/compile/function/pfunc.py
index 37e0e6723e..ba9388338b 100644
--- a/pytensor/compile/function/pfunc.py
+++ b/pytensor/compile/function/pfunc.py
@@ -181,7 +181,7 @@ def clone_inputs(i):
raise TypeError("update target must be a SharedVariable", store_into)
if store_into in update_d:
raise ValueError(
- "this shared variable already has an update " "expression",
+ "this shared variable already has an update expression",
(store_into, update_d[store_into]),
)
diff --git a/pytensor/compile/nanguardmode.py b/pytensor/compile/nanguardmode.py
index fd97e6ac8d..9c50dd99da 100644
--- a/pytensor/compile/nanguardmode.py
+++ b/pytensor/compile/nanguardmode.py
@@ -225,7 +225,7 @@ def do_check_on(value, nd, var=None):
print(pytensor.printing.debugprint(nd, file="str"), file=sio)
else:
print(
- "NanGuardMode found an error in an input of the " "graph.",
+ "NanGuardMode found an error in an input of the graph.",
file=sio,
)
# Add the stack trace
diff --git a/pytensor/compile/profiling.py b/pytensor/compile/profiling.py
index 7e4dc1c403..5a9e2ade7a 100644
--- a/pytensor/compile/profiling.py
+++ b/pytensor/compile/profiling.py
@@ -1308,7 +1308,7 @@ def compute_max_stats(running_memory, stats):
if len(fct_memory) > 1:
print(
- "Memory Profile (the max between all functions in " "that profile)",
+ "Memory Profile (the max between all functions in that profile)",
file=file,
)
else:
diff --git a/pytensor/compile/sharedvalue.py b/pytensor/compile/sharedvalue.py
index e2c40da352..4256da935e 100644
--- a/pytensor/compile/sharedvalue.py
+++ b/pytensor/compile/sharedvalue.py
@@ -72,7 +72,7 @@ def __init__(
self.container = container
if (value is not None) or (strict is not None):
raise TypeError(
- "value and strict are ignored if you pass " "a container here"
+ "value and strict are ignored if you pass a container here"
)
else:
self.container = Container(
diff --git a/pytensor/configdefaults.py b/pytensor/configdefaults.py
index c8c21dd27f..d6afff0fd5 100644
--- a/pytensor/configdefaults.py
+++ b/pytensor/configdefaults.py
@@ -585,7 +585,7 @@ def add_compile_configvars():
config.add(
"cmodule__age_thresh_use",
- "In seconds. The time after which " "PyTensor won't reuse a compile c module.",
+ "In seconds. The time after which PyTensor won't reuse a compile c module.",
# 24 days
IntParam(60 * 60 * 24 * 24, mutable=False),
in_c_key=False,
@@ -1004,7 +1004,7 @@ def add_testvalue_and_checking_configvars():
config.add(
"on_shape_error",
- "warn: print a warning and use the default" " value. raise: raise an error",
+ "warn: print a warning and use the default value. raise: raise an error",
EnumStr("warn", ["raise"]),
in_c_key=False,
)
@@ -1149,14 +1149,14 @@ def add_metaopt_configvars():
config.add(
"metaopt__optimizer_excluding",
- ("exclude optimizers with these tags. " "Separate tags with ':'."),
+ ("exclude optimizers with these tags. Separate tags with ':'."),
StrParam(""),
in_c_key=False,
)
config.add(
"metaopt__optimizer_including",
- ("include optimizers with these tags. " "Separate tags with ':'."),
+ ("include optimizers with these tags. Separate tags with ':'."),
StrParam(""),
in_c_key=False,
)
diff --git a/pytensor/configparser.py b/pytensor/configparser.py
index a1a9ac5bcb..5f9716589b 100644
--- a/pytensor/configparser.py
+++ b/pytensor/configparser.py
@@ -125,10 +125,7 @@ def get_config_hash(self):
)
return hash_from_code(
"\n".join(
- [
- "{} = {}".format(cv.name, cv.__get__(self, self.__class__))
- for cv in all_opts
- ]
+ [f"{cv.name} = {cv.__get__(self, self.__class__)}" for cv in all_opts]
)
)
diff --git a/pytensor/gradient.py b/pytensor/gradient.py
index 8fdb40703a..74cd02bd97 100644
--- a/pytensor/gradient.py
+++ b/pytensor/gradient.py
@@ -91,10 +91,8 @@ def grad_not_implemented(op, x_pos, x, comment=""):
return (
NullType(
- (
- "This variable is Null because the grad method for "
- f"input {x_pos} ({x}) of the {op} op is not implemented. {comment}"
- )
+ "This variable is Null because the grad method for "
+ f"input {x_pos} ({x}) of the {op} op is not implemented. {comment}"
)
)()
@@ -114,10 +112,8 @@ def grad_undefined(op, x_pos, x, comment=""):
return (
NullType(
- (
- "This variable is Null because the grad method for "
- f"input {x_pos} ({x}) of the {op} op is not implemented. {comment}"
- )
+ "This variable is Null because the grad method for "
+ f"input {x_pos} ({x}) of the {op} op is not implemented. {comment}"
)
)()
@@ -1275,14 +1271,12 @@ def try_to_copy_if_needed(var):
# We therefore don't allow it because its usage has become
# so muddied.
raise TypeError(
- (
- f"{node.op}.grad returned None for a gradient term, "
- "this is prohibited. Instead of None,"
- "return zeros_like(input), disconnected_type(),"
- " or a NullType variable such as those made with "
- "the grad_undefined or grad_unimplemented helper "
- "functions."
- )
+ f"{node.op}.grad returned None for a gradient term, "
+ "this is prohibited. Instead of None,"
+ "return zeros_like(input), disconnected_type(),"
+ " or a NullType variable such as those made with "
+ "the grad_undefined or grad_unimplemented helper "
+ "functions."
)
# Check that the gradient term for this input
@@ -1402,10 +1396,8 @@ def access_grad_cache(var):
if hasattr(var, "ndim") and term.ndim != var.ndim:
raise ValueError(
- (
- f"{node.op}.grad returned a term with"
- f" {int(term.ndim)} dimensions, but {int(var.ndim)} are required."
- )
+ f"{node.op}.grad returned a term with"
+ f" {int(term.ndim)} dimensions, but {int(var.ndim)} are required."
)
terms.append(term)
@@ -1767,10 +1759,8 @@ def verify_grad(
for i, p in enumerate(pt):
if p.dtype not in ("float16", "float32", "float64"):
raise TypeError(
- (
- "verify_grad can work only with floating point "
- f'inputs, but input {i} has dtype "{p.dtype}".'
- )
+ "verify_grad can work only with floating point "
+ f'inputs, but input {i} has dtype "{p.dtype}".'
)
_type_tol = dict( # relative error tolerances for different types
diff --git a/pytensor/graph/basic.py b/pytensor/graph/basic.py
index 00a4f8edc4..fc85209236 100644
--- a/pytensor/graph/basic.py
+++ b/pytensor/graph/basic.py
@@ -1184,11 +1184,9 @@ def clone_replace(
items = []
else:
raise ValueError(
- (
- "replace is neither a dictionary, list, "
- f"tuple or None ! The value provided is {replace},"
- f"of type {type(replace)}"
- )
+ "replace is neither a dictionary, list, "
+ f"tuple or None ! The value provided is {replace},"
+ f"of type {type(replace)}"
)
tmp_replace = [(x, x.type()) for x, y in items]
new_replace = [(x, y) for ((_, x), (_, y)) in zip(tmp_replace, items)]
diff --git a/pytensor/graph/features.py b/pytensor/graph/features.py
index c985c2fbeb..013e127aaf 100644
--- a/pytensor/graph/features.py
+++ b/pytensor/graph/features.py
@@ -679,7 +679,7 @@ def on_detach(self, fgraph):
"""
if self.fgraph is not fgraph:
raise Exception(
- "This NodeFinder instance was not attached to the" " provided fgraph."
+ "This NodeFinder instance was not attached to the provided fgraph."
)
self.fgraph = None
del fgraph.get_nodes
diff --git a/pytensor/graph/op.py b/pytensor/graph/op.py
index 3a79e5801b..2a2d64d10b 100644
--- a/pytensor/graph/op.py
+++ b/pytensor/graph/op.py
@@ -10,7 +10,6 @@
List,
Optional,
Sequence,
- Text,
Tuple,
TypeVar,
Union,
@@ -496,7 +495,7 @@ def prepare_node(
node: Apply,
storage_map: Optional[StorageMapType],
compute_map: Optional[ComputeMapType],
- impl: Optional[Text],
+ impl: Optional[str],
) -> None:
"""Make any special modifications that the `Op` needs before doing :meth:`Op.make_thunk`.
@@ -573,7 +572,7 @@ def make_thunk(
storage_map: StorageMapType,
compute_map: ComputeMapType,
no_recycling: List[Variable],
- impl: Optional[Text] = None,
+ impl: Optional[str] = None,
) -> ThunkType:
r"""Create a thunk.
@@ -676,7 +675,7 @@ def get_test_value(v: Any) -> Any:
return v.get_test_value()
-def missing_test_message(msg: Text) -> None:
+def missing_test_message(msg: str) -> None:
"""Display a message saying that some test_value is missing.
This uses the appropriate form based on ``config.compute_test_value``:
diff --git a/pytensor/graph/rewriting/utils.py b/pytensor/graph/rewriting/utils.py
index 2af46e1f20..e0d46e42bf 100644
--- a/pytensor/graph/rewriting/utils.py
+++ b/pytensor/graph/rewriting/utils.py
@@ -114,7 +114,7 @@ def is_same_graph_with_merge(var1, var2, givens=None):
# We also need to make sure we replace a Variable if it is present in
# `givens`.
vars_replaced = [givens.get(v, v) for v in fgraph.outputs]
- o1, o2 = [v.owner for v in vars_replaced]
+ o1, o2 = (v.owner for v in vars_replaced)
if o1 is None and o2 is None:
# Comparing two single-Variable graphs: they are equal if they are
# the same Variable.
diff --git a/pytensor/graph/type.py b/pytensor/graph/type.py
index 75ef28cab1..cbd54e2d9e 100644
--- a/pytensor/graph/type.py
+++ b/pytensor/graph/type.py
@@ -1,5 +1,5 @@
from abc import abstractmethod
-from typing import Any, Generic, Optional, Text, Tuple, TypeVar, Union
+from typing import Any, Generic, Optional, Tuple, TypeVar, Union
from typing_extensions import TypeAlias
@@ -188,7 +188,7 @@ def is_valid_value(self, data: D, strict: bool = True) -> bool:
except (TypeError, ValueError):
return False
- def make_variable(self, name: Optional[Text] = None) -> variable_type:
+ def make_variable(self, name: Optional[str] = None) -> variable_type:
"""Return a new `Variable` instance of this `Type`.
Parameters
@@ -199,7 +199,7 @@ def make_variable(self, name: Optional[Text] = None) -> variable_type:
"""
return self.variable_type(self, None, name=name)
- def make_constant(self, value: D, name: Optional[Text] = None) -> constant_type:
+ def make_constant(self, value: D, name: Optional[str] = None) -> constant_type:
"""Return a new `Constant` instance of this `Type`.
Parameters
@@ -216,7 +216,7 @@ def clone(self, *args, **kwargs) -> "Type":
"""Clone a copy of this type with the given arguments/keyword values, if any."""
return type(self)(*args, **kwargs)
- def __call__(self, name: Optional[Text] = None) -> variable_type:
+ def __call__(self, name: Optional[str] = None) -> variable_type:
"""Return a new `Variable` instance of Type `self`.
Parameters
diff --git a/pytensor/graph/utils.py b/pytensor/graph/utils.py
index d3c5ab5387..92e4af193f 100644
--- a/pytensor/graph/utils.py
+++ b/pytensor/graph/utils.py
@@ -245,9 +245,7 @@ def __str__(self):
def __str__(self):
return "{}{{{}}}".format(
self.__class__.__name__,
- ", ".join(
- "{}={!r}".format(p, getattr(self, p)) for p in props
- ),
+ ", ".join(f"{p}={getattr(self, p)!r}" for p in props),
)
dct["__str__"] = __str__
diff --git a/pytensor/link/__init__.py b/pytensor/link/__init__.py
index 5f282702bb..e69de29bb2 100644
--- a/pytensor/link/__init__.py
+++ b/pytensor/link/__init__.py
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/pytensor/link/c/basic.py b/pytensor/link/c/basic.py
index b77ddf80e3..d73050e76d 100644
--- a/pytensor/link/c/basic.py
+++ b/pytensor/link/c/basic.py
@@ -1023,7 +1023,7 @@ def c_compiler(self):
else:
if x_compiler and (x_compiler != c_compiler):
raise Exception(
- "Nodes have requested specific" " different compilers",
+ "Nodes have requested specific different compilers",
(c_compiler, x_compiler),
)
if c_compiler is None:
@@ -1698,16 +1698,14 @@ def instantiate_code(self, n_args):
print(" return NULL;", file=code)
print(" }", file=code)
print(
- """\
+ f"""\
PyObject* thunk = PyCapsule_New((void*)(&{struct_name}_executor), NULL, {struct_name}_destructor);
if (thunk != NULL && PyCapsule_SetContext(thunk, struct_ptr) != 0) {{
PyErr_Clear();
Py_DECREF(thunk);
thunk = NULL;
}}
-""".format(
- **locals()
- ),
+""",
file=code,
)
print(" return thunk; }", file=code)
diff --git a/pytensor/link/c/cmodule.py b/pytensor/link/c/cmodule.py
index ce750438ae..20530803d6 100644
--- a/pytensor/link/c/cmodule.py
+++ b/pytensor/link/c/cmodule.py
@@ -2127,7 +2127,7 @@ def get_lines(cmd, parse=True):
native_lines = get_lines(f"{config.cxx} -march=native -E -v -")
if native_lines is None:
_logger.info(
- "Call to 'g++ -march=native' failed," "not setting -march flag"
+ "Call to 'g++ -march=native' failed, not setting -march flag"
)
detect_march = False
else:
@@ -2157,15 +2157,13 @@ def get_lines(cmd, parse=True):
if len(default_lines) < 1:
reported_lines = get_lines(f"{config.cxx} -E -v -", parse=False)
warnings.warn(
- (
- "PyTensor was not able to find the "
- "default g++ parameters. This is needed to tune "
- "the compilation to your specific "
- "CPU. This can slow down the execution of PyTensor "
- "functions. Please submit the following lines to "
- "PyTensor's mailing list so that we can fix this "
- f"problem:\n {reported_lines}"
- )
+ "PyTensor was not able to find the "
+ "default g++ parameters. This is needed to tune "
+ "the compilation to your specific "
+ "CPU. This can slow down the execution of PyTensor "
+ "functions. Please submit the following lines to "
+ "PyTensor's mailing list so that we can fix this "
+ f"problem:\n {reported_lines}"
)
else:
# Some options are actually given as "-option value",
@@ -2248,7 +2246,7 @@ def join_options(init_part):
if len(version) != 3:
# Unexpected, but should not be a problem
continue
- mj, mn, patch = [int(vp) for vp in version]
+ mj, mn, patch = (int(vp) for vp in version)
if (
((mj, mn) == (4, 6) and patch < 4)
or ((mj, mn) == (4, 7) and patch <= 3)
@@ -2584,7 +2582,7 @@ def compile_str(
def print_command_line_error():
# Print command line when a problem occurred.
print(
- ("Problem occurred during compilation with the " "command line below:"),
+ ("Problem occurred during compilation with the command line below:"),
file=sys.stderr,
)
print(" ".join(cmd), file=sys.stderr)
@@ -2609,7 +2607,7 @@ def print_command_line_error():
tf.write(f"{i + 1}\t{l}\n")
tf.write("===============================\n")
tf.write(
- "Problem occurred during compilation with the " "command line below:\n"
+ "Problem occurred during compilation with the command line below:\n"
)
tf.write(" ".join(cmd))
# Print errors just below the command line.
diff --git a/pytensor/link/c/interface.py b/pytensor/link/c/interface.py
index f96416c088..b21e077598 100644
--- a/pytensor/link/c/interface.py
+++ b/pytensor/link/c/interface.py
@@ -1,6 +1,6 @@
import warnings
from abc import abstractmethod
-from typing import Callable, Dict, List, Text, Tuple, Union
+from typing import Callable, Dict, List, Tuple, Union
from pytensor.graph.basic import Apply, Constant
from pytensor.graph.utils import MethodNotDefined
@@ -9,7 +9,7 @@
class CLinkerObject:
"""Standard methods for an `Op` or `Type` used with the `CLinker`."""
- def c_headers(self, **kwargs) -> List[Text]:
+ def c_headers(self, **kwargs) -> List[str]:
"""Return a list of header files required by code returned by this class.
These strings will be prefixed with ``#include`` and inserted at the
@@ -30,7 +30,7 @@ def c_headers(self, **kwargs):
"""
return []
- def c_header_dirs(self, **kwargs) -> List[Text]:
+ def c_header_dirs(self, **kwargs) -> List[str]:
"""Return a list of header search paths required by code returned by this class.
Provides search paths for headers, in addition to those in any relevant
@@ -53,7 +53,7 @@ def c_header_dirs(self, **kwargs):
"""
return []
- def c_libraries(self, **kwargs) -> List[Text]:
+ def c_libraries(self, **kwargs) -> List[str]:
"""Return a list of libraries required by code returned by this class.
The compiler will search the directories specified by the environment
@@ -77,7 +77,7 @@ def c_libraries(self, **kwargs):
"""
return []
- def c_lib_dirs(self, **kwargs) -> List[Text]:
+ def c_lib_dirs(self, **kwargs) -> List[str]:
"""Return a list of library search paths required by code returned by this class.
Provides search paths for libraries, in addition to those in any
@@ -100,7 +100,7 @@ def c_lib_dirs(self, **kwargs):
"""
return []
- def c_support_code(self, **kwargs) -> Text:
+ def c_support_code(self, **kwargs) -> str:
"""Return utility code for use by a `Variable` or `Op`.
This is included at global scope prior to the rest of the code for this class.
@@ -115,7 +115,7 @@ def c_support_code(self, **kwargs) -> Text:
"""
return ""
- def c_compile_args(self, **kwargs) -> List[Text]:
+ def c_compile_args(self, **kwargs) -> List[str]:
"""Return a list of recommended compile arguments for code returned by other methods in this class.
Compiler arguments related to headers, libraries and search paths
@@ -133,7 +133,7 @@ def c_compile_args(self, **kwargs):
"""
return []
- def c_no_compile_args(self, **kwargs) -> List[Text]:
+ def c_no_compile_args(self, **kwargs) -> List[str]:
"""Return a list of incompatible ``gcc`` compiler arguments.
We will remove those arguments from the command line of ``gcc``. So if
@@ -145,7 +145,7 @@ def c_no_compile_args(self, **kwargs) -> List[Text]:
"""
return []
- def c_init_code(self, **kwargs) -> List[Text]:
+ def c_init_code(self, **kwargs) -> List[str]:
"""Return a list of code snippets to be inserted in module initialization."""
return []
@@ -176,11 +176,11 @@ class CLinkerOp(CLinkerObject):
def c_code(
self,
node: Apply,
- name: Text,
- inputs: List[Text],
- outputs: List[Text],
- sub: Dict[Text, Text],
- ) -> Text:
+ name: str,
+ inputs: List[str],
+ outputs: List[str],
+ sub: Dict[str, str],
+ ) -> str:
"""Return the C implementation of an ``Op``.
Returns C code that does the computation associated to this ``Op``,
@@ -240,11 +240,11 @@ def c_code_cache_version_apply(self, node: Apply) -> Tuple[int, ...]:
def c_code_cleanup(
self,
node: Apply,
- name: Text,
- inputs: List[Text],
- outputs: List[Text],
- sub: Dict[Text, Text],
- ) -> Text:
+ name: str,
+ inputs: List[str],
+ outputs: List[str],
+ sub: Dict[str, str],
+ ) -> str:
"""Return C code to run after :meth:`CLinkerOp.c_code`, whether it failed or not.
This is a convenient place to clean up things allocated by :meth:`CLinkerOp.c_code`.
@@ -275,7 +275,7 @@ def c_code_cleanup(
"""
return ""
- def c_support_code_apply(self, node: Apply, name: Text) -> Text:
+ def c_support_code_apply(self, node: Apply, name: str) -> str:
"""Return `Apply`-specialized utility code for use by an `Op` that will be inserted at global scope.
Parameters
@@ -296,7 +296,7 @@ def c_support_code_apply(self, node: Apply, name: Text) -> Text:
"""
return ""
- def c_init_code_apply(self, node: Apply, name: Text) -> Text:
+ def c_init_code_apply(self, node: Apply, name: str) -> str:
"""Return a code string specific to the `Apply` to be inserted in the module initialization code.
Parameters
@@ -318,7 +318,7 @@ def c_init_code_apply(self, node: Apply, name: Text) -> Text:
"""
return ""
- def c_init_code_struct(self, node: Apply, name, sub) -> Text:
+ def c_init_code_struct(self, node: Apply, name, sub) -> str:
"""Return an `Apply`-specific code string to be inserted in the struct initialization code.
Parameters
@@ -335,7 +335,7 @@ def c_init_code_struct(self, node: Apply, name, sub) -> Text:
"""
return ""
- def c_support_code_struct(self, node: Apply, name: Text) -> Text:
+ def c_support_code_struct(self, node: Apply, name: str) -> str:
"""Return `Apply`-specific utility code for use by an `Op` that will be inserted at struct scope.
Parameters
@@ -349,7 +349,7 @@ def c_support_code_struct(self, node: Apply, name: Text) -> Text:
"""
return ""
- def c_cleanup_code_struct(self, node: Apply, name: Text) -> Text:
+ def c_cleanup_code_struct(self, node: Apply, name: str) -> str:
"""Return an `Apply`-specific code string to be inserted in the struct cleanup code.
Parameters
@@ -373,8 +373,8 @@ class CLinkerType(CLinkerObject):
@abstractmethod
def c_declare(
- self, name: Text, sub: Dict[Text, Text], check_input: bool = True
- ) -> Text:
+ self, name: str, sub: Dict[str, str], check_input: bool = True
+ ) -> str:
"""Return C code to declare variables that will be instantiated by :meth:`CLinkerType.c_extract`.
Parameters
@@ -411,7 +411,7 @@ def c_declare(self, name, sub, check_input=True):
"""
@abstractmethod
- def c_init(self, name: Text, sub: Dict[Text, Text]) -> Text:
+ def c_init(self, name: str, sub: Dict[str, str]) -> str:
"""Return C code to initialize the variables that were declared by :meth:`CLinkerType.c_declare`.
Notes
@@ -435,8 +435,8 @@ def c_init(self, name, sub):
@abstractmethod
def c_extract(
- self, name: Text, sub: Dict[Text, Text], check_input: bool = True, **kwargs
- ) -> Text:
+ self, name: str, sub: Dict[str, str], check_input: bool = True, **kwargs
+ ) -> str:
r"""Return C code to extract a ``PyObject *`` instance.
The code returned from this function must be templated using
@@ -475,7 +475,7 @@ def c_extract(self, name, sub, check_input=True, **kwargs):
"""
@abstractmethod
- def c_sync(self, name: Text, sub: Dict[Text, Text]) -> Text:
+ def c_sync(self, name: str, sub: Dict[str, str]) -> str:
"""Return C code to pack C types back into a ``PyObject``.
The code returned from this function must be templated using
@@ -494,7 +494,7 @@ def c_sync(self, name: Text, sub: Dict[Text, Text]) -> Text:
"""
- def c_element_type(self) -> Text:
+ def c_element_type(self) -> str:
"""Return the name of the primitive C type of items into variables handled by this type.
e.g:
@@ -513,7 +513,7 @@ def c_is_simple(self) -> bool:
"""
return False
- def c_literal(self, data: Constant) -> Text:
+ def c_literal(self, data: Constant) -> str:
"""Provide a C literal string value for the specified `data`.
Parameters
@@ -525,8 +525,8 @@ def c_literal(self, data: Constant) -> Text:
return ""
def c_extract_out(
- self, name: Text, sub: Dict[Text, Text], check_input: bool = True, **kwargs
- ) -> Text:
+ self, name: str, sub: Dict[str, str], check_input: bool = True, **kwargs
+ ) -> str:
"""Return C code to extract a ``PyObject *`` instance.
Unlike :math:`CLinkerType.c_extract`, :meth:`CLinkerType.c_extract_out` has to
@@ -549,7 +549,7 @@ def c_extract_out(
c_extract_code=self.c_extract(name, sub, check_input),
)
- def c_cleanup(self, name: Text, sub: Dict[Text, Text]) -> Text:
+ def c_cleanup(self, name: str, sub: Dict[str, str]) -> str:
"""Return C code to clean up after :meth:`CLinkerType.c_extract`.
This returns C code that should deallocate whatever
diff --git a/pytensor/link/c/params_type.py b/pytensor/link/c/params_type.py
index c5c227fd17..55a94116cd 100644
--- a/pytensor/link/c/params_type.py
+++ b/pytensor/link/c/params_type.py
@@ -264,10 +264,7 @@ def __init__(self, params_type, **kwargs):
def __repr__(self):
return "Params(%s)" % ", ".join(
- [
- ("{}:{}:{}".format(k, type(self[k]).__name__, self[k]))
- for k in sorted(self.keys())
- ]
+ [(f"{k}:{type(self[k]).__name__}:{self[k]}") for k in sorted(self.keys())]
)
def __getattr__(self, key):
@@ -430,10 +427,7 @@ def __getattr__(self, key):
def __repr__(self):
return "ParamsType<%s>" % ", ".join(
- [
- ("{}:{}".format(self.fields[i], self.types[i]))
- for i in range(self.length)
- ]
+ [(f"{self.fields[i]}:{self.types[i]}") for i in range(self.length)]
)
def __eq__(self, other):
diff --git a/pytensor/link/c/type.py b/pytensor/link/c/type.py
index 7518b0c609..24ced701ed 100644
--- a/pytensor/link/c/type.py
+++ b/pytensor/link/c/type.py
@@ -489,8 +489,7 @@ def __repr__(self):
type(self).__name__,
self.ctype,
", ".join(
- "{}{}:{}".format(k, names_to_aliases[k], self[k])
- for k in sorted(self.keys())
+ f"{k}{names_to_aliases[k]}:{self[k]}" for k in sorted(self.keys())
),
)
diff --git a/pytensor/link/jax/__init__.py b/pytensor/link/jax/__init__.py
index 4c0f2b7973..180842f084 100644
--- a/pytensor/link/jax/__init__.py
+++ b/pytensor/link/jax/__init__.py
@@ -1 +1 @@
-from pytensor.link.jax.linker import JAXLinker
\ No newline at end of file
+from pytensor.link.jax.linker import JAXLinker
diff --git a/pytensor/link/jax/dispatch/basic.py b/pytensor/link/jax/dispatch/basic.py
index bad6da82d0..0e0fbec20b 100644
--- a/pytensor/link/jax/dispatch/basic.py
+++ b/pytensor/link/jax/dispatch/basic.py
@@ -87,7 +87,7 @@ def jnp_safe_copy(x):
res = jnp.copy(x)
except NotImplementedError:
warnings.warn(
- "`jnp.copy` is not implemented yet. " "Using the object's `copy` method."
+ "`jnp.copy` is not implemented yet. Using the object's `copy` method."
)
if hasattr(x, "copy"):
res = jnp.array(x.copy())
diff --git a/pytensor/link/numba/dispatch/basic.py b/pytensor/link/numba/dispatch/basic.py
index 20670d4a69..c081fbe9ef 100644
--- a/pytensor/link/numba/dispatch/basic.py
+++ b/pytensor/link/numba/dispatch/basic.py
@@ -209,7 +209,7 @@ def make_slice_from_constant(context, builder, ty, pyval):
default_stop_pos,
default_stop_neg,
default_step,
- ) = [context.get_constant(types.intp, x) for x in get_defaults(context)]
+ ) = (context.get_constant(types.intp, x) for x in get_defaults(context))
step = pyval.step
if step is None:
diff --git a/pytensor/link/numba/dispatch/random.py b/pytensor/link/numba/dispatch/random.py
index c468cb72d3..23a762715b 100644
--- a/pytensor/link/numba/dispatch/random.py
+++ b/pytensor/link/numba/dispatch/random.py
@@ -24,7 +24,7 @@
class RandomStateNumbaType(types.Type):
def __init__(self):
- super(RandomStateNumbaType, self).__init__(name="RandomState")
+ super().__init__(name="RandomState")
random_state_numba_type = RandomStateNumbaType()
diff --git a/pytensor/link/utils.py b/pytensor/link/utils.py
index 586188a391..fd76e1278e 100644
--- a/pytensor/link/utils.py
+++ b/pytensor/link/utils.py
@@ -101,7 +101,7 @@ def map_storage(
"with storage in given storage_"
"map. Given input_storage: ",
storage,
- "Storage in storage_ma" "p: ",
+ "Storage in storage_map: ",
storage_map[r],
)
else:
@@ -122,7 +122,7 @@ def map_storage(
" storage_map. Given output"
"_storage: ",
storage,
- "Sto" "rage in storage_map: ",
+ "Storage in storage_map: ",
storage_map[r],
)
else:
diff --git a/pytensor/misc/check_blas.py b/pytensor/misc/check_blas.py
index 2734f2fb3d..4fd42932b4 100644
--- a/pytensor/misc/check_blas.py
+++ b/pytensor/misc/check_blas.py
@@ -45,7 +45,7 @@ def execute(execute=True, verbose=True, M=2000, N=2000, K=2000, iters=10, order=
print(" OMP_NUM_THREADS=", os.getenv("OMP_NUM_THREADS"))
print(" GOTO_NUM_THREADS=", os.getenv("GOTO_NUM_THREADS"))
print()
- print("Numpy config: (used when the PyTensor flag" ' "blas__ldflags" is empty)')
+ print('Numpy config: (used when the PyTensor flag "blas__ldflags" is empty)')
np.show_config()
print("Numpy dot module:", np.dot.__module__)
print("Numpy location:", np.__file__)
diff --git a/pytensor/misc/may_share_memory.py b/pytensor/misc/may_share_memory.py
index bc6ee2ae44..911bfdd155 100644
--- a/pytensor/misc/may_share_memory.py
+++ b/pytensor/misc/may_share_memory.py
@@ -33,7 +33,7 @@ def may_share_memory(a, b, raise_other_type=True):
b_sparse = _is_sparse(b)
if not (a_ndarray or a_sparse) or not (b_ndarray or b_sparse):
if raise_other_type:
- raise TypeError("may_share_memory support only ndarray" " and scipy.sparse")
+ raise TypeError("may_share_memory support only ndarray and scipy.sparse")
return False
return SparseTensorType.may_share_memory(a, b)
diff --git a/pytensor/printing.py b/pytensor/printing.py
index 9d481a6f39..d8c65b8493 100644
--- a/pytensor/printing.py
+++ b/pytensor/printing.py
@@ -53,7 +53,7 @@
except ImportError:
# tests should not fail on optional dependency
pydot_imported_msg = (
- "Install the python package pydot or pydot-ng." " Install graphviz."
+ "Install the python package pydot or pydot-ng. Install graphviz."
)
except Exception as e:
pydot_imported_msg = "An error happened while importing/trying pydot: "
diff --git a/pytensor/sandbox/linalg/ops.py b/pytensor/sandbox/linalg/ops.py
index bd9ee5b364..7e74f629f0 100644
--- a/pytensor/sandbox/linalg/ops.py
+++ b/pytensor/sandbox/linalg/ops.py
@@ -170,7 +170,7 @@ def spectral_radius_bound(X, log2_exponent):
)
if log2_exponent <= 0:
raise ValueError(
- "spectral_radius_bound requires a strictly positive " "exponent",
+ "spectral_radius_bound requires a strictly positive exponent",
log2_exponent,
)
diff --git a/pytensor/sandbox/rng_mrg.py b/pytensor/sandbox/rng_mrg.py
index c9d789c0ff..542b4a7f5d 100644
--- a/pytensor/sandbox/rng_mrg.py
+++ b/pytensor/sandbox/rng_mrg.py
@@ -361,7 +361,7 @@ def __str__(self):
def grad(self, inputs, ograd):
return [
gradient.grad_undefined(
- self, k, inp, "No gradient defined through " "random sampling op"
+ self, k, inp, "No gradient defined through random sampling op"
)
for k, inp in enumerate(inputs)
]
@@ -1019,7 +1019,7 @@ def multinomial(
return op(pvals, unis, n_samples)
else:
raise NotImplementedError(
- "MRG_RandomStream.multinomial only" " implemented for pvals.ndim = 2"
+ "MRG_RandomStream.multinomial only implemented for pvals.ndim = 2"
)
def choice(
@@ -1075,7 +1075,7 @@ def choice(
"""
if replace:
raise NotImplementedError(
- "MRG_RandomStream.choice only works without replacement " "for now."
+ "MRG_RandomStream.choice only works without replacement for now."
)
if a is not None:
@@ -1087,15 +1087,13 @@ def choice(
if p is None:
raise TypeError(
- "For now, p has to be specified in " "MRG_RandomStream.choice."
+ "For now, p has to be specified in MRG_RandomStream.choice."
)
p = as_tensor_variable(p)
p = undefined_grad(p)
if ndim is not None:
- raise ValueError(
- "ndim argument to " "MRG_RandomStream.choice " "is not used."
- )
+ raise ValueError("ndim argument to MRG_RandomStream.choice is not used.")
if p.ndim != 2:
raise NotImplementedError(
diff --git a/pytensor/scalar/basic.py b/pytensor/scalar/basic.py
index d7362ed372..ba2a132fd4 100644
--- a/pytensor/scalar/basic.py
+++ b/pytensor/scalar/basic.py
@@ -1174,7 +1174,7 @@ def __str__(self):
if param:
return "{}{{{}}}".format(
self.__class__.__name__,
- ", ".join("{}={}".format(k, v) for k, v in param),
+ ", ".join(f"{k}={v}" for k, v in param),
)
else:
return self.__class__.__name__
diff --git a/pytensor/scan/basic.py b/pytensor/scan/basic.py
index 94f34e63b7..9c00070ff9 100644
--- a/pytensor/scan/basic.py
+++ b/pytensor/scan/basic.py
@@ -541,13 +541,13 @@ def wrap_into_list(x):
taps = outs_info[i]["taps"]
if len(taps) > len(set(taps)):
raise ValueError(
- ("All the taps must be different in " " `outputs_info`"),
+ ("All the taps must be different in `outputs_info`"),
outs_info[i],
)
for t in taps:
if t >= 0:
raise ValueError(
- ("All the tap values must be " "smaller than 0."),
+ ("All the tap values must be smaller than 0."),
outs_info[i],
)
else:
@@ -609,11 +609,9 @@ def wrap_into_list(x):
# No need to print a warning or raise an error now,
# it will be done when fn will be called.
warnings.warn(
- (
- "Cannot compute test value for "
- "the inner function of scan, input value "
- f"missing {_seq_val_slice}"
- )
+ "Cannot compute test value for "
+ "the inner function of scan, input value "
+ f"missing {_seq_val_slice}"
)
# Add names to slices for debugging and pretty printing ..
@@ -737,10 +735,8 @@ def wrap_into_list(x):
except TestValueError:
if config.compute_test_value != "ignore":
warnings.warn(
- (
- "Cannot compute test value for the "
- f"inner function of scan, test value missing: {actual_arg}"
- )
+ "Cannot compute test value for the "
+ f"inner function of scan, test value missing: {actual_arg}"
)
if getattr(init_out["initial"], "name", None) is not None:
@@ -795,11 +791,9 @@ def wrap_into_list(x):
except TestValueError:
if config.compute_test_value != "ignore":
warnings.warn(
- (
- "Cannot compute test value for "
- "the inner function of scan, test value "
- f"missing: {_init_out_var_slice}"
- )
+ "Cannot compute test value for "
+ "the inner function of scan, test value "
+ f"missing: {_init_out_var_slice}"
)
# give it a name or debugging and pretty printing
diff --git a/pytensor/scan/op.py b/pytensor/scan/op.py
index 570308a486..d3f7b6e493 100644
--- a/pytensor/scan/op.py
+++ b/pytensor/scan/op.py
@@ -1047,7 +1047,7 @@ def make_node(self, *inputs):
raise ValueError(
err_msg1
% (
- "initial state (outputs_info" " in scan nomenclature) ",
+ "initial state (outputs_info in scan nomenclature) ",
str(outer_mitsot),
argoffset + idx,
outer_mitsot.type.dtype,
@@ -1094,7 +1094,7 @@ def make_node(self, *inputs):
raise ValueError(
err_msg1
% (
- "initial state (outputs_info" " in scan nomenclature) ",
+ "initial state (outputs_info in scan nomenclature) ",
str(outer_sitsot),
argoffset + idx,
outer_sitsot.type.dtype,
@@ -1171,7 +1171,7 @@ def make_node(self, *inputs):
raise ValueError(
err_msg1
% (
- "initial state (outputs_info" " in scan nomenclature) ",
+ "initial state (outputs_info in scan nomenclature) ",
str(outer_shared),
argoffset + idx,
outer_shared.dtype,
@@ -1197,10 +1197,8 @@ def make_node(self, *inputs):
new_inputs.append(outer_nonseq)
if not outer_nonseq.type.in_same_class(inner_nonseq.type):
raise ValueError(
- (
- f"Argument {outer_nonseq} given to the scan node is not"
- f" compatible with its corresponding loop function variable {inner_nonseq}"
- )
+ f"Argument {outer_nonseq} given to the scan node is not"
+ f" compatible with its corresponding loop function variable {inner_nonseq}"
)
for outer_nitsot in self.outer_nitsot(inputs):
@@ -3407,7 +3405,7 @@ def profile_printer(
)
else:
print(
- (" The node took 0s, so we can not " "compute the overhead"),
+ (" The node took 0s, so we can not compute the overhead"),
node,
file=file,
)
diff --git a/pytensor/scan/rewriting.py b/pytensor/scan/rewriting.py
index f32bca9973..209c06367e 100644
--- a/pytensor/scan/rewriting.py
+++ b/pytensor/scan/rewriting.py
@@ -1637,7 +1637,7 @@ def save_mem_new_scan(fgraph, node):
# 3.9. Get replace pairs for all other nodes
if flag_store or global_nsteps is not None:
for idx, o in enumerate(node.outputs):
- if not (idx in replaced_outs) and idx not in not_required:
+ if idx not in replaced_outs and idx not in not_required:
nw_pos = compress_map[idx]
old_new += [(o, new_outs[nw_pos])]
# Check if the new outputs depend on the old scan node
diff --git a/pytensor/scan/utils.py b/pytensor/scan/utils.py
index 4f1aed2454..037d383ef3 100644
--- a/pytensor/scan/utils.py
+++ b/pytensor/scan/utils.py
@@ -710,7 +710,7 @@ def from_node(node, clone=False) -> "ScanArgs":
from pytensor.scan.op import Scan
if not isinstance(node.op, Scan):
- raise TypeError("{} is not a Scan node".format(node))
+ raise TypeError(f"{node} is not a Scan node")
return ScanArgs(
node.inputs,
node.outputs,
@@ -885,9 +885,9 @@ def find_among_fields(
field_prefix = field_name[:8]
if field_prefix.endswith("in"):
- agg_field_name = "{}puts".format(field_prefix)
+ agg_field_name = f"{field_prefix}puts"
else:
- agg_field_name = "{}tputs".format(field_prefix)
+ agg_field_name = f"{field_prefix}tputs"
agg_list = getattr(self, agg_field_name)
@@ -934,12 +934,12 @@ def get_dependent_nodes(
field_info = self.find_among_fields(i)
if field_info is None:
- raise ValueError("{} not found among fields.".format(i))
+ raise ValueError(f"{i} not found among fields.")
# Find the `var_mappings` key suffix that matches the field/set of
# arguments containing our source node
if field_info.name[:8].endswith("_in"):
- map_key_suffix = "{}p".format(field_info.name[:8])
+ map_key_suffix = f"{field_info.name[:8]}p"
else:
map_key_suffix = field_info.name[:9]
@@ -963,9 +963,9 @@ def get_dependent_nodes(
# it either forms `"*_inputs"` or `"*_outputs"`.
to_agg_field_prefix = k[:9]
if to_agg_field_prefix.endswith("p"):
- to_agg_field_name = "{}uts".format(to_agg_field_prefix)
+ to_agg_field_name = f"{to_agg_field_prefix}uts"
else:
- to_agg_field_name = "{}puts".format(to_agg_field_prefix)
+ to_agg_field_name = f"{to_agg_field_prefix}puts"
to_agg_field = getattr(self, to_agg_field_name)
@@ -1047,29 +1047,29 @@ def merge(self, other: "ScanArgs") -> "ScanArgs":
def __str__(self):
inner_arg_strs = [
- "\t{}={}".format(p, getattr(self, p))
+ f"\t{p}={getattr(self, p)}"
for p in self.field_names
if p.startswith("outer_in") or p == "n_steps"
]
inner_arg_strs += [
- "\t{}={}".format(p, getattr(self, p))
+ f"\t{p}={getattr(self, p)}"
for p in self.field_names
if p.startswith("inner_in")
]
inner_arg_strs += [
- "\tmit_mot_in_slices={}".format(self.mit_mot_in_slices),
- "\tmit_sot_in_slices={}".format(self.mit_sot_in_slices),
+ f"\tmit_mot_in_slices={self.mit_mot_in_slices}",
+ f"\tmit_sot_in_slices={self.mit_sot_in_slices}",
]
inner_arg_strs += [
- "\t{}={}".format(p, getattr(self, p))
+ f"\t{p}={getattr(self, p)}"
for p in self.field_names
if p.startswith("inner_out")
]
inner_arg_strs += [
- "\tmit_mot_out_slices={}".format(self.mit_mot_out_slices),
+ f"\tmit_mot_out_slices={self.mit_mot_out_slices}",
]
inner_arg_strs += [
- "\t{}={}".format(p, getattr(self, p))
+ f"\t{p}={getattr(self, p)}"
for p in self.field_names
if p.startswith("outer_out")
]
diff --git a/pytensor/sparse/basic.py b/pytensor/sparse/basic.py
index 20af5dfc11..0b3a743bc8 100644
--- a/pytensor/sparse/basic.py
+++ b/pytensor/sparse/basic.py
@@ -185,7 +185,7 @@ def as_sparse_variable(x, name=None, ndim=None, **kwargs):
def constant(x, name=None):
if not isinstance(x, scipy.sparse.spmatrix):
- raise TypeError("sparse.constant must be called on a " "scipy.sparse.spmatrix")
+ raise TypeError("sparse.constant must be called on a scipy.sparse.spmatrix")
try:
return SparseConstant(
SparseTensorType(format=x.format, dtype=x.dtype), x.copy(), name=name
@@ -3439,7 +3439,7 @@ def make_node(self, a, b):
if not _is_sparse_variable(a):
raise TypeError(
- "First argument must be of type SparseVariable " "or SparseConstant"
+ "First argument must be of type SparseVariable or SparseConstant"
)
dtype_out = aes.upcast(a.type.dtype, b.type.dtype)
if b.type.ndim != 2:
@@ -3475,7 +3475,7 @@ def perform(self, node, inputs, outputs):
if variable.ndim == 1:
variable = np.expand_dims(variable, 1)
elif variable.ndim != 2:
- raise Exception("Output of structured dot should be a matrix " "(ndim=2)")
+ raise Exception("Output of structured dot should be a matrix (ndim=2)")
assert variable.ndim == 2
@@ -3613,7 +3613,7 @@ def c_code(self, node, name, inputs, outputs, sub):
if node.inputs[2].type.dtype in ("complex64", "complex128"):
raise NotImplementedError("Complex types are not supported for b")
if node.inputs[3].type.dtype in ("complex64", "complex128"):
- raise NotImplementedError("Complex types are not supported for " "g_ab")
+ raise NotImplementedError("Complex types are not supported for g_ab")
return """
if (PyArray_NDIM(%(_d)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(d) != 2"); %(fail)s;}
@@ -3749,7 +3749,7 @@ def c_code(self, node, name, inputs, outputs, sub):
if node.inputs[2].type.dtype in ("complex64", "complex128"):
raise NotImplementedError("Complex types are not supported for b")
if node.inputs[3].type.dtype in ("complex64", "complex128"):
- raise NotImplementedError("Complex types are not supported for " "g_ab")
+ raise NotImplementedError("Complex types are not supported for g_ab")
return """
if (PyArray_NDIM(%(_d)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(d) != 2"); %(fail)s;}
diff --git a/pytensor/sparse/rewriting.py b/pytensor/sparse/rewriting.py
index 39c06910a9..6c20325ed3 100644
--- a/pytensor/sparse/rewriting.py
+++ b/pytensor/sparse/rewriting.py
@@ -687,7 +687,7 @@ def make_node(self, alpha, x_val, x_ind, x_ptr, x_nrows, y, z):
)
if dtype_out not in ("float32", "float64"):
- raise NotImplementedError("only float types are supported in " "operands")
+ raise NotImplementedError("only float types are supported in operands")
if self.inplace:
assert z.type.dtype == dtype_out
@@ -728,7 +728,7 @@ def c_code(self, node, name, inputs, outputs, sub):
alpha, x_val, x_ind, x_ptr, x_nrows, y, z = inputs
zn = outputs[0]
if node.inputs[1].type.dtype in ("complex64", "complex128"):
- raise NotImplementedError("Complex types are not supported for " "x_val")
+ raise NotImplementedError("Complex types are not supported for x_val")
if node.inputs[5].type.dtype in ("complex64", "complex128"):
raise NotImplementedError("Complex types are not supported for y")
if node.inputs[6].type.dtype != node.outputs[0].type.dtype:
diff --git a/pytensor/tensor/basic.py b/pytensor/tensor/basic.py
index c69a978a52..28c323c47f 100644
--- a/pytensor/tensor/basic.py
+++ b/pytensor/tensor/basic.py
@@ -1938,7 +1938,7 @@ def perform(self, node, inputs, outputs):
)
if builtins.any(nb < 0 for nb in splits):
raise ValueError(
- "Attempted to make an array with a " "negative number of elements"
+ "Attempted to make an array with a negative number of elements"
)
# Checking is done, let's roll the splitting algorithm!
@@ -2179,9 +2179,7 @@ def __str__(self):
else:
return "{}{{{}}}".format(
self.__class__.__name__,
- ", ".join(
- "{}={!r}".format(p, getattr(self, p)) for p in self.__props__
- ),
+ ", ".join(f"{p}={getattr(self, p)!r}" for p in self.__props__),
)
def __setstate__(self, d):
@@ -2819,7 +2817,7 @@ def tile(x, reps, ndim=None):
elif ndim_check == 1:
if ndim is None:
raise ValueError(
- "if reps is tensor.vector, you should specify " "the ndim"
+ "if reps is tensor.vector, you should specify the ndim"
)
else:
offset = ndim - reps.shape[0]
@@ -3083,7 +3081,7 @@ def __getitem__(self, *args):
for sl in args[0]:
if isinstance(sl.step, builtins.complex):
raise NotImplementedError(
- "Not implemented for slices " "whose step is complex"
+ "Not implemented for slices whose step is complex"
)
ranges = [arange(sl.start or 0, sl.stop, sl.step or 1) for sl in args[0]]
shapes = [
@@ -3395,9 +3393,7 @@ def make_node(self, x):
x = as_tensor_variable(x)
if x.ndim < 2:
- raise ValueError(
- "ExtractDiag needs an input with 2 or more " "dimensions", x
- )
+ raise ValueError("ExtractDiag needs an input with 2 or more dimensions", x)
return Apply(
self,
[x],
@@ -3518,7 +3514,7 @@ def make_node(self, diag):
diag = as_tensor_variable(diag)
if diag.type.ndim < 1:
raise ValueError(
- "AllocDiag needs an input with 1 or more " "dimensions", diag.type
+ "AllocDiag needs an input with 1 or more dimensions", diag.type
)
return Apply(
self,
diff --git a/pytensor/tensor/blas.py b/pytensor/tensor/blas.py
index 3801837d45..1cfe1d63b0 100644
--- a/pytensor/tensor/blas.py
+++ b/pytensor/tensor/blas.py
@@ -943,7 +943,7 @@ def make_node(self, *inputs):
)
z, a, x, y, b = inputs
- zr, xr, yr = [set(view_roots(i)) for i in (z, x, y)]
+ zr, xr, yr = (set(view_roots(i)) for i in (z, x, y))
# We want the gemm to be inplace. When this op is inplace, it
# declare to be inplace only on z. So to make it safe, we
@@ -2363,8 +2363,7 @@ def contiguous(var, ndim):
),
"(%s)"
% " || ".join(
- "{strides}[{i}] == type_size".format(strides=strides, i=i)
- for i in range(1, ndim)
+ f"{strides}[{i}] == type_size" for i in range(1, ndim)
),
]
)
diff --git a/pytensor/tensor/elemwise.py b/pytensor/tensor/elemwise.py
index 764d67f5d8..9e63399706 100644
--- a/pytensor/tensor/elemwise.py
+++ b/pytensor/tensor/elemwise.py
@@ -1015,7 +1015,7 @@ def _c_all(self, node, nodename, inames, onames, sub):
# No loops
task_decl = "".join(
[
- "{}& {}_i = *{}_iter;\n".format(dtype, name, name)
+ f"{dtype}& {name}_i = *{name}_iter;\n"
for name, dtype in zip(
inames + list(real_onames), idtypes + list(real_odtypes)
)
diff --git a/pytensor/tensor/math.py b/pytensor/tensor/math.py
index eb609851e7..44a08e6738 100644
--- a/pytensor/tensor/math.py
+++ b/pytensor/tensor/math.py
@@ -267,13 +267,11 @@ def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return [None, None]
if len(self.axis) != 1:
- raise ValueError("R_op supported for arg_max only for " "one axis!")
+ raise ValueError("R_op supported for arg_max only for one axis!")
if self.axis[0] > 1:
- raise ValueError("R_op supported for arg_max only when " " axis is 0 or 1")
+ raise ValueError("R_op supported for arg_max only when axis is 0 or 1")
if inputs[0].ndim != 2:
- raise ValueError(
- "R_op supported for arg_max only when " " input is a matrix"
- )
+ raise ValueError("R_op supported for arg_max only when input is a matrix")
max_vals, max_pos = self.make_node(*inputs).outputs
if self.axis[0] == 0:
return [eval_points[0][max_pos, arange(eval_points[0].shape[1])], None]
@@ -1917,7 +1915,7 @@ def make_node(self, *inputs):
"pytensor.tensor.dot instead."
)
- sx, sy = [input.type.shape for input in inputs]
+ sx, sy = (input.type.shape for input in inputs)
if len(sy) == 2:
sz = sx[:-1] + sy[-1:]
elif len(sy) == 1:
diff --git a/pytensor/tensor/nlinalg.py b/pytensor/tensor/nlinalg.py
index 1c244d9324..7153626ac7 100644
--- a/pytensor/tensor/nlinalg.py
+++ b/pytensor/tensor/nlinalg.py
@@ -250,7 +250,7 @@ def make_node(self, x):
def perform(self, node, inputs, outputs):
(x,) = inputs
(w, v) = outputs
- w[0], v[0] = [z.astype(x.dtype) for z in self._numop(x)]
+ w[0], v[0] = (z.astype(x.dtype) for z in self._numop(x))
def infer_shape(self, fgraph, node, shapes):
n = shapes[0][0]
diff --git a/pytensor/tensor/nnet/abstract_conv.py b/pytensor/tensor/nnet/abstract_conv.py
index 8f9847cc6a..ded9725c61 100644
--- a/pytensor/tensor/nnet/abstract_conv.py
+++ b/pytensor/tensor/nnet/abstract_conv.py
@@ -2211,7 +2211,7 @@ def __init__(
get_scalar_constant_value(imshp_i, only_process_constants=True)
except NotScalarConstantError:
raise ValueError(
- "imshp should be None or a tuple of " "constant int values"
+ "imshp should be None or a tuple of constant int values"
).with_traceback(sys.exc_info()[2])
if kshp:
self.kshp = tuple(kshp)
@@ -2224,7 +2224,7 @@ def __init__(
get_scalar_constant_value(kshp_i, only_process_constants=True)
except NotScalarConstantError:
raise ValueError(
- "kshp should be None or a tuple of " "constant int values"
+ "kshp should be None or a tuple of constant int values"
).with_traceback(sys.exc_info()[2])
self.border_mode = border_mode
self.filter_flip = filter_flip
@@ -2283,7 +2283,7 @@ def conv(
"""
Basic slow Python 2D or 3D convolution for DebugMode
"""
- if not (mode in ("valid", "full")):
+ if mode not in ("valid", "full"):
raise ValueError(
"invalid mode {}, which must be either "
'"valid" or "full"'.format(mode)
diff --git a/pytensor/tensor/nnet/batchnorm.py b/pytensor/tensor/nnet/batchnorm.py
index 3d2235c23e..1be97b776e 100644
--- a/pytensor/tensor/nnet/batchnorm.py
+++ b/pytensor/tensor/nnet/batchnorm.py
@@ -215,7 +215,7 @@ def batch_normalization_train(
)
if (running_mean is None) != (running_var is None):
raise ValueError(
- "running_mean and running_var must either both be " "given or both be None"
+ "running_mean and running_var must either both be given or both be None"
)
if running_mean is not None and running_mean.ndim != params_ndim:
raise ValueError(
diff --git a/pytensor/tensor/nnet/conv.py b/pytensor/tensor/nnet/conv.py
index de566c1f21..c600a7a1aa 100644
--- a/pytensor/tensor/nnet/conv.py
+++ b/pytensor/tensor/nnet/conv.py
@@ -647,7 +647,7 @@ def __init__(
self.unroll_patch = False
_logger.debug(
- "AUTO FIND VERSION OF C_CODE OF CONV OP " "%s %s %s %s %s %s %s",
+ "AUTO FIND VERSION OF C_CODE OF CONV OP %s %s %s %s %s %s %s",
self.unroll_batch,
self.unroll_kern,
self.unroll_patch,
diff --git a/pytensor/tensor/nnet/neighbours.py b/pytensor/tensor/nnet/neighbours.py
index dca1c78bbc..521e0ef99e 100644
--- a/pytensor/tensor/nnet/neighbours.py
+++ b/pytensor/tensor/nnet/neighbours.py
@@ -224,7 +224,7 @@ def CEIL_INTDIV(a, b):
if mode == "wrap_centered":
if (c % 2 != 1) or (d % 2 != 1):
raise TypeError(
- "Images2Neibs:" " in mode wrap_centered need patch with odd shapes"
+ "Images2Neibs: in mode wrap_centered need patch with odd shapes"
)
if (ten4.shape[2] < c) or (ten4.shape[3] < d):
diff --git a/pytensor/tensor/random/op.py b/pytensor/tensor/random/op.py
index 05b4e4b131..6460da2e54 100644
--- a/pytensor/tensor/random/op.py
+++ b/pytensor/tensor/random/op.py
@@ -76,10 +76,8 @@ def default_supp_shape_from_params(
ref_param = dist_params[rep_param_idx]
if ref_param.ndim < ndim_supp:
raise ValueError(
- (
- "Reference parameter does not match the "
- f"expected dimensions; {ref_param} has less than {ndim_supp} dim(s)."
- )
+ "Reference parameter does not match the "
+ f"expected dimensions; {ref_param} has less than {ndim_supp} dim(s)."
)
return ref_param.shape[-ndim_supp:]
@@ -166,7 +164,7 @@ def rng_fn(self, rng, *args, **kwargs) -> Union[int, float, np.ndarray]:
return getattr(rng, self.name)(*args, **kwargs)
def __str__(self):
- props_str = ", ".join((f"{getattr(self, prop)}" for prop in self.__props__[1:]))
+ props_str = ", ".join(f"{getattr(self, prop)}" for prop in self.__props__[1:])
return f"{self.name}_rv{{{props_str}}}"
def _infer_shape(
diff --git a/pytensor/tensor/random/utils.py b/pytensor/tensor/random/utils.py
index 6ead02d23d..c6a9344b31 100644
--- a/pytensor/tensor/random/utils.py
+++ b/pytensor/tensor/random/utils.py
@@ -204,7 +204,7 @@ def __getattr__(self, obj):
)
if ns_obj is None:
- raise AttributeError("No attribute {}.".format(obj))
+ raise AttributeError(f"No attribute {obj}.")
from pytensor.tensor.random.op import RandomVariable
@@ -215,7 +215,7 @@ def meta_obj(*args, **kwargs):
return self.gen(ns_obj, *args, **kwargs)
else:
- raise AttributeError("No attribute {}.".format(obj))
+ raise AttributeError(f"No attribute {obj}.")
setattr(self, obj, meta_obj)
return getattr(self, obj)
diff --git a/pytensor/tensor/random/var.py b/pytensor/tensor/random/var.py
index 069e887d86..07a06428a1 100644
--- a/pytensor/tensor/random/var.py
+++ b/pytensor/tensor/random/var.py
@@ -8,7 +8,7 @@
class RandomStateSharedVariable(SharedVariable):
def __str__(self):
- return self.name or "RandomStateSharedVariable({})".format(repr(self.container))
+ return self.name or f"RandomStateSharedVariable({repr(self.container)})"
class RandomGeneratorSharedVariable(SharedVariable):
diff --git a/pytensor/tensor/rewriting/elemwise.py b/pytensor/tensor/rewriting/elemwise.py
index 127530bf42..e9952a3908 100644
--- a/pytensor/tensor/rewriting/elemwise.py
+++ b/pytensor/tensor/rewriting/elemwise.py
@@ -692,12 +692,10 @@ def local_fuse(fgraph, node):
except (NotImplementedError, MethodNotDefined):
warn(
- (
- "Rewrite warning: "
- f"The Op {i.owner.op.scalar_op} does not provide a C implementation."
- " As well as being potentially slow, this also disables "
- "loop fusion."
- )
+ "Rewrite warning: "
+ f"The Op {i.owner.op.scalar_op} does not provide a C implementation."
+ " As well as being potentially slow, this also disables "
+ "loop fusion."
)
scalar_node = None
@@ -762,12 +760,10 @@ def local_fuse(fgraph, node):
except (NotImplementedError, MethodNotDefined):
name = str(s_new_out[0].owner.op)
warn(
- (
- "Rewrite warning: "
- f"The Op {name} does not provide a C implementation."
- " As well as being potentially slow, this also disables "
- "loop fusion."
- )
+ "Rewrite warning: "
+ f"The Op {name} does not provide a C implementation."
+ " As well as being potentially slow, this also disables "
+ "loop fusion."
)
return False
diff --git a/pytensor/tensor/rewriting/shape.py b/pytensor/tensor/rewriting/shape.py
index 0358e4c457..20a9a62df6 100644
--- a/pytensor/tensor/rewriting/shape.py
+++ b/pytensor/tensor/rewriting/shape.py
@@ -544,11 +544,9 @@ def on_import(self, fgraph, node, reason):
# elements of the tuple can be either strings, or ints
if len(o_shapes) != len(node.outputs):
raise Exception(
- (
- f'The infer_shape method for the Op "{node.op}" returned a list '
- f"with the wrong number of element: len(o_shapes) = {len(o_shapes)} "
- f" != len(node.outputs) = {len(node.outputs)}"
- )
+ f'The infer_shape method for the Op "{node.op}" returned a list '
+ f"with the wrong number of element: len(o_shapes) = {len(o_shapes)} "
+ f" != len(node.outputs) = {len(node.outputs)}"
)
# Ensure shapes are in 'int64'. This is to make sure the assert
diff --git a/pytensor/tensor/rewriting/subtensor.py b/pytensor/tensor/rewriting/subtensor.py
index c94356d952..a252d0f446 100644
--- a/pytensor/tensor/rewriting/subtensor.py
+++ b/pytensor/tensor/rewriting/subtensor.py
@@ -1434,7 +1434,7 @@ def local_adv_sub1_adv_inc_sub1(fgraph, node):
if not fgraph.shape_feature.same_shape(idx, y, 0, 0):
cond.append(eq(idx.shape[0], y.shape[0]))
r = Assert(
- "Bad indexing or shapes in a AdvancedIncSubtensor1 " "that was optimized away"
+ "Bad indexing or shapes in a AdvancedIncSubtensor1 that was optimized away"
)(y, *cond)
copy_stack_trace(y, r)
diff --git a/pytensor/tensor/shape.py b/pytensor/tensor/shape.py
index 3db3ea5517..0a0c867f82 100644
--- a/pytensor/tensor/shape.py
+++ b/pytensor/tensor/shape.py
@@ -328,7 +328,7 @@ def shape_i(var, i, fgraph=None):
shape_of = shape_feature.shape_of
def recur(node):
- if not node.outputs[0] in shape_of:
+ if node.outputs[0] not in shape_of:
for inp in node.inputs:
if inp.owner:
recur(inp.owner)
@@ -647,10 +647,8 @@ def perform(self, node, inp, out_, params):
(out,) = out_
if len(shp) != self.ndim:
raise ValueError(
- (
- "Shape argument to Reshape has incorrect"
- f" length: {len(shp)}, should be {self.ndim}"
- )
+ "Shape argument to Reshape has incorrect"
+ f" length: {len(shp)}, should be {self.ndim}"
)
out[0] = np.reshape(x, shp)
diff --git a/pytensor/tensor/sort.py b/pytensor/tensor/sort.py
index 4af8eb5b67..cf322e1d69 100644
--- a/pytensor/tensor/sort.py
+++ b/pytensor/tensor/sort.py
@@ -452,7 +452,7 @@ def L_op(self, inputs, outputs, out_grads):
self,
0,
x,
- "topk: cannot get gradient" " without both indices and values",
+ "topk: cannot get gradient without both indices and values",
)
else:
x_shp = shape(x)
diff --git a/pytensor/tensor/subtensor.py b/pytensor/tensor/subtensor.py
index 6696bf8207..f5d81e9a84 100644
--- a/pytensor/tensor/subtensor.py
+++ b/pytensor/tensor/subtensor.py
@@ -2035,7 +2035,7 @@ def c_support_code(self, **kwargs):
def c_code(self, node, name, input_names, output_names, sub):
if self.__class__ is not AdvancedSubtensor1:
raise MethodNotDefined(
- "c_code defined for AdvancedSubtensor1," " not for child class",
+ "c_code defined for AdvancedSubtensor1, not for child class",
type(self),
)
a_name, i_name = input_names[0], input_names[1]
diff --git a/pytensor/tensor/var.py b/pytensor/tensor/var.py
index 1ce38eab1d..9d3ce67e80 100644
--- a/pytensor/tensor/var.py
+++ b/pytensor/tensor/var.py
@@ -1037,7 +1037,7 @@ def __str__(self):
name = self.name
else:
name = "TensorConstant"
- return "%s{%s}" % (name, val)
+ return f"{name}{{{val}}}"
def signature(self):
return TensorConstantSignature((self.type, self.data))
diff --git a/pytensor/typed_list/type.py b/pytensor/typed_list/type.py
index 2cc175b4b8..d7ca35e5e3 100644
--- a/pytensor/typed_list/type.py
+++ b/pytensor/typed_list/type.py
@@ -17,7 +17,7 @@ class TypedListType(CType):
def __init__(self, ttype, depth=0):
if depth < 0:
- raise ValueError("Please specify a depth superior or" "equal to 0")
+ raise ValueError("Please specify a depth superior or equal to 0")
if not isinstance(ttype, Type):
raise TypeError("Expected an PyTensor Type")
diff --git a/tests/compile/function/test_pfunc.py b/tests/compile/function/test_pfunc.py
index 6523a1b24e..c37ed77dea 100644
--- a/tests/compile/function/test_pfunc.py
+++ b/tests/compile/function/test_pfunc.py
@@ -118,7 +118,7 @@ def test_default_container(self):
# function, as a 'state' that can be updated at will.
rng = np.random.default_rng(1827)
- w_init = rng.random((5))
+ w_init = rng.random(5)
w = shared(w_init.copy(), "w")
reg = at_sum(w * w)
f = pfunc([], reg)
@@ -145,7 +145,7 @@ def test_param_strict(self):
f = pfunc([In(a, strict=False)], [out])
# works, random( generates float64 by default
- f(np.random.random((8)))
+ f(np.random.random(8))
# works, casting is allowed
f(np.array([1, 2, 3, 4], dtype="int32"))
@@ -162,14 +162,14 @@ def test_param_mutable(self):
# using mutable=True will let fip change the value in aval
fip = pfunc([In(a, mutable=True)], [a_out], mode="FAST_RUN")
- aval = np.random.random((10))
+ aval = np.random.random(10)
aval2 = aval.copy()
assert np.all(fip(aval) == (aval2 * 2))
assert not np.all(aval == aval2)
# using mutable=False should leave the input untouched
f = pfunc([In(a, mutable=False)], [a_out], mode="FAST_RUN")
- aval = np.random.random((10))
+ aval = np.random.random(10)
aval2 = aval.copy()
assert np.all(f(aval) == (aval2 * 2))
assert np.all(aval == aval2)
diff --git a/tests/compile/test_debugmode.py b/tests/compile/test_debugmode.py
index 4af86c143e..8907076849 100644
--- a/tests/compile/test_debugmode.py
+++ b/tests/compile/test_debugmode.py
@@ -806,7 +806,7 @@ def test_output_broadcast_tensor(self):
c, r = VecAsRowAndCol()(v)
f = function([v], [c, r])
- v_val = self.rng.standard_normal((5)).astype("float32")
+ v_val = self.rng.standard_normal(5).astype("float32")
f(v_val)
diff --git a/tests/link/c/test_params_type.py b/tests/link/c/test_params_type.py
index b47eb697ba..d9eb6674dd 100644
--- a/tests/link/c/test_params_type.py
+++ b/tests/link/c/test_params_type.py
@@ -43,36 +43,36 @@ def c_support_code_apply(self, node, name):
float_type = node.inputs[0].type.dtype_specs()[1]
return """
/* Computes: x = a*x*x + b*x + c for x in tensor. */
- int quadratic_%(name)s(PyArrayObject* tensor, %(float_type)s a, %(float_type)s b, %(float_type)s c) {
+ int quadratic_{name}(PyArrayObject* tensor, {float_type} a, {float_type} b, {float_type} c) {{
NpyIter* iterator = NpyIter_New(tensor,
NPY_ITER_READWRITE | NPY_ITER_EXTERNAL_LOOP | NPY_ITER_REFS_OK,
NPY_KEEPORDER, NPY_NO_CASTING, NULL);
- if(iterator == NULL) {
+ if(iterator == NULL) {{
PyErr_SetString(PyExc_RuntimeError, "Unable to iterate over a tensor for an elemwise operation.");
return -1;
- }
+ }}
NpyIter_IterNextFunc* get_next = NpyIter_GetIterNext(iterator, NULL);
char** data_ptr = NpyIter_GetDataPtrArray(iterator);
npy_intp* stride_ptr = NpyIter_GetInnerStrideArray(iterator);
npy_intp* innersize_ptr = NpyIter_GetInnerLoopSizePtr(iterator);
- do {
+ do {{
char* data = *data_ptr;
npy_intp stride = *stride_ptr;
npy_intp count = *innersize_ptr;
- while(count) {
- %(float_type)s x = *((%(float_type)s*)data);
- *((%(float_type)s*)data) = a*x*x + b*x + c;
+ while(count) {{
+ {float_type} x = *(({float_type}*)data);
+ *(({float_type}*)data) = a*x*x + b*x + c;
data += stride;
--count;
- }
- } while(get_next(iterator));
+ }}
+ }} while(get_next(iterator));
NpyIter_Deallocate(iterator);
return 0;
- }
- """ % {
- "name": name,
- "float_type": float_type,
- }
+ }}
+ """.format(
+ name=name,
+ float_type=float_type,
+ )
def c_code(self, node, name, inputs, outputs, sub):
return """
diff --git a/tests/link/jax/test_scan.py b/tests/link/jax/test_scan.py
index 57df561063..3c9cbb5496 100644
--- a/tests/link/jax/test_scan.py
+++ b/tests/link/jax/test_scan.py
@@ -87,9 +87,9 @@ def seir_one_step(ct0, dt0, st0, et0, it0, logp_c, logp_d, beta, gamma, delta):
s0, e0, i0 = 100, 50, 25
logp_c0 = np.array(0.0, dtype=config.floatX)
logp_d0 = np.array(0.0, dtype=config.floatX)
- beta_val, gamma_val, delta_val = [
+ beta_val, gamma_val, delta_val = (
np.array(val, dtype=config.floatX) for val in [0.277792, 0.135330, 0.108753]
- ]
+ )
C = np.array([3, 5, 8, 13, 21, 26, 10, 3], dtype=np.int32)
D = np.array([1, 2, 3, 7, 9, 11, 5, 1], dtype=np.int32)
diff --git a/tests/link/numba/test_scan.py b/tests/link/numba/test_scan.py
index d05340049b..04bb3aefd8 100644
--- a/tests/link/numba/test_scan.py
+++ b/tests/link/numba/test_scan.py
@@ -226,9 +226,9 @@ def seir_one_step(ct0, dt0, st0, et0, it0, logp_c, logp_d, beta, gamma, delta):
s0, e0, i0 = 100, 50, 25
logp_c0 = np.array(0.0, dtype=config.floatX)
logp_d0 = np.array(0.0, dtype=config.floatX)
- beta_val, gamma_val, delta_val = [
+ beta_val, gamma_val, delta_val = (
np.array(val, dtype=config.floatX) for val in [0.277792, 0.135330, 0.108753]
- ]
+ )
C = np.array([3, 5, 8, 13, 21, 26, 10, 3], dtype=np.int32)
D = np.array([1, 2, 3, 7, 9, 11, 5, 1], dtype=np.int32)
diff --git a/tests/scan/test_basic.py b/tests/scan/test_basic.py
index bc9b78b72d..a4aa9e7020 100644
--- a/tests/scan/test_basic.py
+++ b/tests/scan/test_basic.py
@@ -849,7 +849,7 @@ def test_shared_arguments_with_updates(self):
vu1 = asarrayX(rng.random((3, 2)))
vu2 = asarrayX(rng.random((3, 3)))
vy0 = asarrayX(rng.random((3, 2)))
- vy1 = asarrayX(rng.random((2)))
+ vy1 = asarrayX(rng.random(2))
vu1 = asarrayX(rng.random((3, 2)))
W1 = shared(vW1, "W1")
@@ -2614,8 +2614,8 @@ def test_gibbs_chain(self):
),
dtype="float32",
)
- v_bvis = np.array(rng.random((20)) - 0.5, dtype="float32")
- v_bhid = np.array(rng.random((30)) - 0.5, dtype="float32")
+ v_bvis = np.array(rng.random(20) - 0.5, dtype="float32")
+ v_bhid = np.array(rng.random(30) - 0.5, dtype="float32")
W = shared(v_W, "vW")
bhid = shared(v_bhid, "vbhid")
bvis = shared(v_bvis, "vbvis")
diff --git a/tests/sparse/sandbox/test_sp.py b/tests/sparse/sandbox/test_sp.py
index e46a98c2fb..a9dcc813a8 100644
--- a/tests/sparse/sandbox/test_sp.py
+++ b/tests/sparse/sandbox/test_sp.py
@@ -35,7 +35,7 @@ def test_convolution(self):
input = dmatrix()
rng = np.random.default_rng(3423489)
filters = rng.standard_normal((nkern, np.prod(kshp)))
- biasvals = rng.standard_normal((nkern))
+ biasvals = rng.standard_normal(nkern)
for mode in ("FAST_COMPILE", "FAST_RUN"):
ttot, ntot = 0, 0
diff --git a/tests/sparse/test_basic.py b/tests/sparse/test_basic.py
index 0ef21e9f32..d106f5ae38 100644
--- a/tests/sparse/test_basic.py
+++ b/tests/sparse/test_basic.py
@@ -1633,22 +1633,22 @@ def test_tensor_dot_types(self):
y = sparse.csc_matrix("y")
res = at.dot(x, y)
- op_types = set(type(n.op) for n in applys_between([x, y], [res]))
+ op_types = {type(n.op) for n in applys_between([x, y], [res])}
assert sparse.basic.StructuredDot in op_types
assert at.math.Dot not in op_types
res = at.dot(x_d, y)
- op_types = set(type(n.op) for n in applys_between([x, y], [res]))
+ op_types = {type(n.op) for n in applys_between([x, y], [res])}
assert sparse.basic.StructuredDot in op_types
assert at.math.Dot not in op_types
res = at.dot(x, x_d)
- op_types = set(type(n.op) for n in applys_between([x, y], [res]))
+ op_types = {type(n.op) for n in applys_between([x, y], [res])}
assert sparse.basic.StructuredDot in op_types
assert at.math.Dot not in op_types
res = at.dot(at.second(1, x), y)
- op_types = set(type(n.op) for n in applys_between([x, y], [res]))
+ op_types = {type(n.op) for n in applys_between([x, y], [res])}
assert sparse.basic.StructuredDot in op_types
assert at.math.Dot not in op_types
@@ -3203,7 +3203,7 @@ def test_mul_s_v_grad(self):
for format in ("csr", "csc"):
for dtype in ("float32", "float64"):
spmat = sp_types[format](random_lil((4, 3), dtype, 3))
- mat = np.asarray(np.random.random((3)), dtype=dtype)
+ mat = np.asarray(np.random.random(3), dtype=dtype)
verify_grad_sparse(mul_s_v, [spmat, mat], structured=True)
@@ -3217,7 +3217,7 @@ def test_mul_s_v(self):
f = pytensor.function([x, y], mul_s_v(x, y))
spmat = sp_types[format](random_lil((4, 3), dtype, 3))
- mat = np.asarray(np.random.random((3)), dtype=dtype)
+ mat = np.asarray(np.random.random(3), dtype=dtype)
out = f(spmat, mat)
@@ -3231,7 +3231,7 @@ def test_structured_add_s_v_grad(self):
for format in ("csr", "csc"):
for dtype in ("float32", "float64"):
spmat = sp_types[format](random_lil((4, 3), dtype, 3))
- mat = np.asarray(np.random.random((3)), dtype=dtype)
+ mat = np.asarray(np.random.random(3), dtype=dtype)
verify_grad_sparse(structured_add_s_v, [spmat, mat], structured=True)
@@ -3247,7 +3247,7 @@ def test_structured_add_s_v(self):
spmat = sp_types[format](random_lil((4, 3), dtype, 3))
spones = spmat.copy()
spones.data = np.ones_like(spones.data)
- mat = np.asarray(np.random.random((3)), dtype=dtype)
+ mat = np.asarray(np.random.random(3), dtype=dtype)
out = f(spmat, mat)
@@ -3273,7 +3273,7 @@ def test_op_ss(self):
tested = f(*data)
- x, y = [m.toarray() for m in data]
+ x, y = (m.toarray() for m in data)
expected = np.dot(x, y)
assert tested.format == format
diff --git a/tests/tensor/nnet/speed_test_conv.py b/tests/tensor/nnet/speed_test_conv.py
index 2b5b8166e5..b40fe75b5f 100644
--- a/tests/tensor/nnet/speed_test_conv.py
+++ b/tests/tensor/nnet/speed_test_conv.py
@@ -82,7 +82,7 @@ def exec_multilayer_conv_nnet_old(
print(conv_mode, ss, n_layer, kshp, nkern)
# actual values
- w = global_rng.random((np.r_[nkern, imshp[0], kshp]))
+ w = global_rng.random(np.r_[nkern, imshp[0], kshp])
w_flip = flip(w, kshp).reshape(w.shape)
# manual implementation
@@ -216,7 +216,7 @@ def exec_multilayer_conv_nnet(
print(conv_mode, ss, n_layer, kshp, nkern)
# actual values
- w = global_rng.random((np.r_[nkern, imshp[0], kshp]))
+ w = global_rng.random(np.r_[nkern, imshp[0], kshp])
w_flip = flip(w, kshp).reshape(w.shape)
outshp = np.hstack(
diff --git a/tests/tensor/nnet/test_basic.py b/tests/tensor/nnet/test_basic.py
index af8900237a..c6cc9c16eb 100644
--- a/tests/tensor/nnet/test_basic.py
+++ b/tests/tensor/nnet/test_basic.py
@@ -108,22 +108,22 @@ def f(a, b):
rng = np.random.default_rng(utt.fetch_seed())
- utt.verify_grad(f, [rng.random((3, 4)), rng.random((4))])
+ utt.verify_grad(f, [rng.random((3, 4)), rng.random(4)])
def f(a, b):
return softmax_with_bias(a, b)[:, 1]
- utt.verify_grad(f, [rng.random((3, 4)), rng.random((4))])
+ utt.verify_grad(f, [rng.random((3, 4)), rng.random(4)])
def f(a, b):
return softmax_with_bias(a, b)[:, 2]
- utt.verify_grad(f, [rng.random((3, 4)), rng.random((4))])
+ utt.verify_grad(f, [rng.random((3, 4)), rng.random(4)])
def f(a, b):
return softmax_with_bias(a, b)[:, 3]
- utt.verify_grad(f, [rng.random((3, 4)), rng.random((4))])
+ utt.verify_grad(f, [rng.random((3, 4)), rng.random(4)])
def test_broadcast(self):
"""
@@ -159,7 +159,7 @@ def test_infer_shape(self):
advec = vector()
rng = np.random.default_rng(utt.fetch_seed())
admat_val = rng.random((3, 4)).astype(config.floatX)
- advec_val = rng.random((4)).astype(config.floatX)
+ advec_val = rng.random(4).astype(config.floatX)
self._compile_and_check(
[admat, advec],
[SoftmaxWithBias()(admat, advec)],
@@ -177,7 +177,7 @@ def f(a, b):
rng = np.random.default_rng(utt.fetch_seed())
- utt.verify_grad(f, [rng.random((3, 4)), rng.random((4))])
+ utt.verify_grad(f, [rng.random((3, 4)), rng.random(4)])
y_idx = [0, 1, 3]
@@ -202,7 +202,7 @@ def f(a, b):
return crossentropy_softmax_1hot(shape_padleft(a) + b, y_idx)[0]
rng = np.random.default_rng(utt.fetch_seed())
- utt.verify_grad(f, [rng.random((4,)), rng.random((4))])
+ utt.verify_grad(f, [rng.random((4,)), rng.random(4)])
class TestCrossEntropySoftmax1HotWithBiasDx(utt.InferShapeTester):
@@ -214,7 +214,7 @@ def f(sm):
# Class indices
y = rng.integers(low=0, high=5, size=10).astype(class_dtype)
return crossentropy_softmax_1hot_with_bias_dx(
- rng.random((10)),
+ rng.random(10),
sm,
y, # Gradient w.r.t. NLL. # Softmax output.
)
@@ -237,7 +237,7 @@ def f(dy):
dy, softmax_output, rng.integers(low=0, high=5, size=10)
)
- utt.verify_grad(f, [rng.random((10))])
+ utt.verify_grad(f, [rng.random(10)])
def test_infer_shape(self):
admat = matrix()
@@ -246,7 +246,7 @@ def test_infer_shape(self):
rng = np.random.default_rng(utt.fetch_seed())
admat_val = rng.random((10, 5)).astype(config.floatX)
admat_val /= admat_val.sum(axis=1).reshape(10, 1)
- advec_val = rng.random((10)).astype(config.floatX)
+ advec_val = rng.random(10).astype(config.floatX)
alvec_val = rng.integers(low=0, high=5, size=10)
self._compile_and_check(
[advec, admat, alvec],
@@ -262,7 +262,7 @@ def test_neg_idx(self):
rng = np.random.default_rng(utt.fetch_seed())
admat_val = rng.random((10, 5)).astype(config.floatX)
admat_val /= admat_val.sum(axis=1).reshape(10, 1)
- advec_val = rng.random((10)).astype(config.floatX)
+ advec_val = rng.random(10).astype(config.floatX)
alvec_val = rng.integers(low=0, high=5, size=10)
alvec_val[1] = -1
out = CrossentropySoftmax1HotWithBiasDx()(advec, admat, alvec)
@@ -297,7 +297,7 @@ def grad_on_nll(x, b):
grad_on_nll_dtype(dtype),
[
rng.random((n_samples, n_classes)),
- rng.random((n_classes)),
+ rng.random(n_classes),
],
)
@@ -311,7 +311,7 @@ def grad_on_softmax(x, b):
utt.verify_grad(
grad_on_softmax,
- [rng.random((n_samples, n_classes)), rng.random((n_classes))],
+ [rng.random((n_samples, n_classes)), rng.random(n_classes)],
)
def test_infer_shape(self):
@@ -320,7 +320,7 @@ def test_infer_shape(self):
alvec = lvector()
rng = np.random.default_rng(utt.fetch_seed())
admat_val = rng.random((3, 5)).astype(config.floatX)
- advec_val = rng.random((5)).astype(config.floatX)
+ advec_val = rng.random(5).astype(config.floatX)
alvec_val = rng.integers(low=0, high=5, size=3)
self._compile_and_check(
[admat, advec, alvec],
@@ -335,7 +335,7 @@ def test_neg_idx(self):
alvec = lvector()
rng = np.random.default_rng(utt.fetch_seed())
admat_val = rng.random((3, 5)).astype(config.floatX)
- advec_val = rng.random((5)).astype(config.floatX)
+ advec_val = rng.random(5).astype(config.floatX)
alvec_val = rng.integers(low=0, high=5, size=3)
alvec_val[1] = -1
out = CrossentropySoftmaxArgmax1HotWithBias()(admat, advec, alvec)
@@ -392,7 +392,7 @@ def test_infer_shape(self):
admat = matrix()
alvec = lvector()
rng = np.random.default_rng(utt.fetch_seed())
- advec_val = rng.random((3)).astype(config.floatX)
+ advec_val = rng.random(3).astype(config.floatX)
admat_val = rng.random((3, 2)).astype(config.floatX)
alvec_val = [0, 1, 0]
self._compile_and_check(
diff --git a/tests/tensor/nnet/test_batchnorm.py b/tests/tensor/nnet/test_batchnorm.py
index 584e47467d..416060aa2b 100644
--- a/tests/tensor/nnet/test_batchnorm.py
+++ b/tests/tensor/nnet/test_batchnorm.py
@@ -43,10 +43,10 @@ def bn_ref(x, G, B, M, V):
v = vector("v")
x.tag.test_value = rng.random((2, 2)).astype(pytensor.config.floatX)
- b.tag.test_value = rng.random((2)).astype(pytensor.config.floatX)
- g.tag.test_value = rng.random((2)).astype(pytensor.config.floatX)
- m.tag.test_value = rng.random((2)).astype(pytensor.config.floatX)
- v.tag.test_value = rng.random((2)).astype(pytensor.config.floatX)
+ b.tag.test_value = rng.random(2).astype(pytensor.config.floatX)
+ g.tag.test_value = rng.random(2).astype(pytensor.config.floatX)
+ m.tag.test_value = rng.random(2).astype(pytensor.config.floatX)
+ v.tag.test_value = rng.random(2).astype(pytensor.config.floatX)
bn_ref_op = bn_ref(x, g, b, m, v)
f_ref = pytensor.function([x, b, g, m, v], [bn_ref_op])
@@ -558,7 +558,7 @@ def test_batch_normalization_train_broadcast():
assert len(nodes) == 1
assert isinstance(nodes[0].op, pytensor.compile.DeepCopyOp)
inputs = [
- np.asarray(np.random.random(((4,) * n)), x.dtype)
+ np.asarray(np.random.random((4,) * n), x.dtype)
for n in [
x.ndim,
scale.ndim,
diff --git a/tests/tensor/nnet/test_sigm.py b/tests/tensor/nnet/test_sigm.py
index bde534f340..cdc3f899e6 100644
--- a/tests/tensor/nnet/test_sigm.py
+++ b/tests/tensor/nnet/test_sigm.py
@@ -112,9 +112,9 @@ def test_composite_c_code(self):
assert isinstance(topo[0].op, Elemwise)
assert isinstance(topo[0].op.scalar_op, Composite)
- assert ultra_fast_scalar_sigmoid in set(
+ assert ultra_fast_scalar_sigmoid in {
node.op for node in topo[0].op.scalar_op.fgraph.toposort()
- )
+ }
assert len(topo) == 1
def test_local_hard_sigmoid(self):
diff --git a/tests/tensor/rewriting/test_basic.py b/tests/tensor/rewriting/test_basic.py
index 47b40bca0d..5a7de94242 100644
--- a/tests/tensor/rewriting/test_basic.py
+++ b/tests/tensor/rewriting/test_basic.py
@@ -387,7 +387,7 @@ def test_advanced_inc_subtensor(self):
len([n for n in f2.maker.fgraph.toposort() if isinstance(n.op, Alloc)]) == 0
)
- x_value = np.random.standard_normal((5)).astype(config.floatX)
+ x_value = np.random.standard_normal(5).astype(config.floatX)
y_value = np.random.standard_normal()
i_value = self.rng.integers(0, 3, size=(2, 3))
@@ -419,7 +419,7 @@ def test_advanced_inc_subtensor1(self):
len([n for n in f2.maker.fgraph.toposort() if isinstance(n.op, Alloc)]) == 0
)
- x_value = np.random.standard_normal((5)).astype(config.floatX)
+ x_value = np.random.standard_normal(5).astype(config.floatX)
y_value = np.random.standard_normal()
i_value = self.rng.integers(0, 3, size=2)
@@ -450,7 +450,7 @@ def test_incsubtensor(self):
len([n for n in f2.maker.fgraph.toposort() if isinstance(n.op, Alloc)]) == 0
)
- x_value = np.random.standard_normal((5)).astype(config.floatX)
+ x_value = np.random.standard_normal(5).astype(config.floatX)
y_value = np.random.standard_normal()
i_value = 3
@@ -490,7 +490,7 @@ def test_local_remove_useless_2(self):
fg = FunctionGraph(outputs=[assert_op(x, y, 1)], clone=False)
fg_res = rewrite_graph(fg, include=["canonicalize", "specialize"])
topo = fg_res.toposort()
- (assert_node,) = [node for node in topo if isinstance(node.op, CheckAndRaise)]
+ (assert_node,) = (node for node in topo if isinstance(node.op, CheckAndRaise))
assert assert_node.inputs == [x, y]
def test_local_remove_useless_3(self):
@@ -500,7 +500,7 @@ def test_local_remove_useless_3(self):
fg = FunctionGraph(outputs=[assert_op(x, y, 0)], clone=False)
fg_res = rewrite_graph(fg, include=["canonicalize", "specialize"])
topo = fg_res.toposort()
- (assert_node,) = [node for node in topo if isinstance(node.op, CheckAndRaise)]
+ (assert_node,) = (node for node in topo if isinstance(node.op, CheckAndRaise))
assert assert_node.inputs[:2] == [x, y]
assert assert_node.inputs[-1].data == 0
diff --git a/tests/tensor/rewriting/test_elemwise.py b/tests/tensor/rewriting/test_elemwise.py
index 4c02cf0956..a6cd369edb 100644
--- a/tests/tensor/rewriting/test_elemwise.py
+++ b/tests/tensor/rewriting/test_elemwise.py
@@ -277,13 +277,13 @@ class TestFusion:
def my_init(dtype="float64", num=0):
return np.zeros((5, 5), dtype=dtype) + num
- fw, fx, fy, fz = [
+ fw, fx, fy, fz = (
tensor(dtype="float32", shape=(None,) * 2, name=n) for n in "wxyz"
- ]
- dw, dx, dy, dz = [
+ )
+ dw, dx, dy, dz = (
tensor(dtype="float64", shape=(None,) * 2, name=n) for n in "wxyz"
- ]
- ix, iy, iz = [tensor(dtype="int32", shape=(None,) * 2, name=n) for n in "xyz"]
+ )
+ ix, iy, iz = (tensor(dtype="int32", shape=(None,) * 2, name=n) for n in "xyz")
fv = fvector("v")
fs = fscalar("s")
fwv = my_init("float32", 1)
diff --git a/tests/tensor/rewriting/test_math.py b/tests/tensor/rewriting/test_math.py
index ffe7b01c16..71c7fd9a2a 100644
--- a/tests/tensor/rewriting/test_math.py
+++ b/tests/tensor/rewriting/test_math.py
@@ -396,11 +396,11 @@ def test_elemwise_multiple_inputs_rewrites_2(self):
fxv = _asarray(np.random.random(shp), dtype="float32")
fyv = _asarray(np.random.random(shp), dtype="float32")
fzv = _asarray(np.random.random(shp), dtype="float32")
- fvv = _asarray(np.random.random((shp[0])), dtype="float32").reshape(1, shp[0])
+ fvv = _asarray(np.random.random(shp[0]), dtype="float32").reshape(1, shp[0])
dxv = _asarray(np.random.random(shp), dtype="float64")
dyv = _asarray(np.random.random(shp), dtype="float64")
dzv = _asarray(np.random.random(shp), dtype="float64")
- dvv = _asarray(np.random.random((shp[0])), dtype="float64").reshape(1, shp[0])
+ dvv = _asarray(np.random.random(shp[0]), dtype="float64").reshape(1, shp[0])
cases = [
(fx + fy, (fx, fy), (fxv, fyv), 1, "float32"),
(fx * fy, (fx, fy), (fxv, fyv), 1, "float32"),
@@ -536,12 +536,12 @@ def test_mul_div_cases(self):
fyv = _asarray(np.random.random(shp), dtype="float32")
fzv = _asarray(np.random.random(shp), dtype="float32")
fwv = _asarray(np.random.random(shp), dtype="float32")
- fvv = _asarray(np.random.random((shp[0])), dtype="float32").reshape(1, shp[0])
+ fvv = _asarray(np.random.random(shp[0]), dtype="float32").reshape(1, shp[0])
dxv = _asarray(np.random.random(shp), dtype="float64")
dyv = _asarray(np.random.random(shp), dtype="float64")
dzv = _asarray(np.random.random(shp), dtype="float64")
dwv = _asarray(np.random.random(shp), dtype="float64")
- dvv = _asarray(np.random.random((shp[0])), dtype="float64").reshape(1, shp[0])
+ dvv = _asarray(np.random.random(shp[0]), dtype="float64").reshape(1, shp[0])
# We must be sure that the `AlgebraicCanonizer` is working, but that we don't have other
# rewrites that could hide bugs in the `AlgebraicCanonizer` as `local_elemwise_fusion`
@@ -1036,10 +1036,10 @@ def sigm(x):
ival = np.random.random((5, 5))
wval = np.random.random((5, 5))
- visbval = np.random.random((5))
- hidbval = np.random.random((5))
- betaval = np.random.random((5))
- aval = np.random.random((5))
+ visbval = np.random.random(5)
+ hidbval = np.random.random(5)
+ betaval = np.random.random(5)
+ aval = np.random.random(5)
utt.assert_allclose(
f2(ival, wval, visbval, hidbval, betaval, aval),
@@ -1097,15 +1097,15 @@ def my_init(shp, dtype="float64", num=0):
ret = np.zeros(shp, dtype=dtype) + num
return ret
- fw, fx, fy, fz = [
+ fw, fx, fy, fz = (
tensor(dtype="float32", shape=(None,) * len(shp), name=n) for n in "wxyz"
- ]
- dw, dx, dy, dz = [
+ )
+ dw, dx, dy, dz = (
tensor(dtype="float64", shape=(None,) * len(shp), name=n) for n in "wxyz"
- ]
- ix, iy, iz = [
+ )
+ ix, iy, iz = (
tensor(dtype="int32", shape=(None,) * len(shp), name=n) for n in "xyz"
- ]
+ )
fv = fvector("v")
fs = fscalar("s")
@@ -1113,7 +1113,7 @@ def my_init(shp, dtype="float64", num=0):
fxv = my_init(shp, "float32", 2)
fyv = my_init(shp, "float32", 3)
fzv = my_init(shp, "float32", 4)
- fvv = _asarray(np.random.random((shp[0])), dtype="float32")
+ fvv = _asarray(np.random.random(shp[0]), dtype="float32")
fsv = np.asarray(np.random.random(), dtype="float32")
dwv = my_init(shp, "float64", 5)
ixv = _asarray(my_init(shp, num=60), dtype="int32")
@@ -1950,8 +1950,8 @@ def test_local_elemwise_sub_zeros():
mat = matrix()
rng = np.random.default_rng(seed=utt.fetch_seed())
- scalar_val = rng.random((1)).astype(config.floatX)[0]
- vect_val = rng.random((5)).astype(config.floatX)
+ scalar_val = rng.random(1).astype(config.floatX)[0]
+ vect_val = rng.random(5).astype(config.floatX)
mat_val = rng.random((3, 2)).astype(config.floatX)
mode = (
@@ -2040,7 +2040,7 @@ def test_local_useless_elemwise_comparison(self):
f = function([X, Y], Z, mode=mode)
f(
self.rng.random((2, 3)).astype(config.floatX),
- self.rng.random((2)).astype(config.floatX),
+ self.rng.random(2).astype(config.floatX),
)
# pytensor.printing.debugprint(f, print_type=True)
# here is the output for the debug print:
@@ -2323,7 +2323,7 @@ def test_local_mul_specialize():
def speed_local_pow_specialize_range():
- val = np.random.random((1e7))
+ val = np.random.random(int(1e7))
v = vector()
mode = get_default_mode()
mode_without_pow_rewrite = mode.excluding("local_pow_specialize")
@@ -3112,7 +3112,7 @@ def test_local_grad_log_erfc_neg(self):
def speed_local_log_erfc(self):
- val = np.random.random((1e6))
+ val = np.random.random(int(1e6))
x = vector()
mode = get_mode("FAST_RUN")
f1 = function([x], log(erfc(x)), mode=mode.excluding("local_log_erfc"))
@@ -3194,7 +3194,7 @@ def test_local_sum_prod_mul_by_scalar(self):
scalar1 = dscalar()
scalar2 = dscalar()
- v_val = np.random.random((2))
+ v_val = np.random.random(2)
m_val = np.random.random((2, 2))
s1_val = np.random.random()
s2_val = np.random.random()
@@ -3730,7 +3730,7 @@ def test_local_sum_div_dimshuffle(self):
rng = np.random.default_rng(utt.fetch_seed())
a_val = rng.standard_normal((2, 2)).astype(config.floatX)
- b_val = rng.standard_normal((2)).astype(config.floatX)
+ b_val = rng.standard_normal(2).astype(config.floatX)
c_val = rng.standard_normal((2, 2, 2)).astype(config.floatX)
d_val = np.asarray(rng.standard_normal(), config.floatX)
@@ -3785,7 +3785,7 @@ def test_local_prod_div_dimshuffle(self):
rng = np.random.default_rng(utt.fetch_seed())
a_val = rng.standard_normal((2, 2)).astype(config.floatX)
- b_val = rng.standard_normal((2)).astype(config.floatX)
+ b_val = rng.standard_normal(2).astype(config.floatX)
c_val = rng.standard_normal((2, 2, 2)).astype(config.floatX)
d_val = np.asarray(rng.standard_normal(), config.floatX)
@@ -4187,7 +4187,7 @@ def test_exp_over_1_plus_exp(self):
m = self.get_mode(excluding=["local_elemwise_fusion"])
x = vector()
- data = np.random.random((54)).astype(config.floatX)
+ data = np.random.random(54).astype(config.floatX)
# tests exp_over_1_plus_exp
f = pytensor.function([x], exp(x) / (1 + exp(x)), mode=m)
@@ -4470,7 +4470,7 @@ def test_logsigm_to_softplus(self):
assert isinstance(topo[0].op.scalar_op, pytensor.scalar.Neg)
assert isinstance(topo[1].op.scalar_op, pytensor.scalar.Softplus)
assert isinstance(topo[2].op.scalar_op, pytensor.scalar.Neg)
- f(np.random.random((54)).astype(config.floatX))
+ f(np.random.random(54).astype(config.floatX))
def test_log1msigm_to_softplus(self):
x = matrix()
@@ -4534,7 +4534,7 @@ def test_log1pexp_to_softplus(self):
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op.scalar_op, pytensor.scalar.Softplus)
- f(np.random.random((54)).astype(config.floatX))
+ f(np.random.random(54).astype(config.floatX))
def test_log1p_neg_sigmoid_to_softpuls(self):
x = scalar()
diff --git a/tests/tensor/rewriting/test_shape.py b/tests/tensor/rewriting/test_shape.py
index 58447fde23..00dd5c865f 100644
--- a/tests/tensor/rewriting/test_shape.py
+++ b/tests/tensor/rewriting/test_shape.py
@@ -378,7 +378,7 @@ def test_perform(self):
rng = np.random.default_rng(utt.fetch_seed())
advec = vector()
- advec_val = rng.random((3)).astype(config.floatX)
+ advec_val = rng.random(3).astype(config.floatX)
f = function([advec], Shape_i(0)(advec))
out = f(advec_val)
utt.assert_allclose(out, advec_val.shape[0])
diff --git a/tests/tensor/test_basic.py b/tests/tensor/test_basic.py
index 0b87e21be8..b34b170f7f 100644
--- a/tests/tensor/test_basic.py
+++ b/tests/tensor/test_basic.py
@@ -698,7 +698,7 @@ def setup_method(self):
self.rng = np.random.default_rng(seed=utt.fetch_seed())
def test_alloc_constant_folding(self):
- test_params = np.asarray(self.rng.standard_normal((50 * 60)), self.dtype)
+ test_params = np.asarray(self.rng.standard_normal(50 * 60), self.dtype)
some_vector = vector("some_vector", dtype=self.dtype)
some_matrix = some_vector.reshape((60, 50))
@@ -1345,8 +1345,8 @@ def test_stack_hessian(self):
Ha, Hb = hessian(B.sum(), [a, b])
# Try some values
- a_v = np.random.random((4))
- b_v = np.random.random((4))
+ a_v = np.random.random(4)
+ b_v = np.random.random(4)
f = pytensor.function([a, b], [Ha, Hb])
Ha_v, Hb_v = f(a_v, b_v)
# The Hessian is always a matrix full of 2
@@ -1364,8 +1364,8 @@ def test_stack_hessian2(self):
Ha, Hb = hessian(A.sum(), [a, b])
# Try some values
- a_v = np.random.random((4))
- b_v = np.random.random((4))
+ a_v = np.random.random(4)
+ b_v = np.random.random(4)
f = pytensor.function([a, b], [Ha, Hb])
Ha_v, Hb_v = f(a_v, b_v)
# The Hessian is always a matrix full of 0
@@ -1899,7 +1899,7 @@ def test_concatenate_same(self):
def test_mixed_ndim_error(self):
rng = np.random.default_rng(seed=utt.fetch_seed())
- v = self.shared(rng.random((4)).astype(self.floatX))
+ v = self.shared(rng.random(4).astype(self.floatX))
m = self.shared(rng.random((4, 4)).astype(self.floatX))
with pytest.raises(TypeError):
self.join_op(0, v, m)
@@ -2359,7 +2359,7 @@ def grad_tile(x, reps, np_x):
rng = np.random.default_rng(utt.fetch_seed())
# test vector
- grad_tile(vector("x"), [3], rng.standard_normal((5)).astype(config.floatX))
+ grad_tile(vector("x"), [3], rng.standard_normal(5).astype(config.floatX))
# test matrix
grad_tile(matrix("x"), [3, 4], rng.standard_normal((2, 3)).astype(config.floatX))
# test tensor3
@@ -3506,7 +3506,7 @@ def test_infer_shape(self):
def test_diag_grad(self):
rng = np.random.default_rng(utt.fetch_seed())
- x = rng.random((5))
+ x = rng.random(5)
utt.verify_grad(diag, [x], rng=rng)
x = rng.random((5, 3))
utt.verify_grad(diag, [x], rng=rng)
@@ -3656,7 +3656,7 @@ def test_stacklists():
result = f(1, 2, 3, 4)
assert result.shape == (2, 2, 1)
- a, b, c, d = [matrix(x) for x in "abcd"]
+ a, b, c, d = (matrix(x) for x in "abcd")
X = stacklists([[a, b], [c, d]])
f = function([a, b, c, d], X)
x = np.ones((4, 4), "float32")
diff --git a/tests/tensor/test_blas.py b/tests/tensor/test_blas.py
index 3b6a65c185..5aa77fea61 100644
--- a/tests/tensor/test_blas.py
+++ b/tests/tensor/test_blas.py
@@ -124,11 +124,11 @@ def cmp(self, z_, a_, x_, y_, b_):
b = np.asarray(b_, dtype=dtype)
def cmp_linker(z, a, x, y, b, l):
- z, a, x, y, b = [np.asarray(p) for p in (z, a, x, y, b)]
+ z, a, x, y, b = (np.asarray(p) for p in (z, a, x, y, b))
z_orig = z.copy()
- tz, ta, tx, ty, tb = [
+ tz, ta, tx, ty, tb = (
as_tensor_variable(p).type() for p in (z, a, x, y, b)
- ]
+ )
f = inplace_func(
[tz, ta, tx, ty, tb],
@@ -309,11 +309,11 @@ def test_transposes(self):
C = rng.random((4, 5))[:, :4]
def t(z, x, y, a=1.0, b=0.0, l="c|py", dt="float64"):
- z, a, x, y, b = [_asarray(p, dtype=dt) for p in (z, a, x, y, b)]
+ z, a, x, y, b = (_asarray(p, dtype=dt) for p in (z, a, x, y, b))
# z_orig = z.copy()
z_after = self._gemm(z, a, x, y, b)
- tz, ta, tx, ty, tb = [shared(p) for p in (z, a, x, y, b)]
+ tz, ta, tx, ty, tb = (shared(p) for p in (z, a, x, y, b))
# f = inplace_func([tz,ta,tx,ty,tb], gemm_inplace(tz,ta,tx,ty,tb),
# mode = Mode(optimizer = None, linker=l))
@@ -368,13 +368,13 @@ def test_non_contiguous(self):
C = rng.random((4, 4, 3))
def t(z, x, y, a=1.0, b=0.0, l="c|py", dt="float64"):
- z, a, x, y, b = [_asarray(p, dtype=dt) for p in (z, a, x, y, b)]
+ z, a, x, y, b = (_asarray(p, dtype=dt) for p in (z, a, x, y, b))
z_orig = z.copy()
z_after = np.zeros_like(z_orig)
for i in range(3):
z_after[:, :, i] = self._gemm(z[:, :, i], a, x[:, :, i], y[:, :, i], b)
- tz, ta, tx, ty, tb = [shared(p) for p in (z, a, x, y, b)]
+ tz, ta, tx, ty, tb = (shared(p) for p in (z, a, x, y, b))
for i in range(3):
f_i = inplace_func(
[],
@@ -1559,7 +1559,7 @@ def get_data(self, x_stride=1, y_stride=1):
return alpha, beta, a, x, y
def test_simple(self):
- alpha, beta, a, x, y = [shared(value) for value in self.get_data()]
+ alpha, beta, a, x, y = (shared(value) for value in self.get_data())
desired_oy = (
alpha.get_value() * matrixmultiply(a.get_value(), x.get_value())
+ beta.get_value() * y.get_value()
@@ -1597,7 +1597,7 @@ def test_default_beta_y(self):
def test_simple_transpose(self):
vs = self.get_data()
alpha_v, beta_v, a_v, x_v, y_v = vs
- alpha, beta, a, x, y = [shared(v) for v in vs]
+ alpha, beta, a, x, y = (shared(v) for v in vs)
desired_oy = alpha_v * matrixmultiply(np.transpose(a_v), x_v) + beta_v * y_v
@@ -1613,7 +1613,7 @@ def test_simple_transpose(self):
def test_x_stride(self):
vs = self.get_data(x_stride=2)
alpha_v, beta_v, a_v, x_v, y_v = vs
- alpha, beta, a, x, y = [shared(v) for v in vs]
+ alpha, beta, a, x, y = (shared(v) for v in vs)
desired_oy = alpha_v * matrixmultiply(a_v, x_v[::2]) + beta_v * y_v
@@ -1629,7 +1629,7 @@ def test_x_stride(self):
def test_x_stride_transpose(self):
vs = self.get_data(x_stride=2)
alpha_v, beta_v, a_v, x_v, y_v = vs
- alpha, beta, a, x, y = [shared(v) for v in vs]
+ alpha, beta, a, x, y = (shared(v) for v in vs)
desired_oy = (
alpha_v * matrixmultiply(np.transpose(a_v), x_v[::2]) + beta_v * y_v
@@ -1647,7 +1647,7 @@ def test_x_stride_transpose(self):
def test_y_stride(self):
vs = self.get_data(y_stride=2)
alpha_v, beta_v, a_v, x_v, y_v = vs
- alpha, beta, a, x, y = [shared(v) for v in vs]
+ alpha, beta, a, x, y = (shared(v) for v in vs)
desired_oy = alpha_v * matrixmultiply(a_v, x_v) + beta_v * y_v[::2]
@@ -1663,7 +1663,7 @@ def test_y_stride(self):
def test_y_stride_transpose(self):
vs = self.get_data(y_stride=2)
alpha_v, beta_v, a_v, x_v, y_v = vs
- alpha, beta, a, x, y = [shared(v) for v in vs]
+ alpha, beta, a, x, y = (shared(v) for v in vs)
desired_oy = (
alpha_v * matrixmultiply(np.transpose(a_v), x_v) + beta_v * y_v[::2]
@@ -1681,7 +1681,7 @@ def test_y_stride_transpose(self):
def test_a_strides(self):
vs = self.get_data()
alpha_v, beta_v, a_v, x_v, y_v = vs
- alpha, beta, a, x, y = [shared(v) for v in vs]
+ alpha, beta, a, x, y = (shared(v) for v in vs)
a_v = a_v[::-1, ::-1]
a.set_value(
a.get_value(borrow=True, return_internal_type=True)[::-1, ::-1], borrow=True
@@ -1701,7 +1701,7 @@ def test_a_strides(self):
def test_a_strides_transpose(self):
vs = self.get_data()
alpha_v, beta_v, a_v, x_v, y_v = vs
- alpha, beta, a, x, y = [shared(v) for v in vs]
+ alpha, beta, a, x, y = (shared(v) for v in vs)
a_v = a_v[::-1, ::-1]
a.set_value(
a.get_value(borrow=True, return_internal_type=True)[::-1, ::-1], borrow=True
@@ -1939,8 +1939,8 @@ def test_outer(self):
f = self.function([self.x, self.y], outer(self.x, self.y))
self.assertFunctionContains(f, self.ger_destructive)
f(
- rng.random((5)).astype(self.dtype),
- rng.random((4)).astype(self.dtype),
+ rng.random(5).astype(self.dtype),
+ rng.random(4).astype(self.dtype),
).shape == (5, 4)
def test_A_plus_outer(self):
@@ -1949,13 +1949,13 @@ def test_A_plus_outer(self):
self.assertFunctionContains(f, self.ger)
f(
rng.random((5, 4)).astype(self.dtype),
- rng.random((5)).astype(self.dtype),
- rng.random((4)).astype(self.dtype),
+ rng.random(5).astype(self.dtype),
+ rng.random(4).astype(self.dtype),
).shape == (5, 4)
f(
rng.random((5, 4)).astype(self.dtype)[::-1, ::-1],
- rng.random((5)).astype(self.dtype),
- rng.random((4)).astype(self.dtype),
+ rng.random(5).astype(self.dtype),
+ rng.random(4).astype(self.dtype),
).shape == (5, 4)
def test_A_plus_scaled_outer(self):
@@ -1966,13 +1966,13 @@ def test_A_plus_scaled_outer(self):
self.assertFunctionContains(f, self.ger)
f(
rng.random((5, 4)).astype(self.dtype),
- rng.random((5)).astype(self.dtype),
- rng.random((4)).astype(self.dtype),
+ rng.random(5).astype(self.dtype),
+ rng.random(4).astype(self.dtype),
).shape == (5, 4)
f(
rng.random((5, 4)).astype(self.dtype)[::-1, ::-1],
- rng.random((5)).astype(self.dtype),
- rng.random((4)).astype(self.dtype),
+ rng.random(5).astype(self.dtype),
+ rng.random(4).astype(self.dtype),
).shape == (5, 4)
def test_scaled_A_plus_scaled_outer(self):
@@ -1987,13 +1987,13 @@ def test_scaled_A_plus_scaled_outer(self):
self.assertFunctionContains(f, self.gemm)
f(
rng.random((5, 4)).astype(self.dtype),
- rng.random((5)).astype(self.dtype),
- rng.random((4)).astype(self.dtype),
+ rng.random(5).astype(self.dtype),
+ rng.random(4).astype(self.dtype),
).shape == (5, 4)
f(
rng.random((5, 4)).astype(self.dtype)[::-1, ::-1],
- rng.random((5)).astype(self.dtype),
- rng.random((4)).astype(self.dtype),
+ rng.random(5).astype(self.dtype),
+ rng.random(4).astype(self.dtype),
).shape == (5, 4)
def given_dtype(self, dtype, M, N, *, destructive=True):
@@ -2010,13 +2010,13 @@ def given_dtype(self, dtype, M, N, *, destructive=True):
)
f(
rng.random((M, N)).astype(dtype),
- rng.random((M)).astype(dtype),
- rng.random((N)).astype(dtype),
+ rng.random(M).astype(dtype),
+ rng.random(N).astype(dtype),
).shape == (5, 4)
f(
rng.random((M, N)).astype(dtype)[::-1, ::-1],
- rng.random((M)).astype(dtype),
- rng.random((N)).astype(dtype),
+ rng.random(M).astype(dtype),
+ rng.random(N).astype(dtype),
).shape == (5, 4)
def test_f32_0_0(self):
@@ -2070,16 +2070,16 @@ def test_inplace(self):
self.assertFunctionContains(f, self.ger_destructive)
# TODO: Test something about the updated value of `A`
f(
- rng.random((4)).astype(self.dtype),
- rng.random((5)).astype(self.dtype),
+ rng.random(4).astype(self.dtype),
+ rng.random(5).astype(self.dtype),
)
A.set_value(
A.get_value(borrow=True, return_internal_type=True)[::-1, ::-1], borrow=True
)
f(
- rng.random((4)).astype(self.dtype),
- rng.random((5)).astype(self.dtype),
+ rng.random(4).astype(self.dtype),
+ rng.random(5).astype(self.dtype),
)
diff --git a/tests/tensor/test_blas_c.py b/tests/tensor/test_blas_c.py
index 245eb57a3c..b377d80b3d 100644
--- a/tests/tensor/test_blas_c.py
+++ b/tests/tensor/test_blas_c.py
@@ -281,8 +281,8 @@ def test_multiple_inplace(self):
[x, y, z], [at.dot(y, x), at.dot(z, x)], mode=mode_blas_opt
)
vx = np.random.random((3, 3))
- vy = np.random.random((3))
- vz = np.random.random((3))
+ vy = np.random.random(3)
+ vz = np.random.random(3)
out = f(vx, vy, vz)
assert np.allclose(out[0], np.dot(vy, vx))
assert np.allclose(out[1], np.dot(vz, vx))
diff --git a/tests/tensor/test_complex.py b/tests/tensor/test_complex.py
index 5360e194a5..593418da31 100644
--- a/tests/tensor/test_complex.py
+++ b/tests/tensor/test_complex.py
@@ -25,7 +25,7 @@ def test_basic(self):
def test_on_real_input(self):
x = dvector()
rng = np.random.default_rng(23)
- xval = rng.standard_normal((10))
+ xval = rng.standard_normal(10)
np.all(0 == pytensor.function([x], imag(x))(xval))
np.all(xval == pytensor.function([x], real(x))(xval))
@@ -102,7 +102,7 @@ def f(a, b):
rng = np.random.default_rng(9333)
aval = np.asarray(rng.standard_normal((2, 5)))
- bval = rng.standard_normal((5))
+ bval = rng.standard_normal(5)
try:
utt.verify_grad(f, [aval, bval])
except GradientError as e:
diff --git a/tests/tensor/test_fourier.py b/tests/tensor/test_fourier.py
index 12cd75a173..44fa83fea3 100644
--- a/tests/tensor/test_fourier.py
+++ b/tests/tensor/test_fourier.py
@@ -25,7 +25,7 @@ def test_perform(self):
def test_infer_shape(self):
a = dvector()
self._compile_and_check(
- [a], [self.op(a, 16, 0)], [np.random.random((12))], self.op_class
+ [a], [self.op(a, 16, 0)], [np.random.random(12)], self.op_class
)
a = dmatrix()
for var in [
@@ -61,7 +61,7 @@ def fft_test4(a):
np.random.random((5, 2, 4, 3)),
np.random.random((2, 3, 4)),
np.random.random((2, 5)),
- np.random.random((5)),
+ np.random.random(5),
]
for fft_test in [fft_test1, fft_test2, fft_test3, fft_test4]:
for pt in pts:
diff --git a/tests/tensor/test_nlinalg.py b/tests/tensor/test_nlinalg.py
index 2e587aef17..b39ac0ba3c 100644
--- a/tests/tensor/test_nlinalg.py
+++ b/tests/tensor/test_nlinalg.py
@@ -331,7 +331,7 @@ def test_eval(self):
A = matrix(dtype=self.dtype)
assert [e.eval({A: [[1]]}) for e in self.op(A)] == [[1.0], [[1.0]]]
x = [[0, 1], [1, 0]]
- w, v = [e.eval({A: x}) for e in self.op(A)]
+ w, v = (e.eval({A: x}) for e in self.op(A))
assert_array_almost_equal(np.dot(x, v), w * v)
@@ -341,8 +341,8 @@ class TestEigh(TestEig):
def test_uplo(self):
S = self.S
a = matrix(dtype=self.dtype)
- wu, vu = [out.eval({a: S}) for out in self.op(a, "U")]
- wl, vl = [out.eval({a: S}) for out in self.op(a, "L")]
+ wu, vu = (out.eval({a: S}) for out in self.op(a, "U"))
+ wl, vl = (out.eval({a: S}) for out in self.op(a, "L"))
assert_array_almost_equal(wu, wl)
assert_array_almost_equal(vu * np.sign(vu[0, :]), vl * np.sign(vl[0, :]))
@@ -451,7 +451,7 @@ def test_numpy_compare(self):
V = vector("V", dtype=config.floatX)
a = rng.random((4, 4)).astype(config.floatX)
- b = rng.random((4)).astype(config.floatX)
+ b = rng.random(4).astype(config.floatX)
A = (
[None, "fro", "inf", "-inf", 1, -1, None, "inf", "-inf", 0, 1, -1, 2, -2],
diff --git a/tests/tensor/test_shape.py b/tests/tensor/test_shape.py
index 830f0f59ca..957d1e76d5 100644
--- a/tests/tensor/test_shape.py
+++ b/tests/tensor/test_shape.py
@@ -438,12 +438,12 @@ def test_bad_shape(self):
specify_shape = SpecifyShape()
x = vector()
- xval = np.random.random((2)).astype(config.floatX)
+ xval = np.random.random(2).astype(config.floatX)
f = pytensor.function([x], specify_shape(x, 2), mode=self.mode)
assert np.array_equal(f(xval), xval)
- xval = np.random.random((3)).astype(config.floatX)
+ xval = np.random.random(3).astype(config.floatX)
with pytest.raises(AssertionError, match="SpecifyShape:.*"):
f(xval)
diff --git a/tests/tensor/test_slinalg.py b/tests/tensor/test_slinalg.py
index cc0043b1f8..dda92b9072 100644
--- a/tests/tensor/test_slinalg.py
+++ b/tests/tensor/test_slinalg.py
@@ -386,7 +386,7 @@ def test_infer_shape(self):
# A must be square
[
np.asarray(rng.random((5, 5)), dtype=config.floatX),
- np.asarray(rng.random((5)), dtype=config.floatX),
+ np.asarray(rng.random(5), dtype=config.floatX),
],
self.op_class,
warn=False,
@@ -492,7 +492,7 @@ def test_expm_grad_2():
rng = np.random.default_rng(utt.fetch_seed())
# Always test in float64 for better numerical stability.
A = rng.standard_normal((5, 5))
- w = rng.standard_normal((5)) ** 2
+ w = rng.standard_normal(5) ** 2
A = (np.diag(w**0.5)).dot(A + A.T).dot(np.diag(w ** (-0.5)))
assert not np.allclose(A, A.T)
diff --git a/tests/tensor/test_sort.py b/tests/tensor/test_sort.py
index 9c2262143f..9334776652 100644
--- a/tests/tensor/test_sort.py
+++ b/tests/tensor/test_sort.py
@@ -44,7 +44,7 @@ class TestSort:
def setup_method(self):
self.rng = np.random.default_rng(seed=utt.fetch_seed())
self.m_val = self.rng.random((3, 2))
- self.v_val = self.rng.random((4))
+ self.v_val = self.rng.random(4)
def test1(self):
a = dmatrix()
@@ -98,11 +98,11 @@ def test_None(self):
utt.assert_allclose(gv, gt)
def test_grad_vector(self):
- data = self.rng.random((10)).astype(pytensor.config.floatX)
+ data = self.rng.random(10).astype(pytensor.config.floatX)
utt.verify_grad(sort, [data])
def test_grad_none_axis(self):
- data = self.rng.random((10)).astype(pytensor.config.floatX)
+ data = self.rng.random(10).astype(pytensor.config.floatX)
utt.verify_grad(lambda x: sort(x, None), [data])
utt.verify_grad(lambda x: sort(x, 0), [data])
@@ -185,7 +185,7 @@ def test_argsort():
# Set up
rng = np.random.default_rng(seed=utt.fetch_seed())
m_val = rng.random((3, 2))
- v_val = rng.random((4))
+ v_val = rng.random(4)
# Example 1
a = dmatrix()
diff --git a/tests/tensor/test_special.py b/tests/tensor/test_special.py
index 842f5f03c7..a6b3b765ca 100644
--- a/tests/tensor/test_special.py
+++ b/tests/tensor/test_special.py
@@ -56,7 +56,7 @@ def f(a):
return softmax(a, axis=None)
rng = np.random.default_rng(utt.fetch_seed())
- utt.verify_grad(f, [rng.random((4))])
+ utt.verify_grad(f, [rng.random(4)])
def test_valid_axis(self):
with pytest.raises(TypeError):
diff --git a/tests/tensor/test_subtensor.py b/tests/tensor/test_subtensor.py
index 19eb963a72..f934048885 100644
--- a/tests/tensor/test_subtensor.py
+++ b/tests/tensor/test_subtensor.py
@@ -941,7 +941,7 @@ def fun(x, y):
x = tensor4("x", dtype=self.dtype)
indexes = pytensor.shared(np.int32([1, 2, 3, 4]))
- W = self.shared(np.random.random(((10, 10, 3, 3))).astype(self.dtype))
+ W = self.shared(np.random.random((10, 10, 3, 3)).astype(self.dtype))
h = x + W
h = set_subtensor(h[indexes], h[indexes])
@@ -954,7 +954,7 @@ def fun(x, y):
N = 3
f = self.function([x], g, op=AdvancedIncSubtensor1, N=N)
- f(np.random.random(((10, 10, 3, 3))).astype(self.dtype))
+ f(np.random.random((10, 10, 3, 3)).astype(self.dtype))
def test_adv_sub1_idx_broadcast(self):
# The idx can be a broadcastable vector.
@@ -1484,7 +1484,7 @@ def test_wrong_broadcast(self):
rng = np.random.default_rng(utt.fetch_seed())
def rng_randX(*shape):
- return rng.random((shape)).astype(pytensor.config.floatX)
+ return rng.random(shape).astype(pytensor.config.floatX)
for op in (set_subtensor, inc_subtensor):
for base in (a[:], a[0]):
@@ -1828,7 +1828,7 @@ def test_inc_adv_subtensor_w_matrix(self):
def test_adv_subtensor_w_int_and_matrix(self):
subt = self.ft4[0, :, self.ix2, :]
f = pytensor.function([self.ft4, self.ix2], subt, mode=self.mode)
- ft4v = np.random.random(((2, 3, 4, 5))).astype("float32")
+ ft4v = np.random.random((2, 3, 4, 5)).astype("float32")
ix2v = np.asarray([[0, 1], [1, 0]])
aval = f(ft4v, ix2v)
rval = ft4v[0, :, ix2v, :]
@@ -1837,7 +1837,7 @@ def test_adv_subtensor_w_int_and_matrix(self):
def test_adv_subtensor_w_none_and_matrix(self):
subt = self.ft4[:, None, :, self.ix2, :]
f = pytensor.function([self.ft4, self.ix2], subt, mode=self.mode)
- ft4v = np.random.random(((2, 3, 4, 5))).astype("float32")
+ ft4v = np.random.random((2, 3, 4, 5)).astype("float32")
ix2v = np.asarray([[0, 1], [1, 0]])
aval = f(ft4v, ix2v)
rval = ft4v[:, None, :, ix2v, :]
@@ -1846,7 +1846,7 @@ def test_adv_subtensor_w_none_and_matrix(self):
def test_adv_subtensor_w_slice_and_matrix(self):
subt = self.ft4[:, 0:1, self.ix2, :]
f = pytensor.function([self.ft4, self.ix2], subt, mode=self.mode)
- ft4v = np.random.random(((2, 3, 4, 5))).astype("float32")
+ ft4v = np.random.random((2, 3, 4, 5)).astype("float32")
ix2v = np.asarray([[0, 1], [1, 0]])
aval = f(ft4v, ix2v)
rval = ft4v[:, 0:1, ix2v, :]
@@ -1855,7 +1855,7 @@ def test_adv_subtensor_w_slice_and_matrix(self):
def test_adv_subtensor_w_matrix_and_int(self):
subt = self.ft4[:, :, self.ix2, 0]
f = pytensor.function([self.ft4, self.ix2], subt, mode=self.mode)
- ft4v = np.random.random(((2, 3, 4, 5))).astype("float32")
+ ft4v = np.random.random((2, 3, 4, 5)).astype("float32")
ix2v = np.asarray([[0, 1], [1, 0]])
aval = f(ft4v, ix2v)
rval = ft4v[:, :, ix2v, 0]
@@ -1864,7 +1864,7 @@ def test_adv_subtensor_w_matrix_and_int(self):
def test_adv_subtensor_w_matrix_and_none(self):
subt = self.ft4[:, :, self.ix2, None, :]
f = pytensor.function([self.ft4, self.ix2], subt, mode=self.mode)
- ft4v = np.random.random(((2, 3, 4, 5))).astype("float32")
+ ft4v = np.random.random((2, 3, 4, 5)).astype("float32")
ix2v = np.asarray([[0, 1], [1, 0]])
aval = f(ft4v, ix2v)
rval = ft4v[:, :, ix2v, None, :]
@@ -2095,7 +2095,7 @@ def fun(x, y):
fun,
[
np.random.random((5, 5)).astype(self.dtype),
- np.random.random((2)).astype(self.dtype),
+ np.random.random(2).astype(self.dtype),
],
mode=self.mode,
)
@@ -2107,7 +2107,7 @@ def fun(x, y):
fun,
[
np.random.random((5, 5)).astype(self.dtype),
- np.random.random((2)).astype(self.dtype),
+ np.random.random(2).astype(self.dtype),
],
mode=self.mode,
)
@@ -2122,7 +2122,7 @@ def fun(x, y):
fun,
[
np.random.random((2, 2)).astype(self.dtype),
- np.random.random((2)).astype(self.dtype),
+ np.random.random(2).astype(self.dtype),
],
mode=self.mode,
)
@@ -2136,7 +2136,7 @@ def fun(x, y):
fun,
[
np.random.random((2, 2)).astype(self.dtype),
- np.random.random((2)).astype(self.dtype),
+ np.random.random(2).astype(self.dtype),
],
mode=self.mode,
)
diff --git a/tests/test_gradient.py b/tests/test_gradient.py
index 2e76b119ab..c102d22e06 100644
--- a/tests/test_gradient.py
+++ b/tests/test_gradient.py
@@ -382,7 +382,7 @@ def output(x):
rng = np.random.default_rng([2012, 8, 28])
- vx = rng.standard_normal((2))
+ vx = rng.standard_normal(2)
utt.verify_grad(output, [vx])
@@ -394,7 +394,7 @@ def cost(x, A):
rng = np.random.default_rng([2012, 8, 28])
- vx = rng.standard_normal((2))
+ vx = rng.standard_normal(2)
vA = rng.standard_normal((2, 2))
utt.verify_grad(cost, [vx, vA])
@@ -407,7 +407,7 @@ def output(x, A):
rng = np.random.default_rng([2012, 8, 28])
- vx = rng.standard_normal((2))
+ vx = rng.standard_normal(2)
vA = rng.standard_normal((2, 2))
utt.verify_grad(output, [vx, vA])
@@ -420,7 +420,7 @@ def cost(x, A):
rng = np.random.default_rng([2012, 8, 28])
- vx = rng.standard_normal((2))
+ vx = rng.standard_normal(2)
vA = rng.standard_normal((2, 2))
utt.verify_grad(cost, [vx, vA])
@@ -434,7 +434,7 @@ def output(x, A):
rng = np.random.default_rng([2012, 8, 28])
- vx = rng.standard_normal((2))
+ vx = rng.standard_normal(2)
vA = rng.standard_normal((2, 2))
utt.verify_grad(output, [vx, vA])
@@ -448,7 +448,7 @@ def output(x, A):
rng = np.random.default_rng([2012, 8, 28])
- vx = rng.standard_normal((2))
+ vx = rng.standard_normal(2)
vA = rng.standard_normal((2, 2))
utt.verify_grad(output, [vx, vA])
@@ -483,7 +483,7 @@ def make_grad_func(X):
X = np.cast[int_type](rng.standard_normal((m, d)) * 127.0)
W = np.cast[W.dtype](rng.standard_normal((d, n)))
- b = np.cast[b.dtype](rng.standard_normal((n)))
+ b = np.cast[b.dtype](rng.standard_normal(n))
int_result = int_func(X, W, b)
float_result = float_func(np.cast[float_type](X), W, b)
@@ -508,7 +508,7 @@ def test_grad_disconnected(self):
# the output
f = pytensor.function([x], g)
rng = np.random.default_rng([2012, 9, 5])
- x = np.cast[x.dtype](rng.standard_normal((3)))
+ x = np.cast[x.dtype](rng.standard_normal(3))
g = f(x)
assert np.allclose(g, np.ones(x.shape, dtype=x.dtype))
@@ -629,7 +629,7 @@ def test_known_grads():
inputs = [coeffs, t, x]
rng = np.random.default_rng([2012, 11, 15])
- values = [rng.standard_normal((10)), rng.integers(10), rng.standard_normal()]
+ values = [rng.standard_normal(10), rng.integers(10), rng.standard_normal()]
values = [np.cast[ipt.dtype](value) for ipt, value in zip(inputs, values)]
true_grads = grad(cost, inputs, disconnected_inputs="ignore")
@@ -742,7 +742,7 @@ def test_subgraph_grad():
inputs = [t, x]
rng = np.random.default_rng([2012, 11, 15])
- values = [rng.standard_normal((2)), rng.standard_normal((3))]
+ values = [rng.standard_normal(2), rng.standard_normal(3)]
values = [np.cast[ipt.dtype](value) for ipt, value in zip(inputs, values)]
wrt = [w2, w1]
@@ -849,8 +849,8 @@ def test_rop(self):
rop = Rop(y, x, v)
f = pytensor.function([x, v], rop, on_unused_input="ignore")
- a = np.asarray(self.rng.standard_normal((5)), dtype=config.floatX)
- u = np.asarray(self.rng.standard_normal((5)), dtype=config.floatX)
+ a = np.asarray(self.rng.standard_normal(5), dtype=config.floatX)
+ u = np.asarray(self.rng.standard_normal(5), dtype=config.floatX)
assert np.count_nonzero(f(a, u)) == 0
diff --git a/tests/test_ifelse.py b/tests/test_ifelse.py
index 0bcc9b51cf..5c0c6ddb2e 100644
--- a/tests/test_ifelse.py
+++ b/tests/test_ifelse.py
@@ -289,7 +289,7 @@ def test_multiple_out_crash(self):
def test_dtype_mismatch(self):
rng = np.random.default_rng(utt.fetch_seed())
- data = rng.random((5)).astype(self.dtype)
+ data = rng.random(5).astype(self.dtype)
x = self.shared(data)
y = at.cast(x * 10, "int8")
cond = iscalar("cond")
@@ -301,7 +301,7 @@ def test_dtype_mismatch(self):
def test_ndim_mismatch(self):
rng = np.random.default_rng(utt.fetch_seed())
- data = rng.random((5)).astype(self.dtype)
+ data = rng.random(5).astype(self.dtype)
x = self.shared(data)
y = col("y", self.dtype)
cond = iscalar("cond")
@@ -313,7 +313,7 @@ def test_ndim_mismatch(self):
def test_broadcast_mismatch(self):
rng = np.random.default_rng(utt.fetch_seed())
- data = rng.random((5)).astype(self.dtype)
+ data = rng.random(5).astype(self.dtype)
x = self.shared(data)
# print x.broadcastable
y = row("y", self.dtype)
@@ -508,7 +508,7 @@ def test_grad_test_values(self):
pytensor.grad(ifelse(0, x, x), x)
def test_grad_int_value(self):
- w = pytensor.shared(np.random.random((10)))
+ w = pytensor.shared(np.random.random(10))
b = pytensor.shared(np.random.random())
params = [w, b]
diff --git a/versioneer.py b/versioneer.py
index 8ac884a98a..d9ead36baf 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -1,4 +1,3 @@
-
# Version: 0.23.dev0
"""The Versioneer - like a rocketeer, but for versions.
@@ -342,7 +341,7 @@ def get_config_from_root(root):
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.ConfigParser()
- with open(setup_cfg, "r") as cfg_file:
+ with open(setup_cfg) as cfg_file:
parser.read_file(cfg_file)
VCS = parser.get("versioneer", "VCS") # mandatory
@@ -412,7 +411,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
return None, None
else:
if verbose:
- print("unable to find command, tried %s" % (commands,))
+ print(f"unable to find command, tried {commands}")
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
@@ -1093,7 +1092,7 @@ def git_get_keywords(versionfile_abs):
# _version.py.
keywords = {}
try:
- with open(versionfile_abs, "r") as fobj:
+ with open(versionfile_abs) as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
@@ -1332,7 +1331,7 @@ def do_vcs_install(manifest_in, versionfile_source, ipy):
files.append(versioneer_file)
present = False
try:
- with open(".gitattributes", "r") as fobj:
+ with open(".gitattributes") as fobj:
for line in fobj:
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
@@ -1414,7 +1413,7 @@ def write_to_version_file(filename, versions):
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
- print("set %s to '%s'" % (filename, versions["version"]))
+ print("set {} to '{}'".format(filename, versions["version"]))
def plus_or_dot(pieces):
@@ -1714,7 +1713,7 @@ def get_versions(verbose=False):
try:
ver = versions_from_file(versionfile_abs)
if verbose:
- print("got version from file %s %s" % (versionfile_abs, ver))
+ print(f"got version from file {versionfile_abs} {ver}")
return ver
except NotThisMethod:
pass
@@ -2031,7 +2030,7 @@ def do_setup():
"__init__.py")
if os.path.exists(ipy):
try:
- with open(ipy, "r") as f:
+ with open(ipy) as f:
old = f.read()
except OSError:
old = ""
@@ -2058,7 +2057,7 @@ def do_setup():
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
- with open(manifest_in, "r") as f:
+ with open(manifest_in) as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
@@ -2095,7 +2094,7 @@ def scan_setup_py():
found = set()
setters = False
errors = 0
- with open("setup.py", "r") as f:
+ with open("setup.py") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")