10 changes: 5 additions & 5 deletions pytensor/compile/profiling.py
@@ -43,7 +43,7 @@ def extended_open(filename, mode="r"):

logger = logging.getLogger("pytensor.compile.profiling")

-pytensor_imported_time: float = time.time()
+pytensor_imported_time: float = time.perf_counter()
total_fct_exec_time: float = 0.0
total_graph_rewrite_time: float = 0.0
total_time_linker: float = 0.0
@@ -165,7 +165,7 @@ def print_global_stats():
print(
(
"Global stats: ",
f"Time elasped since PyTensor import = {time.time() - pytensor_imported_time:6.3f}s, "
f"Time elasped since PyTensor import = {time.perf_counter() - pytensor_imported_time:6.3f}s, "
f"Time spent in PyTensor functions = {total_fct_exec_time:6.3f}s, "
"Time spent compiling PyTensor functions: "
f"rewriting = {total_graph_rewrite_time:6.3f}s, linking = {total_time_linker:6.3f}s ",
@@ -831,7 +831,7 @@ def summary_globals(self, file):
f"Time in all call to pytensor.grad() {pytensor.gradient.grad_time:e}s",
file=file,
)
-total_time = time.time() - pytensor_imported_time
+total_time = time.perf_counter() - pytensor_imported_time
print(f"Time since pytensor import {total_time:.3f}s", file=file)

def summary_memory(self, file, N=None):
@@ -1299,9 +1299,9 @@ def compute_max_stats(running_memory, stats):
# Config: whether print min memory peak
if config.profiling__min_peak_memory:
node_list = fgraph.apply_nodes
-ttt = time.time()
+ttt = time.perf_counter()
min_peak = count_minimum_peak(node_list, fgraph, nodes_mem)
-min_peak_time += time.time() - ttt
+min_peak_time += time.perf_counter() - ttt
min_max_peak = max(min_max_peak, min_peak)

del fgraph, nodes_mem
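The whole PR applies one pattern: intervals previously measured by subtracting two time.time() readings are now measured with time.perf_counter(). perf_counter() is monotonic and uses the highest-resolution clock available, so measured durations cannot go negative or jump when the system clock is adjusted (for example by NTP). A minimal sketch of the pattern, with a hypothetical work() standing in for the profiled call:

import time

def work() -> None:
    # Hypothetical stand-in for the call being profiled.
    sum(i * i for i in range(100_000))

# Take two readings of the same monotonic clock and subtract;
# only the difference is meaningful, because perf_counter's
# reference point is arbitrary.
t0 = time.perf_counter()
work()
elapsed = time.perf_counter() - t0
print(f"work() took {elapsed:6.3f}s")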
4 changes: 2 additions & 2 deletions pytensor/gradient.py
@@ -492,7 +492,7 @@ def grad(
respect to the output, then a zero variable is returned.

"""
-t0 = time.time()
+t0 = time.perf_counter()

if cost is None:
if known_grads is None:
@@ -643,7 +643,7 @@ def handle_disconnected(var):
else:
assert return_disconnected.lower() == "disconnected"

-t1 = time.time()
+t1 = time.perf_counter()
global grad_time
grad_time += t1 - t0

4 changes: 2 additions & 2 deletions pytensor/graph/features.py
@@ -473,7 +473,7 @@ def validate_(self, fgraph):
exception. replace_all_validate will print out the
verbose output. Or it has to be done here before raise.
"""
-t0 = time.time()
+t0 = time.perf_counter()
try:
ret = fgraph.execute_callbacks("validate")
except Exception as e:
@@ -494,7 +494,7 @@ def validate_(self, fgraph):
reason = uf_info.function
print(f"validate failed on node {r}.\n Reason: {reason}, {e}")
raise
-t1 = time.time()
+t1 = time.perf_counter()
if fgraph.profile:
fgraph.profile.validate_time += t1 - t0
return ret
8 changes: 4 additions & 4 deletions pytensor/graph/fg.py
@@ -717,7 +717,7 @@ def execute_callbacks(self, name: str, *args, **kwargs) -> None:
a method called after name.

"""
-t0 = time.time()
+t0 = time.perf_counter()
for feature in self._features:
try:
fn = getattr(feature, name)
@@ -726,10 +726,10 @@ def execute_callbacks(self, name: str, *args, **kwargs) -> None:
# try; the AttributeError really must come from feature.${name}
# not existing
continue
-tf0 = time.time()
+tf0 = time.perf_counter()
fn(self, *args, **kwargs)
-self.execute_callbacks_times[feature] += time.time() - tf0
-self.execute_callbacks_time += time.time() - t0
+self.execute_callbacks_times[feature] += time.perf_counter() - tf0
+self.execute_callbacks_time += time.perf_counter() - t0

def collect_callbacks(self, name: str, *args) -> Dict[Feature, Any]:
"""Collects callbacks
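The execute_callbacks hunk keeps two accumulators: per-feature time in a dict and a running total across all features, taken from the same pair of perf_counter() readings. A minimal sketch of that two-level bookkeeping, using hypothetical Feature objects with a callback() method (illustrative names, not PyTensor's API):

import time
from collections import defaultdict

class Feature:
    # Hypothetical feature whose callback we want to time.
    def callback(self) -> None:
        sum(range(50_000))  # stand-in work

features = [Feature(), Feature()]
callback_times: defaultdict[Feature, float] = defaultdict(float)
total_callback_time = 0.0

t0 = time.perf_counter()
for feature in features:
    tf0 = time.perf_counter()
    feature.callback()
    # Mirrors execute_callbacks_times[feature] in the diff.
    callback_times[feature] += time.perf_counter() - tf0
# Mirrors execute_callbacks_time: the total includes loop overhead.
total_callback_time += time.perf_counter() - t0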
54 changes: 27 additions & 27 deletions pytensor/graph/rewriting/basic.py
@@ -298,9 +298,9 @@ def apply(self, fgraph):
for rewriter in self.data:
try:
nb_nodes_before = len(fgraph.apply_nodes)
-t0 = time.time()
+t0 = time.perf_counter()
sub_prof = rewriter.apply(fgraph)
-l.append(float(time.time() - t0))
+l.append(float(time.perf_counter() - t0))
sub_profs.append(sub_prof)
nb_nodes.append((nb_nodes_before, len(fgraph.apply_nodes)))
if fgraph.profile:
@@ -701,7 +701,7 @@ def add_requirements(self, fgraph):
def apply(self, fgraph):
sched = fgraph.merge_feature.scheduled
nb_fail = 0
-t0 = time.time()
+t0 = time.perf_counter()
if fgraph.profile:
validate_before = fgraph.profile.validate_time
callback_before = fgraph.execute_callbacks_time
@@ -807,7 +807,7 @@ def apply(self, fgraph):

return (
nb_fail,
-time.time() - t0,
+time.perf_counter() - t0,
validate_time,
callback_time,
callbacks_time,
@@ -1066,9 +1066,9 @@ def get_rewrites(self, node):
return self.track_dict[type(node.op)]

def time_call(self, fn):
-start = time.time()
+start = time.perf_counter()
fn()
-return time.time() - start
+return time.perf_counter() - start


class FromFunctionNodeRewriter(NodeRewriter):
@@ -1303,9 +1303,9 @@ def transform(self, fgraph, node):

new_repl = None
for rewrite in rewrites:
-rewrite_start = time.time()
+rewrite_start = time.perf_counter()
new_repl = rewrite.transform(fgraph, node)
-rewrite_finish = time.time()
+rewrite_finish = time.perf_counter()
if self.profile:
self.time_rewrites[rewrite] += rewrite_start - rewrite_finish
self.process_count[rewrite] += 1
@@ -2026,9 +2026,9 @@ def apply(self, fgraph, start_from=None):
start_from = fgraph.outputs
callback_before = fgraph.execute_callbacks_time
nb_nodes_start = len(fgraph.apply_nodes)
-t0 = time.time()
+t0 = time.perf_counter()
q = deque(io_toposort(fgraph.inputs, start_from))
-io_t = time.time() - t0
+io_t = time.perf_counter() - t0

def importer(node):
if node is not current_node:
@@ ... @@
)
nb = 0
try:
-t0 = time.time()
+t0 = time.perf_counter()
while q:
if self.order == "out_to_in":
node = q.pop()
@@ ... @@
continue
current_node = node
nb += self.process_node(fgraph, node)
-loop_t = time.time() - t0
+loop_t = time.perf_counter() - t0
finally:
self.detach_updater(fgraph, u)

@@ -2367,9 +2367,9 @@ def apply_cleanup(profs_dict):
for crewriter in self.cleanup_rewriters:
change_tracker.reset()
nb = change_tracker.nb_imported
-t_rewrite = time.time()
+t_rewrite = time.perf_counter()
sub_prof = crewriter.apply(fgraph)
-time_rewriters[crewriter] += time.time() - t_rewrite
+time_rewriters[crewriter] += time.perf_counter() - t_rewrite
profs_dict[crewriter].append(sub_prof)
if change_tracker.changed:
process_count.setdefault(crewriter, 0)
@@ -2381,7 +2381,7 @@ def apply_cleanup(profs_dict):

while changed and not max_use_abort:
process_count = {}
-t0 = time.time()
+t0 = time.perf_counter()
changed = False
iter_cleanup_sub_profs = {}
for crewrite in self.cleanup_rewriters:
@@ ... @@
for grewrite in self.global_rewriters:
change_tracker.reset()
nb = change_tracker.nb_imported
-t_rewrite = time.time()
+t_rewrite = time.perf_counter()
sub_prof = grewrite.apply(fgraph)
-time_rewriters[grewrite] += time.time() - t_rewrite
+time_rewriters[grewrite] += time.perf_counter() - t_rewrite
sub_profs.append(sub_prof)
if change_tracker.changed:
process_count.setdefault(grewrite, 0)
@@ ... @@
)
global_sub_profs.append(sub_profs)

-global_rewriter_timing.append(float(time.time() - t0))
+global_rewriter_timing.append(float(time.perf_counter() - t0))

changed |= apply_cleanup(iter_cleanup_sub_profs)

-topo_t0 = time.time()
+topo_t0 = time.perf_counter()
q = deque(io_toposort(fgraph.inputs, start_from))
-io_toposort_timing.append(time.time() - topo_t0)
+io_toposort_timing.append(time.perf_counter() - topo_t0)

nb_nodes.append(len(q))
max_nb_nodes = max(max_nb_nodes, len(q))
@@ -2443,11 +2443,11 @@ def chin(node, i, r, new_r, reason):
current_node = node
for node_rewriter in self.node_tracker.get_trackers(node.op):
nb = change_tracker.nb_imported
-t_rewrite = time.time()
+t_rewrite = time.perf_counter()
node_rewriter_change = self.process_node(
fgraph, node, node_rewriter
)
-time_rewriters[node_rewriter] += time.time() - t_rewrite
+time_rewriters[node_rewriter] += time.perf_counter() - t_rewrite
if not node_rewriter_change:
continue
process_count.setdefault(node_rewriter, 0)
@@ ... @@

# Apply final rewriters
sub_profs = []
-t_before_final_rewrites = time.time()
+t_before_final_rewrites = time.perf_counter()
for grewrite in self.final_rewriters:
change_tracker.reset()
nb = change_tracker.nb_imported
-t_rewrite = time.time()
+t_rewrite = time.perf_counter()
sub_prof = grewrite.apply(fgraph)
-time_rewriters[grewrite] += time.time() - t_rewrite
+time_rewriters[grewrite] += time.perf_counter() - t_rewrite
sub_profs.append(sub_prof)
if change_tracker.changed:
process_count.setdefault(grewrite, 0)
@@ ... @@
)
final_sub_profs.append(sub_profs)

-global_rewriter_timing[-1] += time.time() - t_before_final_rewrites
+global_rewriter_timing[-1] += time.perf_counter() - t_before_final_rewrites

changed |= apply_cleanup(iter_cleanup_sub_profs)

@@ ... @@
cleanup_sub_profs.append(c_sub_profs)

loop_process_count.append(process_count)
-loop_timing.append(float(time.time() - t0))
+loop_timing.append(float(time.perf_counter() - t0))

end_nb_nodes = len(fgraph.apply_nodes)

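Several hunks above share a second idiom, exposed directly by time_call: bracket a single call with two perf_counter() readings, then accumulate the measured duration per rewriter across passes. A minimal sketch under the same assumptions (hypothetical rewrite callables keyed by name, not PyTensor's actual rewriter classes):

import time
from collections import defaultdict
from typing import Callable

def time_call(fn: Callable[[], object]) -> float:
    # Duration of a single fn() call, as in the diff's time_call.
    start = time.perf_counter()
    fn()
    return time.perf_counter() - start

# Accumulate durations per rewrite across repeated applications,
# mirroring the time_rewriters bookkeeping above.
time_rewriters: defaultdict[str, float] = defaultdict(float)
rewrites = {
    "hypothetical_rewrite_a": lambda: sum(range(10_000)),
    "hypothetical_rewrite_b": lambda: sorted(range(10_000)),
}

for _ in range(3):  # e.g. three rewriter passes over the graph
    for name, rewrite in rewrites.items():
        time_rewriters[name] += time_call(rewrite)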
20 changes: 11 additions & 9 deletions pytensor/link/c/cmodule.py
@@ -326,11 +326,11 @@ def dlimport(fullpath, suffix=None):
global import_time
try:
importlib.invalidate_caches()
-t0 = time.time()
+t0 = time.perf_counter()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
rval = __import__(module_name, {}, {}, [module_name])
-t1 = time.time()
+t1 = time.perf_counter()
import_time += t1 - t0
if not rval:
raise Exception("__import__ failed", fullpath)
@@ -771,7 +771,7 @@ def refresh(self, age_thresh_use=None, delete_if_problem=False, cleanup=True):
"""
if age_thresh_use is None:
age_thresh_use = self.age_thresh_use
-start_time = time.time()
+start_time = time.perf_counter()
too_old_to_use = []

to_delete = []
@@ -786,7 +786,7 @@ def rmtree_empty(*args, **kwargs):
to_delete_empty.append((args, kwargs))

# add entries that are not in the entry_from_key dictionary
-time_now = time.time()
+time_now = time.perf_counter()
# Go through directories in alphabetical order to ensure consistent
# behavior.
try:
@@ -956,7 +956,7 @@ def unpickle_failure():
# directories in alphabetical order so as to make
# sure all new processes only use the first one.
if cleanup:
-age = time.time() - last_access_time(entry)
+age = time.perf_counter() - last_access_time(entry)
if delete_if_problem or age > self.age_thresh_del:
rmtree(
root,
@@ -1063,7 +1063,9 @@ def unpickle_failure():
if not files:
_rmtree(*a, **kw)

_logger.debug(f"Time needed to refresh cache: {time.time() - start_time}")
_logger.debug(
f"Time needed to refresh cache: {time.perf_counter() - start_time}"
)

return too_old_to_use

@@ -1269,7 +1271,7 @@ def check_key(self, key, key_pkl):
Its associated pickled file containing a KeyData.

"""
-start_time = time.time()
+start_time = time.perf_counter()
# Verify that when we reload the KeyData from the pickled file, the
# same key can be found in it, and is not equal to more than one
# other key.
Expand Down Expand Up @@ -1317,7 +1319,7 @@ def check_key(self, key, key_pkl):
f"The keys are:\n {other}\nand\n {key}\n(found in {key_pkl})."
)

-self.time_spent_in_check_key += time.time() - start_time
+self.time_spent_in_check_key += time.perf_counter() - start_time

# default 31 days
age_thresh_del = config.cmodule__age_thresh_use + 60 * 60 * 24 * 7
@@ -1506,7 +1508,7 @@ def clear_unversioned(self, min_age=None):
assert key[0]

to_del = []
-time_now = time.time()
+time_now = time.perf_counter()
for filename in os.listdir(self.dirname):
if filename.startswith("tmp"):
try:
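One distinction worth keeping in mind when reading the cmodule.py hunks: perf_counter() has an arbitrary reference point, so a reading is only meaningful relative to another perf_counter() reading in the same process. Filesystem timestamps, such as the access times a helper like last_access_time would read via os.stat, share time.time()'s epoch, so ages of on-disk entries have to be computed against time.time(). A minimal sketch of the two clocks side by side, with a hypothetical cache entry path:

import os
import time

cache_entry = "/tmp/example_cache_entry"  # hypothetical path

# In-process interval: bracket the work with perf_counter() readings.
t0 = time.perf_counter()
entries = os.listdir(".")
scan_duration = time.perf_counter() - t0

# On-disk age: st_atime is on time.time()'s epoch, so subtract it
# from time.time(), never from perf_counter().
if os.path.exists(cache_entry):
    age = time.time() - os.stat(cache_entry).st_atime
    print(f"scanned {len(entries)} entries in {scan_duration:.6f}s; "
          f"cache entry is {age:.0f}s old")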