Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
67 changes: 50 additions & 17 deletions ddtrace/profiling/bootstrap/sitecustomize.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,26 +13,59 @@


def start_profiler():
    """Start the profiler if enabled by configuration.

    Respects the DD_PROFILING_ENABLED setting and will not start the
    profiler when it is disabled, even when called explicitly.
    """
    # Local import so reading the config happens at call time, after any
    # environment changes.
    from ddtrace.settings.profiling import config as profiling_config

    if not profiling_config.enabled:
        LOG.debug("start_profiler() called but DD_PROFILING_ENABLED is disabled")
        return

    # Stop any previously-started profiler before replacing it.
    if hasattr(bootstrap, "profiler") and bootstrap.profiler is not None:
        try:
            bootstrap.profiler.stop()
        except Exception:
            # Best-effort: a failed stop must not prevent starting a new profiler.
            LOG.debug("Failed to stop existing profiler", exc_info=True)

    # Export the profiler so we can introspect it if needed
    bootstrap.profiler = profiler.Profiler()
    bootstrap.profiler.start()
    LOG.debug("Profiler started successfully")


def _maybe_start_profiler():
    """Auto-start the profiler only if enabled and the platform is supported.

    Returns:
        bool: True when the profiler was started, False otherwise.
    """
    # Guard clause: 32-bit Linux is unsupported (sys.maxsize fits in 32 bits).
    if platform.system() == "Linux" and not (sys.maxsize > (1 << 32)):
        LOG.error(
            "The Datadog Profiler is not supported on 32-bit Linux systems. "
            "To use the profiler, please upgrade to a 64-bit Linux system. "
            "If you believe this is an error or need assistance, please report it at "
            "https://github.com/DataDog/dd-trace-py/issues"
        )
        return False

    # Guard clause: Windows is unsupported.
    if platform.system() == "Windows":
        LOG.error(
            "The Datadog Profiler is not supported on Windows. "
            "To use the profiler, please use a 64-bit Linux or macOS system. "
            "If you need assistance related to Windows support for the Profiler, please open a ticket at "
            "https://github.com/DataDog/dd-trace-py/issues"
        )
        return False

    # Check config before starting (local import reads the current environment).
    from ddtrace.settings.profiling import config as profiling_config

    if not profiling_config.enabled:
        LOG.debug("Profiler auto-start skipped: DD_PROFILING_ENABLED is disabled")
        return False

    start_profiler()
    return True


# Auto-start at import time; True only when the profiler actually started
# (platform supported and DD_PROFILING_ENABLED set).
_profiler_started = _maybe_start_profiler()
32 changes: 30 additions & 2 deletions ddtrace/profiling/profiler.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,15 +89,43 @@ def stop(self, flush=True):
pass

def _restart_on_fork(self):
    """Handle fork: stop the parent profiler, conditionally restart in the child.

    This respects the child process's DD_PROFILING_ENABLED setting, which
    may differ from the parent's. The config is reloaded by the forksafe
    hook (_reload_config_after_fork) before this runs.
    """
    # Step 1: Always stop the parent profiler first, since it might have to
    # e.g. unpatch functions. Do not flush data as we don't want multiple
    # copies of the parent profile exported.
    try:
        self._profiler.stop(flush=False, join=False)
    except service.ServiceStatusError:
        # This can happen in uWSGI mode: the children won't have the
        # _profiler started from the master process.
        pass

    # Step 2: Check if profiling is enabled in THIS process. The config was
    # already reloaded by the forksafe hook (_reload_config_after_fork).
    from ddtrace.settings.profiling import config as profiling_config

    if not profiling_config.enabled:
        LOG.debug(
            "Profiler not restarted in child process (PID=%d): DD_PROFILING_ENABLED is disabled",
            os.getpid(),
        )
        return

    # Step 3: Start a fresh profiler in the child.
    try:
        self._profiler = self._profiler.copy()
        self._profiler.start()
        LOG.debug("Profiler successfully restarted in child process (PID=%d)", os.getpid())
    except Exception:
        LOG.error(
            "Failed to restart profiler in child process (PID=%d)",
            os.getpid(),
            exc_info=True,
        )

def __getattr__(
self,
Expand Down
99 changes: 99 additions & 0 deletions ddtrace/settings/profiling.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,10 @@ def _enrich_tags(tags) -> t.Dict[str, str]:
class ProfilingConfig(DDConfig):
__prefix__ = "dd.profiling"

# Nested configs that need metadata synchronization in reload_from_env()
# Update this list when adding new nested configs via .include()
_NESTED_CONFIGS = ["stack", "lock", "memory", "heap", "pytorch"]

# Note that the parser here has a side-effect, since SSI has changed the once-truthy value of the envvar to
# truthy + "auto", which has a special meaning.
enabled = DDConfig.v(
Expand All @@ -173,6 +177,51 @@ class ProfilingConfig(DDConfig):
help="Enable Datadog profiling when using ``ddtrace-run``",
)

def reload_from_env(self):
    """Reload configuration from environment variables in-place.

    This method updates the existing config object with fresh values from
    environment variables, preserving all existing references to this object.
    This is critical for maintaining consistency across modules that have
    already imported this config instance (e.g. after a fork changed the
    environment).
    """
    # Create a temporary new config so DDConfig re-reads the environment.
    new_config = ProfilingConfig()

    # Copy all configuration values using the DDConfig items iterator.
    # recursive=True also walks nested configs (stack, lock, memory, heap,
    # pytorch), yielding dotted names like "stack.enabled".
    for name, env_var in type(self).items(recursive=True):
        # Resolve the dotted attribute path on the freshly-built config.
        new_value = new_config
        for part in name.split("."):
            new_value = getattr(new_value, part)

        # Walk to the owning (possibly nested) config object on self, then
        # assign the leaf attribute in-place.
        current = self
        parts = name.split(".")
        for part in parts[:-1]:
            current = getattr(current, part)
        setattr(current, parts[-1], new_value)

    # Explicitly update derived fields that depend on other config values.
    # These are defined with DDConfig.d() and need to be recalculated:
    # stack.v2_enabled depends on stack._v2_enabled and stack.enabled.
    self.stack.v2_enabled = new_config.stack.v2_enabled
    # heap.sample_size is derived from heap._sample_size and system memory.
    self.heap.sample_size = new_config.heap.sample_size

    # Update internal tracking attributes for the root config so that
    # value_source() and config_id reflect the reloaded values.
    self._value_source = new_config._value_source
    self.config_id = new_config.config_id

    # Same for each nested config listed in _NESTED_CONFIGS; this ensures
    # value_source() and config_id work correctly for nested settings.
    for nested_name in self._NESTED_CONFIGS:
        nested_config = getattr(self, nested_name)
        new_nested_config = getattr(new_config, nested_name)
        nested_config._value_source = new_nested_config._value_source
        nested_config.config_id = new_nested_config.config_id

agentless = DDConfig.v(
bool,
"agentless",
Expand Down Expand Up @@ -468,3 +517,53 @@ def config_str(config):
configured_features.append("CAP" + str(config.capture_pct))
configured_features.append("MAXF" + str(config.max_frames))
return "_".join(configured_features)


def _reload_config_after_fork():
    """Reload configuration after fork to respect child process environment.

    This is critical for multi-worker servers like Gunicorn where:
    1. the parent process may have DD_PROFILING_ENABLED=true,
    2. child workers want DD_PROFILING_ENABLED=false, and
    3. environment variables are changed after fork.

    The existing config instance is reloaded in-place from environment
    variables (rather than replaced) so that every module already holding a
    reference to it sees the child's values.
    """
    global config, ddup_is_available, stack_v2_is_available

    # Remember the pre-fork value so a change can be logged below.
    previous_enabled = config.enabled

    # Re-read environment variables into the existing instance, preserving
    # all existing references to the config object.
    config.reload_from_env()

    # Without ddup, profiling cannot run regardless of the environment.
    if not ddup_is_available:
        config.enabled = False

    # Likewise, stack v2 cannot stay enabled without native support for it.
    if config.stack.v2_enabled and not stack_v2_is_available:
        config.stack.v2_enabled = False

    # Re-apply tag enrichment on the freshly loaded tags.
    config.tags = _enrich_tags(config.tags)

    if previous_enabled != config.enabled:
        logger.debug(
            "Profiling config changed after fork (PID=%d): enabled=%s -> %s",
            os.getpid(),
            previous_enabled,
            config.enabled,
        )


# Register fork hook (executed before profiler restart hooks).
# NOTE(review): this ordering presumably relies on forksafe running hooks in
# registration order — confirm against ddtrace.internal.forksafe.
from ddtrace.internal import forksafe  # noqa: E402


forksafe.register(_reload_config_after_fork)
Loading
Loading