diff --git a/CHANGELOG.md b/CHANGELOG.md
index 70b13589e3..8c8f0c05bc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -27,6 +27,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   ([#4737](https://github.com/open-telemetry/opentelemetry-python/pull/4737))
 - logs: add warnings for classes that would be deprecated and renamed in 1.39.0
   ([#4771](https://github.com/open-telemetry/opentelemetry-python/pull/4771))
+- Add `minimum_severity_level` and `trace_based_sampling` logger parameters to filter logs
+  ([#4765](https://github.com/open-telemetry/opentelemetry-python/pull/4765))
 
 ## Version 1.37.0/0.58b0 (2025-09-11)
 
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/__init__.py
index dbb108b7db..4aa5957e87 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/__init__.py
@@ -18,6 +18,7 @@
     LogDeprecatedInitWarning,
     LogDroppedAttributesWarning,
     Logger,
+    LoggerConfig,
     LoggerProvider,
     LoggingHandler,
     LogLimits,
@@ -35,4 +36,5 @@
     "LogRecordProcessor",
     "LogDeprecatedInitWarning",
     "LogDroppedAttributesWarning",
+    "LoggerConfig",
 ]
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/__init__.py
index 9e2d3f7d7f..00c95d24cd 100644
--- a/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_logs/_internal/__init__.py
@@ -11,12 +11,16 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+# pylint: disable=too-many-lines
+
 from __future__ import annotations
 
 import abc
 import atexit
 import base64
 import concurrent.futures
+import fnmatch
 import json
 import logging
 import threading
@@ -61,6 +65,8 @@
 _logger = logging.getLogger(__name__)
 
+LoggerConfigurator = Callable[[InstrumentationScope], "LoggerConfig | None"]
+
 _DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT = 128
 
 _ENV_VALUE_UNSET = ""
@@ -96,6 +102,81 @@ class LogDeprecatedInitWarning(UserWarning):
 warnings.simplefilter("once", LogDeprecatedInitWarning)
 
 
+class LoggerConfig:
+    def __init__(
+        self,
+        disabled: bool = False,
+        minimum_severity_level: SeverityNumber = SeverityNumber.UNSPECIFIED,
+        trace_based_sampling: bool = False,
+    ):
+        """Initialize LoggerConfig with the specified parameters.
+
+        Args:
+            disabled: A boolean indicating whether the logger is disabled.
+                If not explicitly set, defaults to False (i.e. Loggers are enabled by default).
+                If True, the logger behaves equivalently to a No-op Logger.
+            minimum_severity_level: A SeverityNumber indicating the minimum severity level
+                for log records to be processed. If not explicitly set, defaults to UNSPECIFIED (0).
+                If a log record's SeverityNumber is specified and is less than the configured
+                minimum_severity_level, the log record is dropped by the Logger.
+            trace_based_sampling: A boolean indicating whether the logger should only
+                process log records associated with sampled traces. If not explicitly set,
+                defaults to False. If True, log records associated with unsampled traces
+                are dropped by the Logger.
+ """ + self.disabled = disabled + self.minimum_severity_level = minimum_severity_level + self.trace_based_sampling = trace_based_sampling + + def __repr__(self): + return ( + f"LoggerConfig(disabled={self.disabled}, " + f"minimum_severity_level={self.minimum_severity_level}, " + f"trace_based_sampling={self.trace_based_sampling})" + ) + + +def create_logger_configurator_by_name( + logger_configs: dict[str, LoggerConfig], +) -> LoggerConfigurator: + """Create a LoggerConfigurator that selects configuration based on logger name. + + Args: + logger_configs: A dictionary mapping logger names to LoggerConfig instances. + Loggers not found in this mapping will use the default config. + + Returns: + A LoggerConfigurator function that can be used with LoggerProvider. + """ + + def configurator(scope: InstrumentationScope) -> LoggerConfig | None: + return logger_configs.get(scope.name) + + return configurator + + +def create_logger_configurator_with_pattern( + patterns: list[tuple[str, LoggerConfig]], +) -> LoggerConfigurator: + """Create a LoggerConfigurator that matches logger names using patterns. + + Args: + patterns: A list of (pattern, config) tuples. Patterns are matched in order, + and the first match is used. Use '*' as a wildcard. + + Returns: + A LoggerConfigurator function that can be used with LoggerProvider. + """ + + def configurator(scope: InstrumentationScope) -> LoggerConfig | None: + for pattern, config in patterns: + if fnmatch.fnmatch(scope.name, pattern): + return config + return None + + return configurator + + class LogLimits: """This class is based on a SpanLimits class in the Tracing module. @@ -685,7 +766,15 @@ def __init__( ConcurrentMultiLogRecordProcessor, ], instrumentation_scope: InstrumentationScope, + config: LoggerConfig | None = None, + min_severity_level: SeverityNumber = SeverityNumber.UNSPECIFIED, + trace_based_sampling: bool = False, ): + if config is not None: + self._config = config + else: + self._config = LoggerConfig() + super().__init__( instrumentation_scope.name, instrumentation_scope.version, @@ -695,11 +784,30 @@ def __init__( self._resource = resource self._multi_log_record_processor = multi_log_record_processor self._instrumentation_scope = instrumentation_scope + self._min_severity_level = min_severity_level + self._trace_based_sampling = trace_based_sampling @property def resource(self): return self._resource + @property + def config(self): + return self._config + + @property + def instrumentation_scope(self): + """Get the instrumentation scope for this logger.""" + return self._instrumentation_scope + + def update_config(self, config: LoggerConfig) -> None: + """Update the logger's configuration. + + Args: + config: The new LoggerConfig to use. 
+ """ + self._config = config + @overload def emit( self, @@ -758,7 +866,20 @@ def emit( record=record, resource=self._resource ) - log_data = LogData(record, self._instrumentation_scope) + if self._config.disabled: + return + + if is_less_than_min_severity( + record, self._config.minimum_severity_level + ): + return + + if should_drop_logs_for_unsampled_traces( + record, self._config.trace_based_sampling + ): + return + + log_data = LogData(record, self._instrumentation_scope) self._multi_log_record_processor.on_emit(log_data) @@ -771,6 +892,9 @@ def __init__( multi_log_record_processor: SynchronousMultiLogRecordProcessor | ConcurrentMultiLogRecordProcessor | None = None, + min_severity_level: SeverityNumber = SeverityNumber.UNSPECIFIED, + trace_based_sampling: bool = False, + logger_configurator: LoggerConfigurator | None = None, ): if resource is None: self._resource = Resource.create({}) @@ -786,6 +910,23 @@ def __init__( self._at_exit_handler = atexit.register(self.shutdown) self._logger_cache = {} self._logger_cache_lock = Lock() + self._min_severity_level = min_severity_level + self._trace_based_sampling = trace_based_sampling + + if logger_configurator is not None: + self._logger_configurator = logger_configurator + else: + + def default_configurator( + scope: InstrumentationScope, + ) -> LoggerConfig: + return LoggerConfig( + disabled=self._disabled, + minimum_severity_level=self._min_severity_level, + trace_based_sampling=self._trace_based_sampling, + ) + + self._logger_configurator = default_configurator @property def resource(self): @@ -798,15 +939,24 @@ def _get_logger_no_cache( schema_url: str | None = None, attributes: _ExtendedAttributes | None = None, ) -> Logger: + instrumentation_scope = InstrumentationScope( + name, + version, + schema_url, + attributes, + ) + config = self._logger_configurator(instrumentation_scope) + if config is None: + config = LoggerConfig( + disabled=self._disabled, + minimum_severity_level=self._min_severity_level, + trace_based_sampling=self._trace_based_sampling, + ) return Logger( self._resource, self._multi_log_record_processor, - InstrumentationScope( - name, - version, - schema_url, - attributes, - ), + instrumentation_scope, + config=config, ) def _get_logger_cached( @@ -854,6 +1004,22 @@ def add_log_record_processor( log_record_processor ) + def set_logger_configurator( + self, configurator: LoggerConfigurator + ) -> None: + """Update the logger configurator and apply the new configuration to all existing loggers.""" + with self._logger_cache_lock: + self._logger_configurator = configurator + for logger in self._logger_cache.values(): + new_config = configurator(logger.instrumentation_scope) + if new_config is None: + new_config = LoggerConfig( + disabled=self._disabled, + minimum_severity_level=self._min_severity_level, + trace_based_sampling=self._trace_based_sampling, + ) + logger.update_config(new_config) + def shutdown(self): """Shuts down the log processors.""" self._multi_log_record_processor.shutdown() @@ -933,3 +1099,54 @@ def std_to_otel(levelno: int) -> SeverityNumber: if levelno > 53: return SeverityNumber.FATAL4 return _STD_TO_OTEL[levelno] + + +def is_less_than_min_severity( + record: LogRecord, min_severity: SeverityNumber +) -> bool: + """Checks if the log record's severity number is less than the minimum severity level. + + Args: + record: The log record to be processed. + min_severity: The minimum severity level. 
+
+    Returns:
+        True if the log record's severity number is less than the minimum
+        severity level, False otherwise. Log records with an unspecified severity (i.e. `0`)
+        are not affected by this parameter and therefore bypass minimum severity filtering.
+    """
+    if record.severity_number is not None:
+        if (
+            min_severity is not None
+            and min_severity != SeverityNumber.UNSPECIFIED
+            and record.severity_number.value < min_severity.value
+        ):
+            return True
+    return False
+
+
+def should_drop_logs_for_unsampled_traces(
+    record: LogRecord, trace_based_sampling_flag: bool
+) -> bool:
+    """Determines whether the logger should drop log records associated with unsampled traces.
+
+    If `trace_based_sampling` is `true`, log records associated with unsampled traces are dropped by the `Logger`.
+    A log record is considered associated with an unsampled trace if it has a valid `SpanId` and its
+    `TraceFlags` indicate that the trace is unsampled. A log record that isn't associated with a trace
+    context is not affected by this parameter and therefore bypasses trace-based filtering.
+
+    Args:
+        record: The log record to be processed.
+        trace_based_sampling_flag: A boolean flag indicating whether trace-based filtering is enabled.
+            If not explicitly set, the `trace_based_sampling` parameter defaults to `false`.
+
+    Returns:
+        True if the log record should be dropped because it is associated with an
+        unsampled trace, False otherwise.
+    """
+    if trace_based_sampling_flag:
+        if record.context is not None:
+            span = get_current_span(record.context)
+            span_context = span.get_span_context()
+            if span_context.is_valid and not span_context.trace_flags.sampled:
+                return True
+    return False
diff --git a/opentelemetry-sdk/tests/logs/test_logs.py b/opentelemetry-sdk/tests/logs/test_logs.py
index e4849e07a2..361842a3e3 100644
--- a/opentelemetry-sdk/tests/logs/test_logs.py
+++ b/opentelemetry-sdk/tests/logs/test_logs.py
@@ -18,7 +18,7 @@
 from unittest.mock import Mock, patch
 
 from opentelemetry._logs import LogRecord as APILogRecord
-from opentelemetry._logs import SeverityNumber
+from opentelemetry._logs import NoOpLogger, SeverityNumber
 from opentelemetry.context import get_current
 from opentelemetry.sdk._logs import (
     Logger,
@@ -26,8 +26,10 @@
     LogRecord,
 )
 from opentelemetry.sdk._logs._internal import (
-    NoOpLogger,
+    LoggerConfig,
     SynchronousMultiLogRecordProcessor,
+    create_logger_configurator_by_name,
+    create_logger_configurator_with_pattern,
 )
 from opentelemetry.sdk.environment_variables import OTEL_SDK_DISABLED
 from opentelemetry.sdk.resources import Resource
@@ -83,7 +85,9 @@ def test_get_logger_with_sdk_disabled(self):
 
     @patch.object(Resource, "create")
     def test_logger_provider_init(self, resource_patch):
-        logger_provider = LoggerProvider()
+        logger_provider = LoggerProvider(
+            min_severity_level=SeverityNumber.DEBUG4, trace_based_sampling=True
+        )
         resource_patch.assert_called_once()
         self.assertIsNotNone(logger_provider._resource)
         self.assertTrue(
@@ -92,12 +96,16 @@ def test_logger_provider_init(self, resource_patch):
                 SynchronousMultiLogRecordProcessor,
             )
         )
+        self.assertEqual(
+            logger_provider._min_severity_level, SeverityNumber.DEBUG4
+        )
+        self.assertTrue(logger_provider._trace_based_sampling)
         self.assertIsNotNone(logger_provider._at_exit_handler)
 
 
 class TestLogger(unittest.TestCase):
     @staticmethod
-    def _get_logger():
+    def _get_logger(config=None):
         log_record_processor_mock = Mock()
         logger = Logger(
             resource=Resource.create({}),
@@ -108,6 +116,7 @@ def _get_logger():
                 "schema_url",
                 {"an": "attribute"},
             ),
+            config=config,
         )
         return logger, log_record_processor_mock
 
@@ -171,3 +180,446 @@ def test_can_emit_with_keywords_arguments(self):
         self.assertEqual(log_record.attributes, {"some": "attributes"})
         self.assertEqual(log_record.event_name, "event_name")
         self.assertEqual(log_record.resource, logger.resource)
+
+    def test_emit_logrecord_with_min_severity_filtering(self):
+        """Test that logs below minimum severity are filtered out"""
+        config = LoggerConfig(minimum_severity_level=SeverityNumber.DEBUG4)
+        logger, log_record_processor_mock = self._get_logger(config)
+
+        log_record_info = LogRecord(
+            observed_timestamp=0,
+            body="info log line",
+            severity_number=SeverityNumber.DEBUG,
+            severity_text="DEBUG",
+        )
+
+        logger.emit(log_record_info)
+        log_record_processor_mock.on_emit.assert_not_called()
+
+        log_record_processor_mock.reset_mock()
+
+        log_record_error = LogRecord(
+            observed_timestamp=0,
+            body="error log line",
+            severity_number=SeverityNumber.ERROR,
+            severity_text="ERROR",
+        )
+
+        logger.emit(log_record_error)
+
+        log_record_processor_mock.on_emit.assert_called_once()
+        log_data = log_record_processor_mock.on_emit.call_args.args[0]
+        self.assertTrue(isinstance(log_data.log_record, LogRecord))
+        self.assertEqual(
+            log_data.log_record.severity_number, SeverityNumber.ERROR
+        )
+
+    def test_emit_logrecord_with_min_severity_unspecified(self):
+        """Test that when min severity is UNSPECIFIED, all logs are emitted"""
+        logger, log_record_processor_mock = self._get_logger()
+        log_record = LogRecord(
+            observed_timestamp=0,
+            body="debug log line",
+            severity_number=SeverityNumber.DEBUG,
+            severity_text="DEBUG",
+        )
+        logger.emit(log_record)
+        log_record_processor_mock.on_emit.assert_called_once()
+
+    def test_emit_logrecord_with_trace_based_sampling_filtering(self):
+        """Test that logs are filtered based on trace sampling state"""
+        config = LoggerConfig(trace_based_sampling=True)
+        logger, log_record_processor_mock = self._get_logger(config)
+
+        mock_span_context = Mock()
+        mock_span_context.is_valid = True
+        mock_span_context.trace_flags.sampled = False
+
+        mock_span = Mock()
+        mock_span.get_span_context.return_value = mock_span_context
+
+        mock_context = Mock()
+
+        with patch(
+            "opentelemetry.sdk._logs._internal.get_current_span",
+            return_value=mock_span,
+        ):
+            log_record = LogRecord(
+                observed_timestamp=0,
+                body="should be dropped",
+                severity_number=SeverityNumber.INFO,
+                severity_text="INFO",
+                context=mock_context,
+            )
+
+            logger.emit(log_record)
+            log_record_processor_mock.on_emit.assert_not_called()
+
+        log_record_processor_mock.reset_mock()
+
+        mock_span_context = Mock()
+        mock_span_context.is_valid = True
+        mock_span_context.trace_flags.sampled = True
+
+        mock_span = Mock()
+        mock_span.get_span_context.return_value = mock_span_context
+
+        # Records associated with a sampled trace should still be emitted.
+        with patch(
+            "opentelemetry.sdk._logs._internal.get_current_span",
+            return_value=mock_span,
+        ):
+            log_record = LogRecord(
+                observed_timestamp=0,
+                body="should be emitted",
+                severity_number=SeverityNumber.INFO,
+                severity_text="INFO",
+                context=mock_context,
+            )
+
+            logger.emit(log_record)
+            log_record_processor_mock.on_emit.assert_called_once()
+
+    def test_emit_logrecord_trace_filtering_disabled(self):
+        """Test that when trace-based filtering is disabled, all logs are emitted"""
+        logger, log_record_processor_mock = self._get_logger()
+
+        mock_span_context = Mock()
+        mock_span_context.is_valid = False
+        mock_span_context.trace_flags.sampled = False
+
+        mock_span = Mock()
+        mock_span.get_span_context.return_value = mock_span_context
+
+        mock_context = Mock()
+
+        with patch(
+            "opentelemetry.sdk._logs._internal.get_current_span",
+            return_value=mock_span,
+        ):
+            log_record = LogRecord(
+                observed_timestamp=0,
+                body="should be emitted when filtering disabled",
+                severity_number=SeverityNumber.INFO,
+                severity_text="INFO",
+                context=mock_context,
+            )
+
+            logger.emit(log_record)
+            log_record_processor_mock.on_emit.assert_called_once()
+
+    def test_emit_logrecord_trace_filtering_edge_cases(self):
+        """Test edge cases for trace-based filtering"""
+        config = LoggerConfig(trace_based_sampling=True)
+        logger, log_record_processor_mock = self._get_logger(config)
+
+        mock_span_context = Mock()
+        mock_span_context.is_valid = False
+        mock_span_context.trace_flags.sampled = True
+
+        mock_span = Mock()
+        mock_span.get_span_context.return_value = mock_span_context
+
+        mock_context = Mock()
+
+        with patch(
+            "opentelemetry.sdk._logs._internal.get_current_span",
+            return_value=mock_span,
+        ):
+            log_record = LogRecord(
+                observed_timestamp=0,
+                body="invalid but sampled",
+                severity_number=SeverityNumber.INFO,
+                severity_text="INFO",
+                context=mock_context,
+            )
+
+            logger.emit(log_record)
+            log_record_processor_mock.on_emit.assert_called_once()
+
+        log_record_processor_mock.reset_mock()
+
+        mock_span_context = Mock()
+        mock_span_context.is_valid = True
+        mock_span_context.trace_flags.sampled = False
+
+        mock_span = Mock()
+        mock_span.get_span_context.return_value = mock_span_context
+
+        with patch(
+            "opentelemetry.sdk._logs._internal.get_current_span",
+            return_value=mock_span,
+        ):
+            log_record = LogRecord(
+                observed_timestamp=0,
+                body="valid but not sampled",
+                severity_number=SeverityNumber.INFO,
+                severity_text="INFO",
+                context=mock_context,
+            )
+
+            logger.emit(log_record)
+            log_record_processor_mock.on_emit.assert_not_called()
+
+    def test_emit_both_min_severity_and_trace_based_sampling_filtering(self):
+        """Test that both min severity and trace-based filtering work together"""
+        config = LoggerConfig(
+            minimum_severity_level=SeverityNumber.WARN,
+            trace_based_sampling=True,
+        )
+        logger, log_record_processor_mock = self._get_logger(config)
+
+        mock_span_context = Mock()
+        mock_span_context.is_valid = True
+        mock_span_context.trace_flags.sampled = True
+
+        mock_span = Mock()
+        mock_span.get_span_context.return_value = mock_span_context
+
+        mock_context = Mock()
+
+        with patch(
+            "opentelemetry.sdk._logs._internal.get_current_span",
+            return_value=mock_span,
+        ):
+            log_record_info = LogRecord(
+                observed_timestamp=0,
+                body="info log line",
+                severity_number=SeverityNumber.INFO,
+                severity_text="INFO",
+                context=mock_context,
+            )
+
+            logger.emit(log_record_info)
+            log_record_processor_mock.on_emit.assert_not_called()
+
+            log_record_processor_mock.reset_mock()
+
+            log_record_error = LogRecord(
+                observed_timestamp=0,
+                body="error log line",
+                severity_number=SeverityNumber.ERROR,
+                severity_text="ERROR",
+                context=mock_context,
+            )
+
+            logger.emit(log_record_error)
+            log_record_processor_mock.on_emit.assert_called_once()
+
+    def test_emit_logrecord_with_disabled_logger(self):
+        """Test that disabled loggers don't emit any logs"""
+        config = LoggerConfig(disabled=True)
+        logger, log_record_processor_mock = self._get_logger(config)
+
+        log_record = LogRecord(
+            observed_timestamp=0,
+            body="this should be dropped",
+            severity_number=SeverityNumber.ERROR,
+            severity_text="ERROR",
+        )
+
+        logger.emit(log_record)
+        log_record_processor_mock.on_emit.assert_not_called()
+
+    def test_logger_config_property(self):
+        """Test that logger config property works correctly"""
+        config = LoggerConfig(
+            disabled=True,
+            minimum_severity_level=SeverityNumber.WARN,
+            trace_based_sampling=True,
+        )
+        logger, _ = self._get_logger(config)
+
+        self.assertEqual(logger.config.disabled, True)
+        self.assertEqual(
+            logger.config.minimum_severity_level, SeverityNumber.WARN
+        )
+        self.assertEqual(logger.config.trace_based_sampling, True)
+
+    def test_logger_configurator_behavior(self):
+        """Test LoggerConfigurator functionality including custom configurators and dynamic updates"""
+
+        logger_configs = {
+            "test.database": LoggerConfig(
+                minimum_severity_level=SeverityNumber.ERROR
+            ),
+            "test.auth": LoggerConfig(disabled=True),
+            "test.performance": LoggerConfig(trace_based_sampling=True),
+        }
+
+        configurator = create_logger_configurator_by_name(logger_configs)
+
+        provider = LoggerProvider(logger_configurator=configurator)
+
+        db_logger = provider.get_logger("test.database")
+        self.assertEqual(
+            db_logger.config.minimum_severity_level, SeverityNumber.ERROR
+        )
+        self.assertFalse(db_logger.config.disabled)
+        self.assertFalse(db_logger.config.trace_based_sampling)
+
+        auth_logger = provider.get_logger("test.auth")
+        self.assertTrue(auth_logger.config.disabled)
+
+        perf_logger = provider.get_logger("test.performance")
+        self.assertTrue(perf_logger.config.trace_based_sampling)
+
+        other_logger = provider.get_logger("test.other")
+        self.assertEqual(
+            other_logger.config.minimum_severity_level,
+            SeverityNumber.UNSPECIFIED,
+        )
+        self.assertFalse(other_logger.config.disabled)
+        self.assertFalse(other_logger.config.trace_based_sampling)
+
+    def test_logger_configurator_pattern_matching(self):
+        """Test LoggerConfigurator with pattern matching"""
+        patterns = [
+            (
+                "test.database.*",
+                LoggerConfig(minimum_severity_level=SeverityNumber.ERROR),
+            ),
+            ("test.*.debug", LoggerConfig(disabled=True)),
+            ("test.*", LoggerConfig(trace_based_sampling=True)),
+            ("*", LoggerConfig(minimum_severity_level=SeverityNumber.WARN)),
+        ]
+
+        configurator = create_logger_configurator_with_pattern(patterns)
+        provider = LoggerProvider(logger_configurator=configurator)
+
+        db_logger = provider.get_logger("test.database.connection")
+        self.assertEqual(
+            db_logger.config.minimum_severity_level, SeverityNumber.ERROR
+        )
+
+        debug_logger = provider.get_logger("test.module.debug")
+        self.assertTrue(debug_logger.config.disabled)
+
+        general_logger = provider.get_logger("test.module")
+        self.assertTrue(general_logger.config.trace_based_sampling)
+
+        other_logger = provider.get_logger("other.module")
+        self.assertEqual(
+            other_logger.config.minimum_severity_level, SeverityNumber.WARN
+        )
+
+    def test_logger_configurator_dynamic_updates(self):
+        """Test that LoggerConfigurator updates apply to existing loggers"""
+        initial_configs = {
+            "test.module": LoggerConfig(
+                minimum_severity_level=SeverityNumber.INFO
+            )
+        }
+
+        initial_configurator = create_logger_configurator_by_name(
+            initial_configs
+        )
+
+        provider = LoggerProvider(logger_configurator=initial_configurator)
+
+        logger = provider.get_logger("test.module")
+        self.assertEqual(
+            logger.config.minimum_severity_level, SeverityNumber.INFO
+        )
+        self.assertFalse(logger.config.disabled)
+
+        updated_configs = {
+            "test.module": LoggerConfig(
+                minimum_severity_level=SeverityNumber.ERROR, disabled=True
+            )
+        }
+        updated_configurator = create_logger_configurator_by_name(
+            updated_configs
+        )
+
+        provider.set_logger_configurator(updated_configurator)
+
+        self.assertEqual(
+            logger.config.minimum_severity_level, SeverityNumber.ERROR
+        )
+        self.assertTrue(logger.config.disabled)
+
+        new_logger = provider.get_logger("test.module")
+        self.assertEqual(
+            new_logger.config.minimum_severity_level, SeverityNumber.ERROR
+        )
+        self.assertTrue(new_logger.config.disabled)
+
+    def test_logger_configurator_returns_none(self):
+        """Test that a LoggerConfigurator returning None falls back to the default"""
+
+        def none_configurator(scope):
+            return None
+
+        provider = LoggerProvider(
+            logger_configurator=none_configurator,
+            min_severity_level=SeverityNumber.WARN,
+            trace_based_sampling=True,
+        )
+
+        logger = provider.get_logger("test.module")
+
+        self.assertEqual(
+            logger.config.minimum_severity_level, SeverityNumber.WARN
+        )
+        self.assertTrue(logger.config.trace_based_sampling)
+        self.assertFalse(logger.config.disabled)
+
+    @staticmethod
+    def _selective_configurator(scope):
+        if scope.name == "disabled.logger":
+            return LoggerConfig(disabled=True)
+        if scope.name == "error.logger":
+            return LoggerConfig(minimum_severity_level=SeverityNumber.ERROR)
+        if scope.name == "trace.logger":
+            return LoggerConfig(trace_based_sampling=True)
+        return LoggerConfig()
+
+    def test_logger_configurator_with_filtering(self):
+        """Test that LoggerConfigurator configs are properly applied during filtering"""
+
+        provider = LoggerProvider(
+            logger_configurator=self._selective_configurator
+        )
+
+        disabled_logger = provider.get_logger("disabled.logger")
+        log_record_processor_mock = Mock()
+        disabled_logger._multi_log_record_processor = log_record_processor_mock
+
+        log_record = LogRecord(
+            observed_timestamp=0,
+            body="should not emit",
+            severity_number=SeverityNumber.INFO,
+        )
+        disabled_logger.emit(log_record)
+        log_record_processor_mock.on_emit.assert_not_called()
+
+        error_logger = provider.get_logger("error.logger")
+        log_record_processor_mock = Mock()
+        error_logger._multi_log_record_processor = log_record_processor_mock
+
+        info_record = LogRecord(
+            observed_timestamp=0,
+            body="info message",
+            severity_number=SeverityNumber.INFO,
+        )
+        error_logger.emit(info_record)
+        log_record_processor_mock.on_emit.assert_not_called()
+
+        error_record = LogRecord(
+            observed_timestamp=0,
+            body="error message",
+            severity_number=SeverityNumber.ERROR,
+        )
+        error_logger.emit(error_record)
+        log_record_processor_mock.on_emit.assert_called_once()
+
+        trace_logger = provider.get_logger("trace.logger")
+        log_record_processor_mock = Mock()
+        trace_logger._multi_log_record_processor = log_record_processor_mock
+
+        mock_span_context = Mock()
+        mock_span_context.is_valid = True
+        mock_span_context.trace_flags.sampled = False
+
+        mock_span = Mock()
+        mock_span.get_span_context.return_value = mock_span_context
+
+        mock_context = Mock()
+
+        with patch(
+            "opentelemetry.sdk._logs._internal.get_current_span",
+            return_value=mock_span,
+        ):
+            trace_record = LogRecord(
+                observed_timestamp=0,
+                body="unsampled trace message",
+                severity_number=SeverityNumber.INFO,
+                context=mock_context,
+            )
+            trace_logger.emit(trace_record)
+            log_record_processor_mock.on_emit.assert_not_called()
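
For reference, a brief usage sketch of the configuration surface this patch introduces. It is illustrative only and not part of the diff; the `myapp.*` logger names are made up, and only parameters and helpers added above are used.

# Usage sketch (illustrative, assumes this patch is applied).
from opentelemetry._logs import SeverityNumber
from opentelemetry.sdk._logs import LoggerProvider
from opentelemetry.sdk._logs._internal import (
    LoggerConfig,
    create_logger_configurator_with_pattern,
)

# Provider-wide defaults: drop records below WARN and records on unsampled traces.
provider = LoggerProvider(
    min_severity_level=SeverityNumber.WARN,
    trace_based_sampling=True,
)

# Or drive per-logger behaviour from name patterns; the first matching pattern wins.
provider = LoggerProvider(
    logger_configurator=create_logger_configurator_with_pattern(
        [
            ("myapp.noisy.*", LoggerConfig(disabled=True)),
            ("myapp.*", LoggerConfig(minimum_severity_level=SeverityNumber.INFO)),
        ]
    )
)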