Skip to content

fix(logger): remove subclassing and move unnecessary APIs #2334

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
51 changes: 51 additions & 0 deletions aws_lambda_powertools/logging/compat.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
"""Maintenance: We can drop this upon Py3.7 EOL. It's a backport for "location" key to work."""
from __future__ import annotations

import io
import logging
import os
import traceback


def findCaller(stack_info=False, stacklevel=2):  # pragma: no cover
    """
    Find the stack frame of the caller so that we can note the source
    file name, line number and function name.

    Backport of ``logging.Logger.findCaller`` so the "location" key resolves
    correctly. It is assigned as a plain function onto the wrapped logger
    (``self._logger.findCaller = compat.findCaller``), hence no ``self``.

    Parameters
    ----------
    stack_info : bool
        When True, also capture the formatted stack trace as the fourth item.
    stacklevel : int
        Number of non-internal frames to walk up before reporting the caller;
        frames belonging to logging/importlib internals are not counted.

    Returns
    -------
    tuple
        ``(filename, line number, function name, stack info or None)``.
    """
    f = logging.currentframe()  # noqa: VNE001
    # On some versions of IronPython, currentframe() returns None if
    # IronPython isn't run with -X:Frames.
    if f is None:
        return "(unknown file)", 0, "(unknown function)", None
    while stacklevel > 0:
        next_f = f.f_back
        if next_f is None:
            ## We've got options here.
            ## If we want to use the last (deepest) frame:
            break
        ## If we want to mimic the warnings module:
        # return ("sys", 1, "(unknown function)", None) # noqa: E800
        ## If we want to be pedantic: # noqa: E800
        # raise ValueError("call stack is not deep enough") # noqa: E800
        f = next_f  # noqa: VNE001
        # Only frames outside logging/importlib internals count toward stacklevel
        if not _is_internal_frame(f):
            stacklevel -= 1
    co = f.f_code
    sinfo = None
    if stack_info:
        with io.StringIO() as sio:
            sio.write("Stack (most recent call last):\n")
            traceback.print_stack(f, file=sio)
            sinfo = sio.getvalue()
            # Drop the trailing newline added by print_stack
            if sinfo[-1] == "\n":
                sinfo = sinfo[:-1]
    return co.co_filename, f.f_lineno, co.co_name, sinfo


# The following is based on warnings._is_internal_frame. It makes sure that
# frames of the import mechanism are skipped when logging at module level and
# using a stacklevel value greater than one.
def _is_internal_frame(frame): # pragma: no cover
"""Signal whether the frame is a CPython or logging module internal."""
filename = os.path.normcase(frame.f_code.co_filename)
return filename == logging._srcfile or ("importlib" in filename and "_bootstrap" in filename)
145 changes: 50 additions & 95 deletions aws_lambda_powertools/logging/logger.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,10 @@

import functools
import inspect
import io
import logging
import os
import random
import sys
import traceback
from typing import (
IO,
TYPE_CHECKING,
Expand All @@ -25,6 +23,8 @@

import jmespath

from aws_lambda_powertools.logging import compat

from ..shared import constants
from ..shared.functions import (
extract_event_from_common_models,
Expand Down Expand Up @@ -66,12 +66,7 @@ def _is_cold_start() -> bool:
return cold_start


# PyCharm does not support autocomplete via getattr
# so we need to return to subclassing removed in #97
# All methods/properties continue to be proxied to inner logger
# https://github.com/awslabs/aws-lambda-powertools-python/issues/107
# noinspection PyRedeclaration
class Logger(logging.Logger): # lgtm [py/missing-call-to-init]
class Logger:
"""Creates and setups a logger to format statements in JSON.

Includes service name and any additional key=value into logs
Expand Down Expand Up @@ -238,7 +233,6 @@ def __init__(
self.logger_handler = logger_handler or logging.StreamHandler(stream)
self.log_uncaught_exceptions = log_uncaught_exceptions

self.log_level = self._get_log_level(level)
self._is_deduplication_disabled = resolve_truthy_env_var_choice(
env=os.getenv(constants.LOGGER_LOG_DEDUPLICATION_ENV, "false")
)
Expand All @@ -258,7 +252,7 @@ def __init__(
"use_rfc3339": use_rfc3339,
}

self._init_logger(formatter_options=formatter_options, **kwargs)
self._init_logger(formatter_options=formatter_options, log_level=level, **kwargs)

if self.log_uncaught_exceptions:
logger.debug("Replacing exception hook")
Expand All @@ -277,11 +271,11 @@ def _get_logger(self):
"""Returns a Logger named {self.service}, or {self.service.filename} for child loggers"""
logger_name = self.service
if self.child:
logger_name = f"{self.service}.{self._get_caller_filename()}"
logger_name = f"{self.service}.{_get_caller_filename()}"

return logging.getLogger(logger_name)

def _init_logger(self, formatter_options: Optional[Dict] = None, **kwargs):
def _init_logger(self, formatter_options: Optional[Dict] = None, log_level: Union[str, int, None] = None, **kwargs):
"""Configures new logger"""

# Skip configuration if it's a child logger or a pre-configured logger
Expand All @@ -293,13 +287,13 @@ def _init_logger(self, formatter_options: Optional[Dict] = None, **kwargs):
if self.child or is_logger_preconfigured:
return

self._logger.setLevel(self._determine_log_level(log_level))
self._configure_sampling()
self._logger.setLevel(self.log_level)
self._logger.addHandler(self.logger_handler)
self.structure_logs(formatter_options=formatter_options, **kwargs)

# Maintenance: We can drop this upon Py3.7 EOL. It's a backport for "location" key to work
self._logger.findCaller = self.findCaller
self._logger.findCaller = compat.findCaller

# Pytest Live Log feature duplicates log records for colored output
# but we explicitly add a filter for log deduplication.
Expand Down Expand Up @@ -329,7 +323,7 @@ def _configure_sampling(self):
try:
if self.sampling_rate and random.random() <= float(self.sampling_rate):
logger.debug("Setting log level to Debug due to sampling rate")
self.setLevel(logging.DEBUG)
self._logger.setLevel(logging.DEBUG)
except ValueError:
raise InvalidLoggerSamplingRateError(
f"Expected a float value ranging 0 to 1, but received {self.sampling_rate} instead."
Expand Down Expand Up @@ -445,19 +439,6 @@ def decorate(event, context, *args, **kwargs):

return decorate

def setLevel(self, level: Union[str, int]):
    """
    Set the logging level for the logger.

    Parameters:
    -----------
    level str | int
        The level to set. Can be a string representing the level name: 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'
        or an integer representing the level value: 10 for 'DEBUG', 20 for 'INFO', 30 for 'WARNING', 40 for 'ERROR', 50 for 'CRITICAL'. # noqa: E501
    """
    # Keep the cached attribute and the wrapped logger's level in sync
    self.log_level = level
    self._logger.setLevel(level)

def info(
self,
msg: object,
Expand Down Expand Up @@ -584,17 +565,6 @@ def append_keys(self, **additional_keys):
def remove_keys(self, keys: Iterable[str]):
self.registered_formatter.remove_keys(keys)

@property
def registered_handler(self) -> logging.Handler:
    """Convenience property to access the first registered logger handler."""
    # Child loggers don't own handlers; they live on the parent logger
    handlers = self._logger.parent.handlers if self.child else self._logger.handlers
    return handlers[0]

@property
def registered_formatter(self) -> BasePowertoolsFormatter:
    """Convenience property to access logger formatter"""
    # logging.Handler.formatter is typed Optional[logging.Formatter];
    # the ignore narrows it to the Powertools formatter installed on the handler
    return self.registered_handler.formatter  # type: ignore

def structure_logs(self, append: bool = False, formatter_options: Optional[Dict] = None, **keys):
"""Sets logging formatting to JSON.

Expand Down Expand Up @@ -663,8 +633,38 @@ def get_correlation_id(self) -> Optional[str]:
return self.registered_formatter.log_format.get("correlation_id")
return None

@property
def registered_handler(self) -> logging.Handler:
    """Convenience property to access the first logger handler.

    For child loggers, handlers are registered on the parent logger,
    so the lookup goes through ``self._logger.parent``.
    """
    handlers = self._logger.parent.handlers if self.child else self._logger.handlers
    return handlers[0]

@property
def registered_formatter(self) -> BasePowertoolsFormatter:
    """Convenience property to access the first logger formatter."""
    # logging.Handler.formatter is typed Optional[logging.Formatter];
    # the ignore narrows it to the Powertools formatter installed on the handler
    return self.registered_handler.formatter  # type: ignore[return-value]

@property
def log_level(self) -> int:
    """Numeric level of the underlying logging.Logger (e.g. 10 for DEBUG)."""
    return self._logger.level

@property
def name(self) -> str:
    """Name of the underlying logging.Logger."""
    return self._logger.name

@property
def handlers(self) -> List[logging.Handler]:
    """List of registered logging handlers

    Proxies the handler list of the underlying logging.Logger.

    Notes
    -----

    Looking for the first configured handler? Use registered_handler property instead.
    """
    return self._logger.handlers

@staticmethod
def _get_log_level(level: Union[str, int, None]) -> Union[str, int]:
def _determine_log_level(level: Union[str, int, None]) -> Union[str, int]:
"""Returns preferred log level set by the customer in upper case"""
if isinstance(level, int):
return level
Expand All @@ -675,51 +675,6 @@ def _get_log_level(level: Union[str, int, None]) -> Union[str, int]:

return log_level.upper()

@staticmethod
def _get_caller_filename():
    """Return the ``__name__`` of the module three frames up the call stack."""
    # Frame chain from here:
    #   current frame   => _get_caller_filename
    #   f_back          => _get_logger
    #   f_back.f_back   => logger.py internals
    #   f_back^3        => the calling module (its __name__ is returned)
    frame = inspect.currentframe()
    caller_frame = frame.f_back.f_back.f_back
    return caller_frame.f_globals["__name__"]

# Maintenance: We can drop this upon Py3.7 EOL. It's a backport for "location" key to work
def findCaller(self, stack_info=False, stacklevel=2):  # pragma: no cover
    """
    Find the stack frame of the caller so that we can note the source
    file name, line number and function name.

    Returns a ``(filename, lineno, function name, stack info or None)`` tuple,
    skipping logging/importlib-internal frames while honouring ``stacklevel``.
    """
    f = logging.currentframe()  # noqa: VNE001
    # On some versions of IronPython, currentframe() returns None if
    # IronPython isn't run with -X:Frames.
    if f is None:
        return "(unknown file)", 0, "(unknown function)", None
    while stacklevel > 0:
        next_f = f.f_back
        if next_f is None:
            ## We've got options here.
            ## If we want to use the last (deepest) frame:
            break
        ## If we want to mimic the warnings module:
        # return ("sys", 1, "(unknown function)", None) # noqa: E800
        ## If we want to be pedantic: # noqa: E800
        # raise ValueError("call stack is not deep enough") # noqa: E800
        f = next_f  # noqa: VNE001
        # Only frames outside logging/importlib internals count toward stacklevel
        if not _is_internal_frame(f):
            stacklevel -= 1
    co = f.f_code
    sinfo = None
    if stack_info:
        with io.StringIO() as sio:
            sio.write("Stack (most recent call last):\n")
            traceback.print_stack(f, file=sio)
            sinfo = sio.getvalue()
            # Drop the trailing newline added by print_stack
            if sinfo[-1] == "\n":
                sinfo = sinfo[:-1]
    return co.co_filename, f.f_lineno, co.co_name, sinfo


def set_package_logger(
level: Union[str, int] = logging.DEBUG,
Expand Down Expand Up @@ -760,16 +715,16 @@ def set_package_logger(
logger.addHandler(handler)


# Maintenance: We can drop this upon Py3.7 EOL. It's a backport for "location" key to work
# The following is based on warnings._is_internal_frame. It makes sure that
# frames of the import mechanism are skipped when logging at module level and
# using a stacklevel value greater than one.
def _is_internal_frame(frame): # pragma: no cover
"""Signal whether the frame is a CPython or logging module internal."""
filename = os.path.normcase(frame.f_code.co_filename)
return filename == logging._srcfile or ("importlib" in filename and "_bootstrap" in filename)


def log_uncaught_exception_hook(exc_type, exc_value, exc_traceback, logger: Logger):
    """sys.excepthook-compatible callback that routes uncaught exceptions through Logger"""
    exc_info = (exc_type, exc_value, exc_traceback)
    logger.exception(exc_value, exc_info=exc_info)  # pragma: no cover


def _get_caller_filename():
    """Return the ``__name__`` of the module three frames up the call stack."""
    # Frame chain from here:
    #   current frame   => _get_caller_filename
    #   f_back          => _get_logger
    #   f_back.f_back   => logger.py internals
    #   f_back^3        => the calling module (its __name__ is returned)
    frame = inspect.currentframe()
    caller_frame = frame.f_back.f_back.f_back
    return caller_frame.f_globals["__name__"]
2 changes: 1 addition & 1 deletion aws_lambda_powertools/logging/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ def copy_config_to_registered_loggers(
exclude : Optional[Set[str]], optional
List of logger names to exclude, by default None
"""
level = log_level or source_logger.level
level = log_level or source_logger.log_level

# Assumptions: Only take parent loggers not children (dot notation rule)
# Steps:
Expand Down