Version 0.3.0
Some checks failed
Run linters on applied template / Python 3.13 lint and build (push) Failing after 2m36s
Changes:
- fix double exception message in main request_processing span
- add OpenSearch to Jaeger and OpenTelemetry Logs
- add optional OpenTelemetry Logs Exporter to structlog
- update deploy README
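The thrust of the commit: the logging configuration moves from the top level of the project config into the observability section. A minimal sketch of the access-path change, using the names from the diffs below:

    level = config.logging.level                 # before this commit
    level = config.observability.logging.level   # after this commit
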
@@ -100,7 +100,7 @@ def launch(
     config.app.host = host or config.app.host
     config.app.port = port or config.app.port
     config.app.debug = debug or config.app.debug
-    config.logging = config.logging if logger_verbosity is None else LoggingConfig(level=logger_verbosity)
+    config.observability.logging = config.observability.logging if logger_verbosity is None else LoggingConfig(level=logger_verbosity)

     with tempfile.NamedTemporaryFile(delete=False) as temp_file:
         temp_yaml_config_path = temp_file.name
@@ -113,7 +113,7 @@ def launch(
     uvicorn_config = {
         "host": config.app.host,
         "port": config.app.port,
-        "log_level": config.logging.level.lower(),
+        "log_level": config.observability.logging.level.lower(),
         "env_file": temp_envfile_path,
     }
     if config.app.debug:
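A note on the .lower() call above: uvicorn expects lowercase level names ("debug", "info", ...), while LoggingLevel values are uppercase. A self-contained illustration of the conversion:

    # "INFO".lower() == "info", the form uvicorn's log_level option accepts
    uvicorn_config = {"host": "127.0.0.1", "port": 8000, "log_level": "INFO".lower()}
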
@@ -9,10 +9,9 @@ from typing import Any, Literal, TextIO, Type
 import yaml

 from {{project_slug}}.db.config import DBConfig, MultipleDBsConfig
+from {{project_slug}}.utils.observability import LoggingConfig, FileLogger, ExporterConfig
 from {{project_slug}}.utils.secrets import SecretStr, representSecretStrYAML
-
-from .utils.observability import LoggingLevel


 @dataclass
 class CORSConfig:
@@ -30,22 +29,6 @@ class AppConfig:
     cors: CORSConfig


-@dataclass
-class FileLogger:
-    filename: str
-    level: LoggingLevel
-
-
-@dataclass
-class LoggingConfig:
-    level: LoggingLevel
-    files: list[FileLogger] = field(default_factory=list)
-
-    def __post_init__(self):
-        if len(self.files) > 0 and isinstance(self.files[0], dict):
-            self.files = [FileLogger(**f) for f in self.files]
-
-
 @dataclass
 class PrometheusConfig:
     host: str
@@ -60,6 +43,7 @@ class JaegerConfig:

 @dataclass
 class ObservabilityConfig:
+    logging: LoggingConfig
     prometheus: PrometheusConfig | None = None
     jaeger: JaegerConfig | None = None

@@ -68,7 +52,6 @@ class ObservabilityConfig:
 class {{ProjectName}}Config:
     app: AppConfig
     db: MultipleDBsConfig
-    logging: LoggingConfig
     observability: ObservabilityConfig

     def to_order_dict(self) -> OrderedDict:
@@ -134,8 +117,13 @@ class {{ProjectName}}Config:
                 )
             ],
         ),
-        logging=LoggingConfig(level="INFO", files=[FileLogger(filename="logs/info.log", level="INFO")]),
         observability=ObservabilityConfig(
+            logging=LoggingConfig(
+                level="INFO",
+                root_logger_level="INFO",
+                exporter=ExporterConfig(endpoint="http://127.0.0.1:4317", level="INFO", tls_insecure=True),
+                files=[FileLogger(filename="logs/info.log", level="INFO")],
+            ),
             prometheus=PrometheusConfig(host="0.0.0.0", port=9090, urls_mapping={"/api/debug/.*": "/api/debug/*"}),
             jaeger=JaegerConfig(endpoint="http://127.0.0.1:4318/v1/traces"),
         ),
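Since exporter is declared as ExporterConfig | None in the observability module (see below), log export stays optional; a minimal sketch of a config with it switched off:

    observability = ObservabilityConfig(
        logging=LoggingConfig(level="INFO", exporter=None),
        prometheus=None,
        jaeger=None,
    )
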
@@ -95,11 +95,24 @@ def get_app(prefix: str = "/api") -> FastAPI:

     application.state.config = app_config

+    exception_mapper = _get_exception_mapper(app_config.app.debug)
+    logger = configure_logging(
+        app_config.observability.logging,
+        tracing_enabled=app_config.observability.jaeger is not None,
+    )
     metrics = init_metrics()
-    metrics_dep.init_dispencer(application, metrics)
-    exception_mapper = _get_exception_mapper(app_config.app.debug)
+    connection_manager = PostgresConnectionManager(
+        master=app_config.db.master,
+        replicas=app_config.db.replicas,
+        logger=logger,
+        application_name=f"{{project_slug}}_{VERSION}",
+    )
     urls_mapper = URLsMapper(app_config.observability.prometheus.urls_mapping)

+    connection_manager_dep.init_dispencer(application, connection_manager)
+    metrics_dep.init_dispencer(application, metrics)
+    logger_dep.init_dispencer(application, logger)
+
     application.add_middleware(
         ObservabilityMiddleware,
         exception_mapper=exception_mapper,
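The init_dispencer/obtain pairs above follow a register-then-fetch pattern: each dependency is stored once on the application and looked up later from the application or request. A rough, hypothetical sketch of the idea, not the template's actual implementation:

    class Dispenser:
        # stores one dependency on app.state under a fixed key
        def __init__(self, key: str):
            self.key = key

        def init_dispencer(self, app, value) -> None:
            setattr(app.state, self.key, value)

        def obtain(self, app):
            return getattr(app.state, self.key)
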
@@ -108,7 +121,7 @@ def get_app(prefix: str = "/api") -> FastAPI:
     )
     application.add_middleware(
         ExceptionHandlerMiddleware,
-        debug=[False],  # reinitialized on startup
+        debug=[app_config.app.debug],
         exception_mapper=exception_mapper,
     )

@@ -122,20 +135,10 @@ async def lifespan(application: FastAPI):
     Initializes database connection in pass_services_dependencies middleware.
     """
     app_config: {{ProjectName}}Config = application.state.config
-    loggers_dict = {logger_config.filename: logger_config.level for logger_config in app_config.logging.files}
-    logger = configure_logging(app_config.logging.level, loggers_dict)
+    logger = logger_dep.obtain(application)

     await logger.ainfo("application is being configured", config=app_config.to_order_dict())

-    connection_manager = PostgresConnectionManager(
-        master=app_config.db.master,
-        replicas=app_config.db.replicas,
-        logger=logger,
-        application_name=f"{{project_slug}}_{VERSION}",
-    )
-    connection_manager_dep.init_dispencer(application, connection_manager)
-    logger_dep.init_dispencer(application, logger)
-
     for middleware in application.user_middleware:
         if middleware.cls == ExceptionHandlerMiddleware:
             middleware.kwargs["debug"][0] = app_config.app.debug
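Both debug=[app_config.app.debug] and the kwargs mutation above rely on the same trick: the single-element list is a shared mutable cell, so updating it in lifespan changes the value the already-registered middleware will read. Reduced to its core:

    cell = [False]              # shared mutable cell
    kwargs = {"debug": cell}    # stored when the middleware is registered
    cell[0] = True              # flipped during startup
    assert kwargs["debug"][0] is True
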
@@ -47,9 +47,15 @@ class ObservabilityMiddleware(BaseHTTPMiddleware):  # pylint: disable=too-few-pu
     async def dispatch(self, request: Request, call_next):
+        logger = logger_dep.obtain(request)
         _try_get_parent_span_id(request)
-        with _tracer.start_as_current_span("http-request") as span:
+        with _tracer.start_as_current_span("http-request", record_exception=False) as span:
             trace_id = hex(span.get_span_context().trace_id or randint(1, 1 << 63))[2:]
             span_id = span.get_span_context().span_id or randint(1, 1 << 31)
+            if trace_id == 0:
+                trace_id = format(randint(1, 1 << 63), "016x")
+                span_id = format(randint(1, 1 << 31), "032x")
+            logger = logger.bind(trace_id=trace_id, span_id=span_id)
+            logger_dep.attach_to_request(request, logger)

             span.set_attributes(
                 {
                     http_attributes.HTTP_REQUEST_METHOD: request.method,
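The record_exception=False argument is the fix for the doubled exception message named in the commit summary: by default an OpenTelemetry span records an exception event when an exception propagates through it, duplicating the one the middleware already logs. A hedged sketch of the parameter's effect:

    from opentelemetry import trace

    tracer = trace.get_tracer(__name__)
    try:
        # the span attaches no exception event of its own; the caller
        # stays responsible for reporting the error exactly once
        with tracer.start_as_current_span("http-request", record_exception=False):
            raise ValueError("boom")
    except ValueError:
        pass
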
@@ -58,8 +64,6 @@ class ObservabilityMiddleware(BaseHTTPMiddleware):  # pylint: disable=too-few-pu
                     "request_client": request.client.host,
                 }
             )
-            logger = logger.bind(trace_id=trace_id, span_id=span_id)
-            request.state.logger = logger

             await logger.ainfo(
                 "handling request",
@@ -1,20 +1,56 @@
 """Observability helper functions are defined here."""

 import logging
+import platform
 import re
 import sys
+from dataclasses import dataclass, field
 from pathlib import Path
 from typing import Literal

 import structlog
 from opentelemetry import trace
+from opentelemetry._logs import set_logger_provider
+from opentelemetry.exporter.otlp.proto.grpc._log_exporter import OTLPLogExporter
+from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
+from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.util.types import Attributes

 LoggingLevel = Literal["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]


+@dataclass
+class ExporterConfig:
+    endpoint: str
+    level: LoggingLevel = "INFO"
+    tls_insecure: bool = False
+
+
+@dataclass
+class FileLogger:
+    filename: str
+    level: LoggingLevel
+
+
+@dataclass
+class LoggingConfig:
+    level: LoggingLevel
+    exporter: ExporterConfig | None
+    root_logger_level: LoggingLevel = "INFO"
+    files: list[FileLogger] = field(default_factory=list)
+
+    def __post_init__(self):
+        if len(self.files) > 0 and isinstance(self.files[0], dict):
+            self.files = [FileLogger(**f) for f in self.files]
+
+
 def configure_logging(
-    log_level: LoggingLevel, files: dict[str, LoggingLevel] | None = None, root_logger_level: LoggingLevel = "INFO"
+    config: LoggingConfig,
+    tracing_enabled: bool,
 ) -> structlog.stdlib.BoundLogger:
+    files = {logger_config.filename: logger_config.level for logger_config in config.files}
+
     level_name_mapping = {
         "DEBUG": logging.DEBUG,
         "INFO": logging.INFO,
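The __post_init__ hook exists because a config parsed from YAML arrives with files as plain dicts; the hook coerces them into FileLogger instances. For example:

    cfg = LoggingConfig(
        level="INFO",
        exporter=None,
        files=[{"filename": "logs/info.log", "level": "INFO"}],
    )
    assert isinstance(cfg.files[0], FileLogger)
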
@@ -22,25 +58,44 @@ def configure_logging(
         "ERROR": logging.ERROR,
         "CRITICAL": logging.CRITICAL,
     }
-    files = files or {}

+    log_level = level_name_mapping[config.level]
+
+    processors = [
+        structlog.contextvars.merge_contextvars,
+        structlog.stdlib.add_log_level,
+        structlog.stdlib.add_logger_name,
+        structlog.processors.TimeStamper(fmt="iso"),
+        structlog.processors.StackInfoRenderer(),
+        structlog.processors.format_exc_info,
+        structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
+    ]
+
+    if tracing_enabled:
+
+        def add_open_telemetry_spans(_, __, event_dict: dict):
+            span = trace.get_current_span()
+            if not span or not span.is_recording():
+                return event_dict
+
+            ctx = span.get_span_context()
+
+            event_dict["span_id"] = format(ctx.span_id, "016x")
+            event_dict["trace_id"] = format(ctx.trace_id, "032x")
+
+            return event_dict
+
+        processors.insert(len(processors) - 1, add_open_telemetry_spans)
+
     structlog.configure(
-        processors=[
-            structlog.contextvars.merge_contextvars,
-            structlog.stdlib.add_log_level,
-            structlog.stdlib.add_logger_name,
-            structlog.processors.TimeStamper(fmt="iso"),
-            structlog.processors.StackInfoRenderer(),
-            structlog.processors.format_exc_info,
-            structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
-        ],
+        processors=processors,
         logger_factory=structlog.stdlib.LoggerFactory(),
         wrapper_class=structlog.stdlib.BoundLogger,
         cache_logger_on_first_use=True,
     )

     logger: structlog.stdlib.BoundLogger = structlog.get_logger("main")
-    logger.setLevel(level_name_mapping[log_level])
+    logger.setLevel(log_level)

     console_handler = logging.StreamHandler(sys.stderr)
     console_handler.setFormatter(
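Note the insertion position: processors.insert(len(processors) - 1, ...) places the span processor just before ProcessorFormatter.wrap_for_formatter, which must stay last because it hands the event dict off to stdlib logging. A tiny sketch of the resulting order:

    processors = ["merge_contextvars", "add_log_level", "wrap_for_formatter"]
    processors.insert(len(processors) - 1, "add_open_telemetry_spans")
    assert processors[-2:] == ["add_open_telemetry_spans", "wrap_for_formatter"]
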
@@ -60,7 +115,29 @@ def configure_logging(
             file_handler.setLevel(level_name_mapping[level])
             root_logger.addHandler(file_handler)

-    root_logger.setLevel(root_logger_level)
+    root_logger.setLevel(config.root_logger_level)

+    if config.exporter is not None:
+        logger_provider = LoggerProvider(
+            resource=Resource.create(
+                {
+                    "service.name": "{{project_name}}",
+                    "service.instance.id": platform.node(),
+                }
+            ),
+        )
+        set_logger_provider(logger_provider)
+
+        otlp_exporter = OTLPLogExporter(endpoint=config.exporter.endpoint, insecure=config.exporter.tls_insecure)
+        logger_provider.add_log_record_processor(BatchLogRecordProcessor(otlp_exporter))
+
+        exporter_handler = AttrFilteredLoggingHandler(
+            level=config.exporter.level,
+            logger_provider=logger_provider,
+        )
+        # exporter_handler.setFormatter(structlog.stdlib.ProcessorFormatter(processor=structlog.processors.JSONRenderer()))
+        exporter_handler.setLevel(level_name_mapping[config.exporter.level])
+        logger.addHandler(exporter_handler)
+
     return logger

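With the exporter block in place, ordinary structlog calls on the returned logger are batched and shipped as OTLP log records. A hedged usage sketch, assuming a collector listens on the ExporterConfig endpoint:

    config = LoggingConfig(
        level="INFO",
        exporter=ExporterConfig(endpoint="http://127.0.0.1:4317", tls_insecure=True),
    )
    logger = configure_logging(config, tracing_enabled=False)
    logger.info("this event is also exported via OTLP")
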
@@ -102,3 +179,15 @@ def get_span_headers() -> dict[str, str]:
         "X-Span-Id": str(ctx.span_id),
         "X-Trace-Id": str(ctx.trace_id),
     }
+
+
+class AttrFilteredLoggingHandler(LoggingHandler):
+    DROP_ATTRIBUTES = ["_logger"]
+
+    @staticmethod
+    def _get_attributes(record: logging.LogRecord) -> Attributes:
+        attributes = LoggingHandler._get_attributes(record)
+        for attr in AttrFilteredLoggingHandler.DROP_ATTRIBUTES:
+            if attr in attributes:
+                del attributes[attr]
+        return attributes
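This subclass exists because structlog's ProcessorFormatter integration passes the wrapped logger to stdlib logging via extra, leaving a _logger attribute on the record that is not a valid OTLP attribute value. A hedged check of the filtering, with an illustrative hand-built record:

    import logging

    record = logging.LogRecord("main", logging.INFO, __file__, 1, "msg", None, None)
    record._logger = object()  # what structlog leaves on the record
    assert "_logger" not in AttrFilteredLoggingHandler._get_attributes(record)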