Skip to content

Commit

Permalink
Add node id to log records (#800)
Browse files Browse the repository at this point in the history
  • Loading branch information
agners authored Jul 11, 2024
1 parent fa4002e commit a4b92a6
Show file tree
Hide file tree
Showing 4 changed files with 108 additions and 16 deletions.
52 changes: 52 additions & 0 deletions matter_server/common/helpers/logger.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
"""Logger related helpers."""

import logging
from typing import cast

import coloredlogs
from coloredlogs import ColoredFormatter


class MatterFormatter(ColoredFormatter):  # type: ignore[misc]
    """Custom formatter for Matter project.

    Formats records with the default format string, but switches to a
    node-specific format string for records carrying a ``node`` attribute
    (as injected by a ``logging.LoggerAdapter`` with ``{"node": node_id}``).
    """

    def __init__(
        self,
        fmt: str,
        node_fmt: str,
        datefmt: str,
        style: str = coloredlogs.DEFAULT_FORMAT_STYLE,
        level_styles: dict | None = None,
        field_styles: dict | None = None,
    ):
        """Initialize the Matter specific log formatter.

        :param fmt: format string for records without a node id
        :param node_fmt: format string for records that have a ``node`` attribute
        :param datefmt: date format passed through to the base formatter
        :param style: format style (coloredlogs defaults to ``%``)
        :param level_styles: optional per-level color styles
        :param field_styles: optional per-field color styles
        """
        super().__init__(fmt, datefmt, style, level_styles, field_styles)
        # Colorize the node format once up-front; PercentStyle then applies
        # it per record without re-running the colorizer.
        self._node_style = logging.PercentStyle(self.colorize_format(node_fmt, style))

    def format(self, record: logging.LogRecord) -> str:
        """Format the log record, using the node format when applicable."""
        original_style = self._style  # type: ignore[has-type]
        if hasattr(record, "node"):
            self._style = self._node_style
        try:
            result = super().format(record)
        finally:
            # Restore the default style even if formatting raises, so a
            # single bad record cannot leave the node style active for all
            # subsequent (node-less) records.
            self._style = original_style
        return cast(str, result)


class MatterNodeFilter(logging.Filter):
    """Filter for Matter project to filter by node.

    Only records tagged with a node id that is in the configured set are
    let through; untagged records and warnings/errors always pass.
    """

    def __init__(self, node: set[int], name: str = ""):
        """Initialize the filter with the set of node ids to display."""
        super().__init__(name)
        self.node = node

    def filter(self, record: logging.LogRecord) -> bool:
        """Return True if the record should be emitted."""
        # Records without a node id are not node-specific: keep them all.
        if not hasattr(record, "node"):
            return True
        # Warnings and above are always shown; otherwise require a match.
        return record.levelno >= logging.WARNING or record.node in self.node
30 changes: 26 additions & 4 deletions matter_server/server/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
import coloredlogs

from matter_server.common.const import VERBOSE_LOG_LEVEL
from matter_server.common.helpers.logger import MatterFormatter, MatterNodeFilter
from matter_server.server import stack

from .server import MatterServer
Expand Down Expand Up @@ -109,6 +110,12 @@
required=False,
help="Optional bluetooth adapter (id) to enable direct commisisoning support.",
)
parser.add_argument(
"--log-node-ids",
type=int,
nargs="+",
help="List of node IDs to show logs from (applies only to server logs).",
)

args = parser.parse_args()

Expand All @@ -117,28 +124,43 @@ def _setup_logging() -> None:
log_fmt = (
"%(asctime)s.%(msecs)03d (%(threadName)s) %(levelname)s [%(name)s] %(message)s"
)
node_log_fmt = "%(asctime)s.%(msecs)03d (%(threadName)s) %(levelname)s [%(name)s] <Node:%(node)s> %(message)s"
custom_level_style = {
**coloredlogs.DEFAULT_LEVEL_STYLES,
"chip_automation": {"color": "green", "faint": True},
"chip_detail": {"color": "green", "faint": True},
"chip_progress": {},
"chip_error": {"color": "red"},
}
custom_field_styles = {
**coloredlogs.DEFAULT_FIELD_STYLES,
"node": {"color": "magenta"},
}
# Let coloredlogs handle all levels, we filter levels in the logging module
coloredlogs.install(
level=logging.NOTSET, level_styles=custom_level_style, fmt=log_fmt
handler = coloredlogs.StandardErrorHandler(level=logging.NOTSET)
handler.setFormatter(
MatterFormatter(
fmt=log_fmt,
node_fmt=node_log_fmt,
datefmt=FORMAT_DATETIME,
level_styles=custom_level_style,
field_styles=custom_field_styles,
)
)

if args.log_node_ids:
handler.addFilter(MatterNodeFilter(set(args.log_node_ids)))

# Capture warnings.warn(...) and friends messages in logs.
# The standard destination for them is stderr, which may end up unnoticed.
# This way they're where other messages are, and can be filtered as usual.
logging.captureWarnings(True)

logging.basicConfig(level=args.log_level.upper())
logger = logging.getLogger()
logging.addLevelName(VERBOSE_LOG_LEVEL, "VERBOSE")
logging.basicConfig(level=args.log_level.upper(), handlers=[handler])

# setup file handler
logger = logging.getLogger()
if args.log_file:
log_filename = os.path.join(args.log_file)
file_handler = RotatingFileHandler(
Expand Down
29 changes: 19 additions & 10 deletions matter_server/server/device_controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
import asyncio
from collections import deque
from datetime import datetime
from functools import cached_property
from functools import cached_property, lru_cache
import logging
import secrets
import time
Expand Down Expand Up @@ -230,6 +230,13 @@ def _loop(self) -> asyncio.AbstractEventLoop:
assert self.server.loop
return self.server.loop

@lru_cache(maxsize=1024)  # noqa: B019
def get_node_logger(
    self, logger: logging.Logger, node_id: int
) -> logging.LoggerAdapter:
    """Return a (cached) LoggerAdapter that tags records with the node id."""
    # The adapter injects {"node": node_id} into every record so formatters
    # and filters can act on it; lru_cache reuses one adapter per node.
    extra = {"node": node_id}
    return logging.LoggerAdapter(logger, extra)

@api_command(APICommand.GET_NODES)
def get_nodes(self, only_available: bool = False) -> list[MatterNodeData]:
"""Return all Nodes known to the server."""
Expand Down Expand Up @@ -757,7 +764,7 @@ async def ping_node(self, node_id: int, attempts: int = 1) -> NodePingResult:
raise NodeNotExists(
f"Node {node_id} does not exist or is not yet interviewed"
)
node_logger = LOGGER.getChild(f"node_{node_id}")
node_logger = self.get_node_logger(LOGGER, node_id)

battery_powered = (
node.attributes.get(ROUTING_ROLE_ATTRIBUTE_PATH, 0)
Expand Down Expand Up @@ -813,7 +820,7 @@ async def _get_node_ip_addresses(
raise NodeNotExists(
f"Node {node_id} does not exist or is not yet interviewed"
)
node_logger = LOGGER.getChild(f"node_{node_id}")
node_logger = self.get_node_logger(LOGGER, node_id)
# query mdns for all IP's
# ensure both fabric id and node id have 16 characters (prefix with zero's)
mdns_name = f"{self.compressed_fabric_id:0{16}X}-{node_id:0{16}X}.{MDNS_TYPE_OPERATIONAL_NODE}"
Expand Down Expand Up @@ -882,7 +889,7 @@ async def _subscribe_node(self, node_id: int) -> None:
f"Node {node_id} does not exist or has not been interviewed."
)

node_logger = LOGGER.getChild(f"node_{node_id}")
node_logger = self.get_node_logger(LOGGER, node_id)

# Shutdown existing subscriptions for this node first
await self._chip_device_controller.shutdown_subscription(node_id)
Expand Down Expand Up @@ -1104,7 +1111,8 @@ async def _setup_node(self, node_id: int) -> None:
# prevent duplicate setup actions
return
self._nodes_in_setup.add(node_id)
node_logger = LOGGER.getChild(f"node_{node_id}")

node_logger = self.get_node_logger(LOGGER, node_id)
node_data = self._nodes[node_id]
log_timers: dict[int, asyncio.TimerHandle] = {}
is_thread_node = (
Expand Down Expand Up @@ -1297,6 +1305,7 @@ def _on_mdns_operational_node_state(
# the mdns name is constructed as [fabricid]-[nodeid]._matter._tcp.local.
# extract the node id from the name
node_id = int(name.split("-")[1].split(".")[0], 16)
node_logger = self.get_node_logger(logger, node_id)

if not (node := self._nodes.get(node_id)):
return # this should not happen, but guard just in case
Expand All @@ -1315,13 +1324,13 @@ def _on_mdns_operational_node_state(
return

if not self._chip_device_controller.node_has_subscription(node_id):
logger.info("Node %s discovered on MDNS", node_id)
node_logger.info("Discovered on mDNS")
elif (now - last_seen) > NODE_MDNS_BACKOFF:
# node came back online after being offline for a while or restarted
logger.info("Node %s re-discovered on MDNS", node_id)
node_logger.info("Re-discovered on mDNS")
elif state_change == ServiceStateChange.Added:
# Trigger node re-subscriptions when mDNS entry got added
logger.info("Node %s activity on MDNS, trigger resubscribe", node_id)
node_logger.info("Activity on mDNS, trigger resubscribe")
asyncio.create_task(
self._chip_device_controller.trigger_resubscribe_if_scheduled(
node_id, "mDNS state change detected"
Expand Down Expand Up @@ -1383,7 +1392,7 @@ def _node_unavailable(
return
node.available = False
self.server.signal_event(EventType.NODE_UPDATED, node)
node_logger = LOGGER.getChild(f"node_{node_id}")
node_logger = self.get_node_logger(LOGGER, node_id)
node_logger.info("Marked node as unavailable")
if force_resubscription:
# Make sure the subscriptions are expiring very soon to trigger subscription
Expand Down Expand Up @@ -1419,7 +1428,7 @@ async def _node_offline(self, node_id: int) -> None:
return # nothing to do to
node.available = False
self.server.signal_event(EventType.NODE_UPDATED, node)
node_logger = LOGGER.getChild(f"node_{node_id}")
node_logger = self.get_node_logger(LOGGER, node_id)
node_logger.info("Marked node as offline")

async def _fallback_node_scanner(self) -> None:
Expand Down
13 changes: 11 additions & 2 deletions matter_server/server/sdk.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from __future__ import annotations

import asyncio
from functools import partial
from functools import lru_cache, partial
import logging
import time
from typing import TYPE_CHECKING, Any, TypeVar, cast
Expand Down Expand Up @@ -41,6 +41,8 @@

LOGGER = logging.getLogger(__name__)

# pylint: disable=too-many-public-methods


class ChipDeviceControllerWrapper:
"""Class exposing CHIP/Matter devices controller features.
Expand Down Expand Up @@ -99,6 +101,13 @@ async def _call_sdk(
) -> _T:
return await self._call_sdk_executor(None, target, *args, **kwargs)

@lru_cache(maxsize=1024)  # noqa: B019
def get_node_logger(
    self, logger: logging.Logger, node_id: int
) -> logging.LoggerAdapter:
    """Return a (cached) LoggerAdapter that tags records with the node id."""
    # Bounded cache: one adapter instance is reused per (logger, node) pair,
    # so repeated lookups in hot paths don't allocate new adapters.
    node_extra = {"node": node_id}
    return logging.LoggerAdapter(logger, node_extra)

async def get_compressed_fabric_id(self) -> int:
"""Get the compressed fabric id."""
return await self._call_sdk(self._chip_controller.GetCompressedFabricId)
Expand Down Expand Up @@ -365,7 +374,7 @@ async def find_or_establish_case_session(
if self._chip_controller is None:
raise RuntimeError("Device Controller not initialized.")

node_logger = LOGGER.getChild(f"node_{node_id}")
node_logger = self.get_node_logger(LOGGER, node_id)
attempt = 1

while attempt <= retries:
Expand Down

0 comments on commit a4b92a6

Please sign in to comment.