diff --git a/contrib/ldk-server-config.toml b/contrib/ldk-server-config.toml index ce569cad..5dc09425 100644 --- a/contrib/ldk-server-config.toml +++ b/contrib/ldk-server-config.toml @@ -15,6 +15,10 @@ dir_path = "/tmp/ldk-server/" # Path for LDK and BDK data persis [log] level = "Debug" # Log level (Error, Warn, Info, Debug, Trace) #file = "/tmp/ldk-server/ldk-server.log" # Log file path +log_to_file = true # Enable logging to a file (default: true, also logs to both stdout and stderr) +#max_size_mb = 50 # Max size of log file before rotation (default: 50MB) +#rotation_interval_hours = 24 # Max age of log file before rotation (default: 24h) +#max_files = 5 # Number of rotated log files to keep (default: 5) [tls] #cert_path = "/path/to/tls.crt" # Path to TLS certificate, by default uses dir_path/tls.crt diff --git a/docs/configuration.md b/docs/configuration.md index 45f3020f..3d970af3 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -61,8 +61,12 @@ Where persistent data is stored. Defaults to `~/.ldk-server/` on Linux and ### `[log]` -Log level and file path. The server reopens the log file on `SIGHUP`, which integrates with -standard `logrotate` setups. +Controls logging behavior. By default, `log_to_file` is `true` and logs are also written +to `stdout`/`stderr`. + +If `log_to_file` is enabled, the server performs internal rotation and retention +based on `max_size_mb`, `rotation_interval_hours`, and `max_files`. The server will +also reopen the log file on `SIGHUP` for compatibility with external tools like `logrotate`. ### `[tls]` diff --git a/docs/operations.md b/docs/operations.md index 1a8935dd..274325ec 100644 --- a/docs/operations.md +++ b/docs/operations.md @@ -21,13 +21,17 @@ The server handles `SIGTERM` and `CTRL-C` (SIGINT). On receipt, it: ### Log Rotation -> **Important:** LDK Server does not rotate or truncate its own log file. 
Without log rotation -> configured, the log file will grow indefinitely and can eventually fill your disk. A full -> disk can prevent the node from persisting channel state, risking fund loss. +LDK Server always logs to `stdout`/`stderr`. When running under `systemd` or Docker, +this allows the environment (e.g., `journald`) to handle persistence, rotation, and +compression automatically. -The server reopens its log file on `SIGHUP`. This integrates with standard `logrotate`. Save -the following config to `/etc/logrotate.d/ldk-server` (adjust the log path to match your -setup): +If `log_to_file` is enabled in the configuration (the default), LDK Server will automatically rotate +logs when they exceed 50MB or 24 hours (configurable) and keep the last 5 uncompressed +log files. + +If you prefer to use system `logrotate` for file logs, the server still reopens its log +file on `SIGHUP`. Save the following config to `/etc/logrotate.d/ldk-server` +(adjust the log path to match your setup): ``` /var/lib/ldk-server/regtest/ldk-server.log { diff --git a/ldk-server/src/main.rs b/ldk-server/src/main.rs index 3551a367..8b3f4f8b 100644 --- a/ldk-server/src/main.rs +++ b/ldk-server/src/main.rs @@ -48,7 +48,7 @@ use crate::io::persist::{ }; use crate::service::NodeService; use crate::util::config::{load_config, ArgsConfig, ChainSource}; -use crate::util::logger::ServerLogger; +use crate::util::logger::{LogConfig, ServerLogger}; use crate::util::metrics::Metrics; use crate::util::proto_adapter::{forwarded_payment_to_proto, payment_to_proto}; use crate::util::systemd; @@ -121,7 +121,14 @@ fn main() { std::process::exit(-1); } - let logger = match ServerLogger::init(config_file.log_level, &log_file_path) { + let log_config = LogConfig { + log_to_file: config_file.log_to_file, + log_max_files: config_file.log_max_files, + log_max_size_bytes: config_file.log_max_size_bytes, + log_rotation_interval_secs: config_file.log_rotation_interval_secs, + }; + + let logger = match 
ServerLogger::init(config_file.log_level, &log_file_path, log_config) { Ok(logger) => logger, Err(e) => { eprintln!("Failed to initialize logger: {e}"); @@ -519,6 +526,7 @@ fn main() { break; } _ = sighup_stream.recv() => { + info!("Received SIGHUP, reopening log file.."); if let Err(e) = logger.reopen() { error!("Failed to reopen log file on SIGHUP: {e}"); } diff --git a/ldk-server/src/util/config.rs b/ldk-server/src/util/config.rs index 24e31c81..c939718b 100644 --- a/ldk-server/src/util/config.rs +++ b/ldk-server/src/util/config.rs @@ -56,6 +56,10 @@ pub struct Config { pub lsps2_service_config: Option, pub log_level: LevelFilter, pub log_file_path: Option, + pub log_max_size_bytes: usize, + pub log_rotation_interval_secs: u64, + pub log_max_files: usize, + pub log_to_file: bool, pub pathfinding_scores_source_url: Option, pub metrics_enabled: bool, pub poll_metrics_interval: Option, @@ -110,6 +114,10 @@ struct ConfigBuilder { lsps2: Option, log_level: Option, log_file_path: Option, + log_max_size_mb: Option, + log_rotation_interval_hours: Option, + log_max_files: Option, + log_to_file: Option, pathfinding_scores_source_url: Option, metrics_enabled: Option, poll_metrics_interval: Option, @@ -158,6 +166,11 @@ impl ConfigBuilder { if let Some(log) = toml.log { self.log_level = log.level.or(self.log_level.clone()); self.log_file_path = log.file.or(self.log_file_path.clone()); + self.log_max_size_mb = log.max_size_mb.or(self.log_max_size_mb); + self.log_rotation_interval_hours = + log.rotation_interval_hours.or(self.log_rotation_interval_hours); + self.log_max_files = log.max_files.or(self.log_max_files); + self.log_to_file = log.log_to_file.or(self.log_to_file); } if let Some(liquidity) = toml.liquidity { @@ -249,6 +262,22 @@ impl ConfigBuilder { if let Some(tor_proxy_address) = &args.tor_proxy_address { self.tor_proxy_address = Some(tor_proxy_address.clone()); } + + if let Some(log_max_size_mb) = args.log_max_size_mb { + self.log_max_size_mb = 
Some(log_max_size_mb); + } + + if let Some(log_rotation_interval_hours) = args.log_rotation_interval_hours { + self.log_rotation_interval_hours = Some(log_rotation_interval_hours); + } + + if let Some(log_max_files) = args.log_max_files { + self.log_max_files = Some(log_max_files); + } + + if args.log_to_file { + self.log_to_file = Some(true); + } } fn build(self) -> io::Result { @@ -358,6 +387,11 @@ impl ConfigBuilder { .transpose()? .unwrap_or(LevelFilter::Debug); + let log_max_size_bytes = self.log_max_size_mb.unwrap_or(50) * 1024 * 1024; + let log_rotation_interval_secs = self.log_rotation_interval_hours.unwrap_or(24) * 60 * 60; + let log_max_files = self.log_max_files.unwrap_or(5); + let log_to_file = self.log_to_file.unwrap_or(true); + let lsps2_client_config = self .lsps2 .as_ref() @@ -428,6 +462,10 @@ impl ConfigBuilder { lsps2_service_config, log_level, log_file_path: self.log_file_path, + log_max_size_bytes: log_max_size_bytes as usize, + log_rotation_interval_secs, + log_max_files, + log_to_file, pathfinding_scores_source_url, metrics_enabled, poll_metrics_interval, @@ -497,6 +535,10 @@ struct EsploraConfig { struct LogConfig { level: Option, file: Option, + max_size_mb: Option, + rotation_interval_hours: Option, + max_files: Option, + log_to_file: Option, } #[derive(Deserialize, Serialize)] @@ -733,6 +775,34 @@ pub struct ArgsConfig { )] node_alias: Option, + #[arg( + long, + env = "LDK_SERVER_LOG_MAX_SIZE_MB", + help = "The maximum size of the log file in MB before rotation. Defaults to 50MB." + )] + log_max_size_mb: Option, + + #[arg( + long, + env = "LDK_SERVER_LOG_ROTATION_INTERVAL_HOURS", + help = "The maximum age of the log file in hours before rotation. Defaults to 24h." + )] + log_rotation_interval_hours: Option, + + #[arg( + long, + env = "LDK_SERVER_LOG_MAX_FILES", + help = "The maximum number of rotated log files to keep. Defaults to 5." 
+ )] + log_max_files: Option, + + #[arg( + long, + env = "LDK_SERVER_LOG_TO_FILE", + help = "The option to enable logging to a file. Defaults to true. If false, logging to file is disabled." + )] + log_to_file: bool, + #[arg( long, env = "LDK_SERVER_BITCOIND_RPC_ADDRESS", @@ -896,6 +966,10 @@ mod tests { [log] level = "Trace" file = "/var/log/ldk-server.log" + max_size_mb = 50 + rotation_interval_hours = 24 + max_files = 5 + log_to_file = true [bitcoind] rpc_address = "127.0.0.1:8332" @@ -941,6 +1015,10 @@ mod tests { metrics_username: None, metrics_password: None, tor_proxy_address: None, + log_to_file: true, + log_max_size_mb: Some(50), + log_rotation_interval_hours: Some(24), + log_max_files: Some(5), } } @@ -962,6 +1040,10 @@ mod tests { metrics_username: None, metrics_password: None, tor_proxy_address: None, + log_to_file: true, + log_max_size_mb: None, + log_rotation_interval_hours: None, + log_max_files: None, } } @@ -1029,6 +1111,10 @@ mod tests { }), log_level: LevelFilter::Trace, log_file_path: Some("/var/log/ldk-server.log".to_string()), + log_max_size_bytes: 50 * 1024 * 1024, + log_rotation_interval_secs: 24 * 60 * 60, + log_max_files: 5, + log_to_file: true, pathfinding_scores_source_url: None, metrics_enabled: false, poll_metrics_interval: None, @@ -1344,6 +1430,10 @@ mod tests { metrics_password: None, tor_config: None, hrn_config: HumanReadableNamesConfig::default(), + log_max_size_bytes: 50 * 1024 * 1024, + log_rotation_interval_secs: 24 * 60 * 60, + log_max_files: 5, + log_to_file: true, }; assert_eq!(config.listening_addrs, expected.listening_addrs); @@ -1357,6 +1447,10 @@ mod tests { assert_eq!(config.pathfinding_scores_source_url, expected.pathfinding_scores_source_url); assert_eq!(config.metrics_enabled, expected.metrics_enabled); assert_eq!(config.tor_config, expected.tor_config); + assert_eq!(config.log_max_size_bytes, expected.log_max_size_bytes); + assert_eq!(config.log_rotation_interval_secs, expected.log_rotation_interval_secs); + 
assert_eq!(config.log_max_files, expected.log_max_files); + assert_eq!(config.log_to_file, expected.log_to_file); } #[test] @@ -1454,6 +1548,10 @@ mod tests { proxy_address: SocketAddress::from_str("127.0.0.1:9050").unwrap(), }), hrn_config: HumanReadableNamesConfig::default(), + log_max_size_bytes: 50 * 1024 * 1024, + log_rotation_interval_secs: 24 * 60 * 60, + log_max_files: 5, + log_to_file: false, }; assert_eq!(config.listening_addrs, expected.listening_addrs); diff --git a/ldk-server/src/util/logger.rs b/ldk-server/src/util/logger.rs index 5e27a98d..557b2db9 100644 --- a/ldk-server/src/util/logger.rs +++ b/ldk-server/src/util/logger.rs @@ -8,74 +8,151 @@ // licenses. use std::fs::{self, File, OpenOptions}; -use std::io::{self, Write}; +use std::io::{self, BufWriter, Write}; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; +use std::time::SystemTime; use log::{Level, LevelFilter, Log, Metadata, Record}; +struct LoggerState { + file: BufWriter<File>, + bytes_written: usize, + created_at: SystemTime, + log_max_size_bytes: usize, + log_rotation_interval_secs: u64, + log_max_files: usize, +} + /// A logger implementation that writes logs to both stderr and a file. /// /// The logger formats log messages with RFC3339 timestamps and writes them to: /// - stdout/stderr for console output -/// - A file specified during initialization +/// - A file specified during initialization (if enabled) /// /// All log messages follow the format: /// `[TIMESTAMP LEVEL TARGET FILE:LINE] MESSAGE` /// /// Example: `[2025-12-04T10:30:45Z INFO ldk_server:42] Starting up...` /// -/// The logger handles SIGHUP for log rotation by reopening the file handle when signaled. +/// The logger performs native size/time-based rotation and, if `max_files` is unset, retains the last 5 rotated log files by default. 
pub struct ServerLogger { /// The maximum log level to display level: LevelFilter, - /// The file to write logs to, protected by a mutex for thread-safe access - file: Mutex<File>, + /// Groups the file and state in a single Mutex. None if file logging is disabled. + state: Option<Mutex<LoggerState>>, /// Path to the log file for reopening on SIGHUP log_file_path: PathBuf, } +pub struct LogConfig { + pub log_to_file: bool, + pub log_max_size_bytes: usize, + pub log_rotation_interval_secs: u64, + pub log_max_files: usize, +} + impl ServerLogger { /// Initializes the global logger with the specified level and file path. /// - /// Opens or creates the log file at the given path. If the file exists, logs are appended. + /// Opens or creates the log file at the given path if `log_to_file` is true. + /// If the file exists, logs are appended. /// If the file doesn't exist, it will be created along with any necessary parent directories. /// /// This should be called once at application startup. Subsequent calls will fail. /// /// Returns an Arc to the logger for signal handling purposes. 
- pub fn init(level: LevelFilter, log_file_path: &Path) -> Result<Arc<Self>, io::Error> { - // Create parent directories if they don't exist - if let Some(parent) = log_file_path.parent() { - fs::create_dir_all(parent)?; - } + pub fn init( + level: LevelFilter, log_file_path: &Path, log_config: LogConfig, + ) -> Result<Arc<Self>, io::Error> { + let state = if log_config.log_to_file { + // Create parent directories if they don't exist + if let Some(parent) = log_file_path.parent() { + fs::create_dir_all(parent)?; + } + + let file = open_log_file(log_file_path)?; - let file = open_log_file(log_file_path)?; + // Check existing file metadata to persist size and age across node restarts + let metadata = fs::metadata(log_file_path); + let initial_size = metadata.as_ref().map(|m| m.len() as usize).unwrap_or(0); + let created_at = metadata + .and_then(|m| m.created().or_else(|_| m.modified())) + .unwrap_or_else(|_| SystemTime::now()); - let logger = Arc::new(ServerLogger { - level, - file: Mutex::new(file), - log_file_path: log_file_path.to_path_buf(), - }); + Some(Mutex::new(LoggerState { + file: BufWriter::new(file), + bytes_written: initial_size, + created_at, + log_max_size_bytes: log_config.log_max_size_bytes, + log_rotation_interval_secs: log_config.log_rotation_interval_secs, + log_max_files: log_config.log_max_files, + })) + } else { + None + }; + + let logger = + Arc::new(ServerLogger { level, log_file_path: log_file_path.to_path_buf(), state }); log::set_boxed_logger(Box::new(LoggerWrapper(Arc::clone(&logger)))) .map_err(io::Error::other)?; log::set_max_level(level); + Ok(logger) } - /// Reopens the log file. Called on SIGHUP for log rotation. + /// Reopens the log file. This flushes the current file writer and opens + /// the file at `log_file_path` again. + /// + /// Called on SIGHUP for log rotation. 
pub fn reopen(&self) -> Result<(), io::Error> { + if let Some(state_mutex) = &self.state { + if let Ok(mut state) = state_mutex.lock() { + state.file.flush()?; + let file = open_log_file(&self.log_file_path)?; + + // Reset size and age tracking for the new file + let metadata = fs::metadata(&self.log_file_path); + state.bytes_written = metadata.as_ref().map(|m| m.len() as usize).unwrap_or(0); + state.created_at = metadata + .and_then(|m| m.created().or_else(|_| m.modified())) + .unwrap_or_else(|_| SystemTime::now()); + + state.file = BufWriter::new(file); + return Ok(()); + } + return Err(io::Error::other("Logger state mutex poisoned")); + } + Ok(()) + } + + /// Flushes the current file, renames it with a timestamp, opens a fresh log, + /// and synchronously deletes older log files. + fn rotate(&self, state: &mut LoggerState) -> Result<(), io::Error> { + state.file.flush()?; + + let now = chrono::Utc::now().format("%Y-%m-%dT%H-%M-%SZ").to_string(); + let mut new_path = self.log_file_path.to_path_buf().into_os_string(); + new_path.push("."); + new_path.push(now); + let rotated_path = PathBuf::from(new_path); + + fs::rename(&self.log_file_path, &rotated_path)?; + let new_file = open_log_file(&self.log_file_path)?; - match self.file.lock() { - Ok(mut file) => { - // Flush the old buffer before replacing with the new file - file.flush()?; - *file = new_file; - Ok(()) - }, - Err(e) => Err(io::Error::other(format!("Failed to acquire lock: {e}"))), + state.file = BufWriter::new(new_file); + + // Reset our rotation triggers for the new file + state.bytes_written = 0; + state.created_at = SystemTime::now(); + + // Clean up old log files + if let Err(e) = cleanup_old_logs(&self.log_file_path, state.log_max_files) { + eprintln!("Failed to clean up old log files: {}", e); } + + Ok(()) } } @@ -89,43 +166,49 @@ impl Log for ServerLogger { let level_str = format_level(record.level()); let line = record.line().unwrap_or(0); + let log_line = format!( + "[{} {} {}:{}] {}", + 
format_timestamp(), + level_str, + record.target(), + line, + record.args() + ); + // Log to console - let _ = match record.level() { + match record.level() { Level::Error => { - writeln!( - io::stderr(), - "[{} {} {}:{}] {}", - format_timestamp(), - level_str, - record.target(), - line, - record.args() - ) + let _ = writeln!(io::stderr(), "{}", log_line); }, _ => { - writeln!( - io::stdout(), - "[{} {} {}:{}] {}", - format_timestamp(), - level_str, - record.target(), - line, - record.args() - ) + let _ = writeln!(io::stdout(), "{}", log_line); }, }; - // Log to file - if let Ok(mut file) = self.file.lock() { - let _ = writeln!( - file, - "[{} {} {}:{}] {}", - format_timestamp(), - level_str, - record.target(), - line, - record.args() - ); + if let Some(state_mutex) = &self.state { + // Log to file + let log_bytes = log_line.len() + 1; + + if let Ok(mut state) = state_mutex.lock() { + let mut needs_rotation = false; + + if state.bytes_written + log_bytes > state.log_max_size_bytes { + needs_rotation = true; + } else if let Ok(age) = SystemTime::now().duration_since(state.created_at) { + if age.as_secs() > state.log_rotation_interval_secs { + needs_rotation = true; + } + } + + if needs_rotation { + if let Err(e) = self.rotate(&mut state) { + eprintln!("Failed to rotate log file: {}", e); + } + } + + let _ = writeln!(state.file, "{}", log_line); + state.bytes_written += log_bytes; + } } } } @@ -133,8 +216,11 @@ impl Log for ServerLogger { fn flush(&self) { let _ = io::stdout().flush(); let _ = io::stderr().flush(); - if let Ok(mut file) = self.file.lock() { - let _ = file.flush(); + + if let Some(state_mutex) = &self.state { + if let Ok(mut state) = state_mutex.lock() { + let _ = state.file.flush(); + } } } } @@ -158,6 +244,29 @@ fn open_log_file(log_file_path: &Path) -> Result { OpenOptions::new().create(true).append(true).open(log_file_path) } +fn cleanup_old_logs(log_file_path: &Path, max_files: usize) -> io::Result<()> { + let parent = 
log_file_path.parent().unwrap_or_else(|| Path::new(".")); + let file_name = log_file_path.file_name().and_then(|n| n.to_str()).unwrap_or(""); + let mut entries: Vec<_> = fs::read_dir(parent)? + .filter_map(|entry| entry.ok()) + .filter(|entry| { + let name = entry.file_name().into_string().unwrap_or_default(); + name.starts_with(file_name) && name != file_name + }) + .collect(); + + // Sort by modification time (oldest first) + entries.sort_by_key(|e| e.metadata().and_then(|m| m.modified()).unwrap_or(SystemTime::now())); + + if entries.len() > max_files { + for entry in entries.iter().take(entries.len() - max_files) { + let _ = fs::remove_file(entry.path()); + } + } + + Ok(()) +} + /// Wrapper to allow Arc<ServerLogger> to implement Log trait struct LoggerWrapper(Arc<ServerLogger>);